1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev); 19 20 struct rtw89_eapol_2_of_2 { 21 u8 gtkbody[14]; 22 u8 key_des_ver; 23 u8 rsvd[92]; 24 } __packed; 25 26 struct rtw89_sa_query { 27 u8 category; 28 u8 action; 29 } __packed; 30 31 struct rtw89_arp_rsp { 32 u8 llc_hdr[sizeof(rfc1042_header)]; 33 __be16 llc_type; 34 struct arphdr arp_hdr; 35 u8 sender_hw[ETH_ALEN]; 36 __be32 sender_ip; 37 u8 target_hw[ETH_ALEN]; 38 __be32 target_ip; 39 } __packed; 40 41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 42 43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { 44 .ver = 0x00, 45 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 47 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 48 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 49 }, 50 }; 51 EXPORT_SYMBOL(rtw89_fw_blacklist_default); 52 53 union rtw89_fw_element_arg { 54 size_t offset; 55 enum rtw89_rf_path rf_path; 56 enum rtw89_fw_type fw_type; 57 }; 58 59 struct rtw89_fw_element_handler { 60 int (*fn)(struct rtw89_dev *rtwdev, 61 const struct rtw89_fw_element_hdr *elm, 62 const union rtw89_fw_element_arg arg); 63 const union rtw89_fw_element_arg arg; 64 const char *name; 65 }; 66 67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 68 struct sk_buff *skb); 69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 70 struct rtw89_wait_info *wait, unsigned int cond); 71 static int __parse_security_section(struct rtw89_dev *rtwdev, 72 struct rtw89_fw_bin_info *info, 73 struct rtw89_fw_hdr_section_info *section_info, 74 const void *content, 75 u32 *mssc_len); 76 77 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 78 bool header) 79 { 80 struct sk_buff *skb; 81 u32 header_len = 0; 82 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 83 84 if (header) 85 header_len = H2C_HEADER_LEN; 86 87 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 88 if (!skb) 89 return NULL; 90 skb_reserve(skb, header_len + h2c_desc_size); 91 memset(skb->data, 0, len); 92 93 return skb; 94 } 95 96 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 97 { 98 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 99 } 100 101 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 102 { 103 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 104 } 105 106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 107 { 108 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 109 u8 val; 110 int ret; 111 112 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 113 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 114 1, FWDL_WAIT_CNT, false, rtwdev, type); 115 if (ret) { 116 switch (val) { 117 case RTW89_FWDL_CHECKSUM_FAIL: 118 rtw89_err(rtwdev, "fw checksum fail\n"); 119 return -EINVAL; 120 121 case RTW89_FWDL_SECURITY_FAIL: 122 rtw89_err(rtwdev, "fw security fail\n"); 123 return -EINVAL; 124 125 case RTW89_FWDL_CV_NOT_MATCH: 126 rtw89_err(rtwdev, "fw cv not match\n"); 127 return -EINVAL; 128 129 default: 130 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 131 return 
-EBUSY; 132 } 133 } 134 135 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 136 137 return 0; 138 } 139 140 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 141 struct rtw89_fw_bin_info *info) 142 { 143 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 144 const struct rtw89_chip_info *chip = rtwdev->chip; 145 struct rtw89_fw_hdr_section_info *section_info; 146 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 147 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 148 const struct rtw89_fw_hdr_section *section; 149 const u8 *fw_end = fw + len; 150 const u8 *bin; 151 u32 base_hdr_len; 152 u32 mssc_len; 153 int ret; 154 u32 i; 155 156 if (!info) 157 return -EINVAL; 158 159 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 160 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 161 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 162 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 163 164 if (info->dynamic_hdr_en) { 165 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 166 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 167 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 168 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 169 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 170 return -EINVAL; 171 } 172 } else { 173 info->hdr_len = base_hdr_len; 174 info->dynamic_hdr_len = 0; 175 } 176 177 bin = fw + info->hdr_len; 178 179 /* jump to section header */ 180 section_info = info->section_info; 181 for (i = 0; i < info->section_num; i++) { 182 section = &fw_hdr->sections[i]; 183 section_info->type = 184 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 185 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 186 187 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 188 section_info->len += FWDL_SECTION_CHKSUM_LEN; 189 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 190 section_info->dladdr = 191 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 192 section_info->addr = bin; 193 194 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 195 section_info->mssc = 196 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 197 198 ret = __parse_security_section(rtwdev, info, section_info, 199 bin, &mssc_len); 200 if (ret) 201 return ret; 202 203 if (sec->secure_boot && chip->chip_id == RTL8852B) 204 section_info->len_override = 960; 205 } else { 206 section_info->mssc = 0; 207 mssc_len = 0; 208 } 209 210 rtw89_debug(rtwdev, RTW89_DBG_FW, 211 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 212 i, section_info->type, section_info->len, 213 section_info->mssc, mssc_len, bin - fw); 214 rtw89_debug(rtwdev, RTW89_DBG_FW, 215 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 216 section_info->ignore, section_info->key_addr, 217 section_info->key_addr ? 
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
			       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content +
section_info->len + 338 le32_to_cpu(mss_hdr->key_raw_offset) + 339 key_sign_len * real_key_idx; 340 section_info->key_len = key_sign_len; 341 section_info->key_idx = real_key_idx; 342 343 out: 344 if (info->secure_section_exist) { 345 section_info->ignore = true; 346 return 0; 347 } 348 349 info->secure_section_exist = true; 350 351 return 0; 352 353 ignore: 354 section_info->ignore = true; 355 356 return 0; 357 } 358 359 static int __check_secure_blacklist(struct rtw89_dev *rtwdev, 360 struct rtw89_fw_bin_info *info, 361 struct rtw89_fw_hdr_section_info *section_info, 362 const void *content) 363 { 364 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist; 365 const union rtw89_fw_section_mssc_content *section_content = content; 366 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 367 u8 byte_idx; 368 u8 bit_mask; 369 370 if (!sec->secure_boot) 371 return 0; 372 373 if (!info->secure_section_exist || section_info->ignore) 374 return 0; 375 376 if (!chip_blacklist) { 377 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n"); 378 return -ENOENT; 379 } 380 381 byte_idx = section_content->blacklist.bit_in_chip_list >> 3; 382 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7); 383 384 if (section_content->blacklist.ver > chip_blacklist->ver) { 385 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n", 386 section_content->blacklist.ver, chip_blacklist->ver); 387 return -EINVAL; 388 } 389 390 if (chip_blacklist->list[byte_idx] & bit_mask) { 391 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n", 392 section_content->blacklist.ver); 393 return -EPERM; 394 } 395 396 return 0; 397 } 398 399 static int __parse_security_section(struct rtw89_dev *rtwdev, 400 struct rtw89_fw_bin_info *info, 401 struct rtw89_fw_hdr_section_info *section_info, 402 const void *content, 403 u32 *mssc_len) 404 { 405 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 406 int ret; 407 408 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 409 ret = __parse_formatted_mssc(rtwdev, info, section_info, 410 content, mssc_len); 411 if (ret) 412 return -EINVAL; 413 } else { 414 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 415 if (info->dsp_checksum) 416 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 417 418 if (sec->secure_boot) { 419 if (sec->mss_idx >= section_info->mssc) { 420 rtw89_err(rtwdev, "unexpected MSS %d >= %d\n", 421 sec->mss_idx, section_info->mssc); 422 return -EFAULT; 423 } 424 section_info->key_addr = content + section_info->len + 425 sec->mss_idx * FWDL_SECURITY_SIGLEN; 426 section_info->key_len = FWDL_SECURITY_SIGLEN; 427 } 428 429 info->secure_section_exist = true; 430 } 431 432 ret = __check_secure_blacklist(rtwdev, info, section_info, content); 433 WARN_ONCE(ret, "Current firmware in blacklist. 
Please update firmware.\n"); 434 435 return 0; 436 } 437 438 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 439 struct rtw89_fw_bin_info *info) 440 { 441 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 442 struct rtw89_fw_hdr_section_info *section_info; 443 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 444 const struct rtw89_fw_hdr_section_v1 *section; 445 const u8 *fw_end = fw + len; 446 const u8 *bin; 447 u32 base_hdr_len; 448 u32 mssc_len; 449 int ret; 450 u32 i; 451 452 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 453 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 454 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 455 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 456 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 457 458 if (info->dynamic_hdr_en) { 459 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 460 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 461 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 462 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 463 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 464 return -EINVAL; 465 } 466 } else { 467 info->hdr_len = base_hdr_len; 468 info->dynamic_hdr_len = 0; 469 } 470 471 bin = fw + info->hdr_len; 472 473 /* jump to section header */ 474 section_info = info->section_info; 475 for (i = 0; i < info->section_num; i++) { 476 section = &fw_hdr->sections[i]; 477 478 section_info->type = 479 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 480 section_info->len = 481 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 482 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 483 section_info->len += FWDL_SECTION_CHKSUM_LEN; 484 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 485 section_info->dladdr = 486 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 487 section_info->addr = bin; 488 489 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 490 section_info->mssc = 491 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 492 493 ret = __parse_security_section(rtwdev, info, section_info, 494 bin, &mssc_len); 495 if (ret) 496 return ret; 497 } else { 498 section_info->mssc = 0; 499 mssc_len = 0; 500 } 501 502 rtw89_debug(rtwdev, RTW89_DBG_FW, 503 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 504 i, section_info->type, section_info->len, 505 section_info->mssc, mssc_len, bin - fw); 506 rtw89_debug(rtwdev, RTW89_DBG_FW, 507 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 508 section_info->ignore, section_info->key_addr, 509 section_info->key_addr ? 
510 section_info->key_addr - section_info->addr : 0, 511 section_info->key_len, section_info->key_idx); 512 513 bin += section_info->len + mssc_len; 514 section_info++; 515 } 516 517 if (fw_end != bin) { 518 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 519 return -EINVAL; 520 } 521 522 if (!info->secure_section_exist) 523 rtw89_warn(rtwdev, "no firmware secure section\n"); 524 525 return 0; 526 } 527 528 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 529 const struct rtw89_fw_suit *fw_suit, 530 struct rtw89_fw_bin_info *info) 531 { 532 const u8 *fw = fw_suit->data; 533 u32 len = fw_suit->size; 534 535 if (!fw || !len) { 536 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 537 return -ENOENT; 538 } 539 540 switch (fw_suit->hdr_ver) { 541 case 0: 542 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 543 case 1: 544 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 545 default: 546 return -ENOENT; 547 } 548 } 549 550 static 551 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev, 552 const struct firmware *firmware) 553 { 554 const struct rtw89_mfw_hdr *mfw_hdr; 555 556 if (sizeof(*mfw_hdr) > firmware->size) 557 return NULL; 558 559 mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0]; 560 561 if (mfw_hdr->sig != RTW89_MFW_SIG) 562 return NULL; 563 564 return mfw_hdr; 565 } 566 567 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, 568 const struct firmware *firmware, 569 const struct rtw89_mfw_hdr *mfw_hdr) 570 { 571 const void *mfw = firmware->data; 572 u32 mfw_len = firmware->size; 573 u8 fw_nr = mfw_hdr->fw_nr; 574 const void *ptr; 575 576 if (fw_nr == 0) { 577 rtw89_err(rtwdev, "mfw header has no fw entry\n"); 578 return -ENOENT; 579 } 580 581 ptr = &mfw_hdr->info[fw_nr]; 582 583 if (ptr > mfw + mfw_len) { 584 rtw89_err(rtwdev, "mfw header out of address\n"); 585 return -EFAULT; 586 } 587 588 return 0; 589 } 590 591 static 592 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 593 struct rtw89_fw_suit *fw_suit, bool nowarn) 594 { 595 struct rtw89_fw_info *fw_info = &rtwdev->fw; 596 const struct firmware *firmware = fw_info->req.firmware; 597 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 598 const struct rtw89_mfw_hdr *mfw_hdr; 599 const u8 *mfw = firmware->data; 600 u32 mfw_len = firmware->size; 601 int ret; 602 int i; 603 604 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 605 if (!mfw_hdr) { 606 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 607 /* legacy firmware support normal type only */ 608 if (type != RTW89_FW_NORMAL) 609 return -EINVAL; 610 fw_suit->data = mfw; 611 fw_suit->size = mfw_len; 612 return 0; 613 } 614 615 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 616 if (ret) 617 return ret; 618 619 for (i = 0; i < mfw_hdr->fw_nr; i++) { 620 tmp = &mfw_hdr->info[i]; 621 if (tmp->type != type) 622 continue; 623 624 if (type == RTW89_FW_LOGFMT) { 625 mfw_info = tmp; 626 goto found; 627 } 628 629 /* Version order of WiFi firmware in firmware file are not in order, 630 * pass all firmware to find the equal or less but closest version. 
631 */ 632 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 633 if (!mfw_info || mfw_info->cv < tmp->cv) 634 mfw_info = tmp; 635 } 636 } 637 638 if (mfw_info) 639 goto found; 640 641 if (!nowarn) 642 rtw89_err(rtwdev, "no suitable firmware found\n"); 643 return -ENOENT; 644 645 found: 646 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 647 fw_suit->size = le32_to_cpu(mfw_info->size); 648 649 if (fw_suit->data + fw_suit->size > mfw + mfw_len) { 650 rtw89_err(rtwdev, "fw_suit %d out of address\n", type); 651 return -EFAULT; 652 } 653 654 return 0; 655 } 656 657 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 658 { 659 struct rtw89_fw_info *fw_info = &rtwdev->fw; 660 const struct firmware *firmware = fw_info->req.firmware; 661 const struct rtw89_mfw_info *mfw_info; 662 const struct rtw89_mfw_hdr *mfw_hdr; 663 u32 size; 664 int ret; 665 666 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 667 if (!mfw_hdr) { 668 rtw89_warn(rtwdev, "not mfw format\n"); 669 return 0; 670 } 671 672 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 673 if (ret) 674 return ret; 675 676 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 677 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 678 679 return size; 680 } 681 682 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 683 struct rtw89_fw_suit *fw_suit, 684 const struct rtw89_fw_hdr *hdr) 685 { 686 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 687 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 688 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 689 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 690 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 691 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 692 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 693 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 694 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 695 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 696 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 697 } 698 699 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 700 struct rtw89_fw_suit *fw_suit, 701 const struct rtw89_fw_hdr_v1 *hdr) 702 { 703 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 704 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 705 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 706 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 707 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 708 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 709 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 710 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 711 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 712 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 713 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 714 } 715 716 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 717 enum rtw89_fw_type type, 718 struct rtw89_fw_suit *fw_suit) 719 { 720 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 721 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 722 723 if (type == RTW89_FW_LOGFMT) 724 return 0; 725 726 fw_suit->type = type; 727 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 728 729 switch 
(fw_suit->hdr_ver) { 730 case 0: 731 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 732 break; 733 case 1: 734 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 735 break; 736 default: 737 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 738 fw_suit->hdr_ver); 739 return -ENOENT; 740 } 741 742 rtw89_info(rtwdev, 743 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 744 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 745 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 746 747 return 0; 748 } 749 750 static 751 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 752 bool nowarn) 753 { 754 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 755 int ret; 756 757 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 758 if (ret) 759 return ret; 760 761 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 762 } 763 764 static 765 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 766 const struct rtw89_fw_element_hdr *elm, 767 const union rtw89_fw_element_arg arg) 768 { 769 enum rtw89_fw_type type = arg.fw_type; 770 struct rtw89_hal *hal = &rtwdev->hal; 771 struct rtw89_fw_suit *fw_suit; 772 773 /* Version of BB MCU is in decreasing order in firmware file, so take 774 * first equal or less version, which is equal or less but closest version. 775 */ 776 if (hal->cv < elm->u.bbmcu.cv) 777 return 1; /* ignore this element */ 778 779 fw_suit = rtw89_fw_suit_get(rtwdev, type); 780 if (fw_suit->data) 781 return 1; /* ignore this element (a firmware is taken already) */ 782 783 fw_suit->data = elm->u.bbmcu.contents; 784 fw_suit->size = le32_to_cpu(elm->size); 785 786 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 787 } 788 789 #define __DEF_FW_FEAT_COND(__cond, __op) \ 790 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 791 { \ 792 return suit_ver_code __op comp_ver_code; \ 793 } 794 795 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 796 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 797 __DEF_FW_FEAT_COND(lt, <); /* less than */ 798 799 struct __fw_feat_cfg { 800 enum rtw89_core_chip_id chip_id; 801 enum rtw89_fw_feature feature; 802 u32 ver_code; 803 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 804 }; 805 806 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 807 { \ 808 .chip_id = _chip, \ 809 .feature = RTW89_FW_FEATURE_ ## _feat, \ 810 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 811 .cond = __fw_feat_cond_ ## _cond, \ 812 } 813 814 static const struct __fw_feat_cfg fw_feat_tbl[] = { 815 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 816 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 817 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0), 818 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 819 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 820 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 821 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0), 822 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 823 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 824 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 825 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 826 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0), 827 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 828 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 829 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 830 __CFG_FW_FEAT(RTL8852B, ge, 
		      0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

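	/* The table above keys features off a packed version code. Assuming
	 * RTW89_FW_VER_CODE()/RTW89_FW_SUIT_VER_CODE() pack
	 * major/minor/sub/sub-index into one monotonically ordered u32 (as
	 * the __CFG_FW_FEAT() entries imply), the matching done below for an
	 * RTL8852B suit at 0.29.128.0 is, sketched with a shorthand VER():
	 *
	 *   ge(VER(0, 29, 128, 0), VER(0, 29, 29, 0)) -> SCAN_OFFLOAD set
	 *   lt(VER(0, 29, 128, 0), VER(0, 29, 30, 0)) -> NO_WOW_CPU_IO_RX unset
	 *
	 * Entries whose chip_id differs from the running chip are skipped.
	 */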
rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 902 } 903 904 const struct firmware * 905 rtw89_early_fw_feature_recognize(struct device *device, 906 const struct rtw89_chip_info *chip, 907 struct rtw89_fw_info *early_fw, 908 int *used_fw_format) 909 { 910 const struct firmware *firmware; 911 char fw_name[64]; 912 int fw_format; 913 u32 ver_code; 914 int ret; 915 916 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 917 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 918 chip->fw_basename, fw_format); 919 920 ret = request_firmware(&firmware, fw_name, device); 921 if (!ret) { 922 dev_info(device, "loaded firmware %s\n", fw_name); 923 *used_fw_format = fw_format; 924 break; 925 } 926 } 927 928 if (ret) { 929 dev_err(device, "failed to early request firmware: %d\n", ret); 930 return NULL; 931 } 932 933 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 934 935 if (!ver_code) 936 goto out; 937 938 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 939 940 out: 941 return firmware; 942 } 943 944 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 945 { 946 const struct rtw89_chip_variant *variant = rtwdev->variant; 947 const struct rtw89_fw_suit *fw_suit; 948 u32 suit_ver_code; 949 950 if (!variant) 951 return 0; 952 953 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 954 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 955 956 if (variant->fw_min_ver_code > suit_ver_code) { 957 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n", 958 variant->fw_min_ver_code); 959 return -ENOENT; 960 } 961 962 return 0; 963 } 964 965 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 966 { 967 const struct rtw89_chip_info *chip = rtwdev->chip; 968 int ret; 969 970 if (chip->try_ce_fw) { 971 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 972 if (!ret) 973 goto normal_done; 974 } 975 976 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 977 if (ret) 978 return ret; 979 980 normal_done: 981 ret = rtw89_fw_validate_ver_required(rtwdev); 982 if (ret) 983 return ret; 984 985 /* It still works if wowlan firmware isn't existing. */ 986 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 987 988 /* It still works if log format file isn't existing. 
*/ 989 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 990 991 rtw89_fw_recognize_features(rtwdev); 992 993 rtw89_coex_recognize_ver(rtwdev); 994 995 return 0; 996 } 997 998 static 999 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 1000 const struct rtw89_fw_element_hdr *elm, 1001 const union rtw89_fw_element_arg arg) 1002 { 1003 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1004 struct rtw89_phy_table *tbl; 1005 struct rtw89_reg2_def *regs; 1006 enum rtw89_rf_path rf_path; 1007 u32 n_regs, i; 1008 u8 idx; 1009 1010 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 1011 if (!tbl) 1012 return -ENOMEM; 1013 1014 switch (le32_to_cpu(elm->id)) { 1015 case RTW89_FW_ELEMENT_ID_BB_REG: 1016 elm_info->bb_tbl = tbl; 1017 break; 1018 case RTW89_FW_ELEMENT_ID_BB_GAIN: 1019 elm_info->bb_gain = tbl; 1020 break; 1021 case RTW89_FW_ELEMENT_ID_RADIO_A: 1022 case RTW89_FW_ELEMENT_ID_RADIO_B: 1023 case RTW89_FW_ELEMENT_ID_RADIO_C: 1024 case RTW89_FW_ELEMENT_ID_RADIO_D: 1025 rf_path = arg.rf_path; 1026 idx = elm->u.reg2.idx; 1027 1028 elm_info->rf_radio[idx] = tbl; 1029 tbl->rf_path = rf_path; 1030 tbl->config = rtw89_phy_config_rf_reg_v1; 1031 break; 1032 case RTW89_FW_ELEMENT_ID_RF_NCTL: 1033 elm_info->rf_nctl = tbl; 1034 break; 1035 default: 1036 kfree(tbl); 1037 return -ENOENT; 1038 } 1039 1040 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 1041 regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL); 1042 if (!regs) 1043 goto out; 1044 1045 for (i = 0; i < n_regs; i++) { 1046 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 1047 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 1048 } 1049 1050 tbl->n_regs = n_regs; 1051 tbl->regs = regs; 1052 1053 return 0; 1054 1055 out: 1056 kfree(tbl); 1057 return -ENOMEM; 1058 } 1059 1060 static 1061 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 1062 const struct rtw89_fw_element_hdr *elm, 1063 const union rtw89_fw_element_arg arg) 1064 { 1065 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 1066 const unsigned long offset = arg.offset; 1067 struct rtw89_efuse *efuse = &rtwdev->efuse; 1068 struct rtw89_txpwr_conf *conf; 1069 1070 if (!rtwdev->rfe_data) { 1071 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 1072 if (!rtwdev->rfe_data) 1073 return -ENOMEM; 1074 } 1075 1076 conf = (void *)rtwdev->rfe_data + offset; 1077 1078 /* if multiple matched, take the last eventually */ 1079 if (txpwr_elm->rfe_type == efuse->rfe_type) 1080 goto setup; 1081 1082 /* without one is matched, accept default */ 1083 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 1084 (!rtw89_txpwr_conf_valid(conf) || 1085 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 1086 goto setup; 1087 1088 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 1089 elm->id, txpwr_elm->rfe_type); 1090 return 0; 1091 1092 setup: 1093 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 1094 elm->id, txpwr_elm->rfe_type); 1095 1096 conf->rfe_type = txpwr_elm->rfe_type; 1097 conf->ent_sz = txpwr_elm->ent_sz; 1098 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 1099 conf->data = txpwr_elm->content; 1100 return 0; 1101 } 1102 1103 static 1104 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 1105 const struct rtw89_fw_element_hdr *elm, 1106 const union rtw89_fw_element_arg arg) 1107 { 1108 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1109 const struct rtw89_chip_info *chip = rtwdev->chip; 1110 u32 needed_bitmap = 0; 1111 u32 offset = 0; 1112 int 
subband; 1113 u32 bitmap; 1114 int type; 1115 1116 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 1117 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 1118 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 1119 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 1120 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 1121 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 1122 1123 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 1124 1125 if ((bitmap & needed_bitmap) != needed_bitmap) { 1126 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", 1127 needed_bitmap, bitmap); 1128 return -ENOENT; 1129 } 1130 1131 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 1132 if (!elm_info->txpwr_trk) 1133 return -ENOMEM; 1134 1135 for (type = 0; bitmap; type++, bitmap >>= 1) { 1136 if (!(bitmap & BIT(0))) 1137 continue; 1138 1139 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 1140 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 1141 subband = 4; 1142 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 1143 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 1144 subband = 3; 1145 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 1146 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 1147 subband = 1; 1148 else 1149 break; 1150 1151 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 1152 1153 offset += subband; 1154 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 1155 goto err; 1156 } 1157 1158 return 0; 1159 1160 err: 1161 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1162 offset, le32_to_cpu(elm->size)); 1163 kfree(elm_info->txpwr_trk); 1164 elm_info->txpwr_trk = NULL; 1165 1166 return -EFAULT; 1167 } 1168 1169 static 1170 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1171 const struct rtw89_fw_element_hdr *elm, 1172 const union rtw89_fw_element_arg arg) 1173 { 1174 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1175 u8 rfk_id; 1176 1177 if (elm_info->rfk_log_fmt) 1178 goto allocated; 1179 1180 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1181 if (!elm_info->rfk_log_fmt) 1182 return 1; /* this is an optional element, so just ignore this */ 1183 1184 allocated: 1185 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1186 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1187 return 1; 1188 1189 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1190 1191 return 0; 1192 } 1193 1194 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor, 1195 u8 cursor_size) 1196 { 1197 /* fill default values if needed for backward compatibility */ 1198 struct rtw89_fw_regd_entry entry = { 1199 .rule_2ghz = RTW89_NA, 1200 .rule_5ghz = RTW89_NA, 1201 .rule_6ghz = RTW89_NA, 1202 .fmap = cpu_to_le32(0x0), 1203 }; 1204 u8 valid_size = min_t(u8, sizeof(entry), cursor_size); 1205 unsigned int i; 1206 u32 fmap; 1207 1208 memcpy(&entry, cursor, valid_size); 1209 memset(regd, 0, sizeof(*regd)); 1210 1211 regd->alpha2[0] = entry.alpha2_0; 1212 regd->alpha2[1] = entry.alpha2_1; 1213 regd->alpha2[2] = '\0'; 1214 1215 /* also need to consider forward compatibility */ 1216 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ? 1217 entry.rule_2ghz : RTW89_NA; 1218 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ? 1219 entry.rule_5ghz : RTW89_NA; 1220 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ? 
					  entry.rule_6ghz : RTW89_NA;

	BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
	BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);

	fmap = le32_to_cpu(entry.fmap);
	for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
		if (fmap & BIT(i))
			set_bit(i, regd->func_bitmap);
	}

	return true;
}

#define rtw89_for_each_in_regd_element(regd, element) \
	for (const void *cursor = (element)->content, \
		   *end = (element)->content + \
			  le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
	     cursor < end; cursor += (element)->ent_sz) \
		if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))

static
int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u32 num_ents = le32_to_cpu(regd_elm->num_ents);
	struct rtw89_regd_data *p;
	struct rtw89_regd regd;
	u32 i = 0;

	if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
		rtw89_warn(rtwdev,
			   "regd element ents (%d) are over max num (%d)\n",
			   num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
		rtw89_warn(rtwdev,
			   "regd element ignore and take another/common\n");
		return 1;
	}

	if (elm_info->regd) {
		rtw89_debug(rtwdev, RTW89_DBG_REGD,
			    "regd element take the latter\n");
		devm_kfree(rtwdev->dev, elm_info->regd);
		elm_info->regd = NULL;
	}

	p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->nr = num_ents;
	rtw89_for_each_in_regd_element(&regd, regd_elm)
		p->map[i++] = regd;

	if (i != num_ents) {
		rtw89_err(rtwdev, "regd element has %d invalid ents\n",
			  num_ents - i);
		devm_kfree(rtwdev->dev, p);
		return -EINVAL;
	}

	elm_info->regd = p;
	return 0;
}

static
int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;

	elm_info->afe = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
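		/* This and the following TXPWR_* entries reuse one handler;
		 * the union arg carries a byte offset into struct
		 * rtw89_rfe_data, which rtw89_fw_recognize_txpwr_from_elm()
		 * resolves as roughly
		 *   conf = (void *)rtwdev->rfe_data + arg.offset;
		 * so each element ID fills its own rtw89_txpwr_conf slot.
		 */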
rtw89_fw_recognize_txpwr_from_elm, 1323 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1324 }, 1325 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1326 rtw89_fw_recognize_txpwr_from_elm, 1327 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1328 }, 1329 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1330 rtw89_fw_recognize_txpwr_from_elm, 1331 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1332 }, 1333 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = { 1334 rtw89_fw_recognize_txpwr_from_elm, 1335 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL, 1336 }, 1337 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = { 1338 rtw89_fw_recognize_txpwr_from_elm, 1339 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL, 1340 }, 1341 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = { 1342 rtw89_fw_recognize_txpwr_from_elm, 1343 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL, 1344 }, 1345 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1346 rtw89_fw_recognize_txpwr_from_elm, 1347 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1348 }, 1349 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1350 rtw89_fw_recognize_txpwr_from_elm, 1351 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1352 }, 1353 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1354 rtw89_fw_recognize_txpwr_from_elm, 1355 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1356 }, 1357 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = { 1358 rtw89_fw_recognize_txpwr_from_elm, 1359 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL, 1360 }, 1361 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = { 1362 rtw89_fw_recognize_txpwr_from_elm, 1363 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL, 1364 }, 1365 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = { 1366 rtw89_fw_recognize_txpwr_from_elm, 1367 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL, 1368 }, 1369 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1370 rtw89_fw_recognize_txpwr_from_elm, 1371 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1372 }, 1373 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1374 rtw89_fw_recognize_txpwr_from_elm, 1375 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1376 }, 1377 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1378 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1379 }, 1380 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1381 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1382 }, 1383 [RTW89_FW_ELEMENT_ID_REGD] = { 1384 rtw89_recognize_regd_from_elm, {}, "REGD", 1385 }, 1386 [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = { 1387 rtw89_build_afe_pwr_seq_from_elm, {}, "AFE", 1388 }, 1389 }; 1390 1391 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1392 { 1393 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1394 const struct firmware *firmware = fw_info->req.firmware; 1395 const struct rtw89_chip_info *chip = rtwdev->chip; 1396 u32 unrecognized_elements = chip->needed_fw_elms; 1397 const struct rtw89_fw_element_handler *handler; 1398 const struct rtw89_fw_element_hdr *hdr; 1399 u32 elm_size; 1400 u32 elem_id; 1401 u32 offset; 1402 int ret; 1403 1404 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1405 1406 offset = rtw89_mfw_get_size(rtwdev); 1407 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1408 if (offset == 0) 1409 return -EINVAL; 1410 1411 while (offset + sizeof(*hdr) < firmware->size) { 1412 hdr = (const struct 
				rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section *section;
	int i;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_W7_PART_SIZE);

	for (i = 0; i < info->section_num; i++) {
		section_info = &info->section_info[i];

		if (!section_info->len_override)
			continue;

		section = &fw_hdr->sections[i];
		le32p_replace_bits(&section->w1, section_info->len_override,
				   FWSECTION_HDR_W1_SEC_SIZE);
	}

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
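		/* Sections flagged ->ignore (e.g. security sections whose
		 * secure-boot key set does not apply to this chip) are
		 * compacted out of the header copy sent to the firmware:
		 * kept sections are shifted down, W6_SEC_NUM is rewritten,
		 * and the byte count returned below is what
		 * __rtw89_fw_download_hdr() trims off the skb. For example,
		 * with four sections where only index 2 is ignored, this
		 * returns sizeof(*section) and reports three sections.
		 */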
section_info = &info->section_info[sec_idx]; 1535 section = &fw_hdr->sections[sec_idx]; 1536 1537 if (section_info->ignore) 1538 continue; 1539 1540 if (dst_sec_idx != sec_idx) 1541 fw_hdr->sections[dst_sec_idx] = *section; 1542 1543 dst_sec_idx++; 1544 } 1545 1546 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1547 1548 return (info->section_num - dst_sec_idx) * sizeof(*section); 1549 } 1550 1551 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1552 const struct rtw89_fw_suit *fw_suit, 1553 struct rtw89_fw_bin_info *info) 1554 { 1555 u32 len = info->hdr_len - info->dynamic_hdr_len; 1556 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1557 const u8 *fw = fw_suit->data; 1558 struct rtw89_fw_hdr *fw_hdr; 1559 struct sk_buff *skb; 1560 u32 truncated; 1561 int ret; 1562 1563 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1564 if (!skb) { 1565 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1566 return -ENOMEM; 1567 } 1568 1569 skb_put_data(skb, fw, len); 1570 1571 switch (fw_suit->hdr_ver) { 1572 case 0: 1573 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1574 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1575 break; 1576 case 1: 1577 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1578 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1579 break; 1580 default: 1581 ret = -EOPNOTSUPP; 1582 goto fail; 1583 } 1584 1585 if (truncated) { 1586 len -= truncated; 1587 skb_trim(skb, len); 1588 } 1589 1590 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1591 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1592 H2C_FUNC_MAC_FWHDR_DL, len); 1593 1594 ret = rtw89_h2c_tx(rtwdev, skb, false); 1595 if (ret) { 1596 rtw89_err(rtwdev, "failed to send h2c\n"); 1597 goto fail; 1598 } 1599 1600 return 0; 1601 fail: 1602 dev_kfree_skb_any(skb); 1603 1604 return ret; 1605 } 1606 1607 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1608 const struct rtw89_fw_suit *fw_suit, 1609 struct rtw89_fw_bin_info *info) 1610 { 1611 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1612 int ret; 1613 1614 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1615 if (ret) { 1616 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1617 return ret; 1618 } 1619 1620 ret = mac->fwdl_check_path_ready(rtwdev, false); 1621 if (ret) { 1622 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1623 return ret; 1624 } 1625 1626 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1627 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1628 1629 return 0; 1630 } 1631 1632 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1633 struct rtw89_fw_hdr_section_info *info) 1634 { 1635 struct sk_buff *skb; 1636 const u8 *section = info->addr; 1637 u32 residue_len = info->len; 1638 bool copy_key = false; 1639 u32 pkt_len; 1640 int ret; 1641 1642 if (info->ignore) 1643 return 0; 1644 1645 if (info->len_override) { 1646 if (info->len_override > info->len) 1647 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1648 info->len_override, info->len); 1649 else 1650 residue_len = info->len_override; 1651 } 1652 1653 if (info->key_addr && info->key_len) { 1654 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1655 rtw89_warn(rtwdev, 1656 "ignore to copy key data because of len %d, %d, %d, %d\n", 1657 info->len, FWDL_SECTION_PER_PKT_LEN, 1658 info->key_len, residue_len); 1659 else 1660 copy_key = true; 1661 } 1662 1663 while (residue_len) { 1664 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1665 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1666 else 1667 
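			/* last (or only) chunk of this section; copy_key is
			 * only set when the whole section fits in one packet,
			 * in which case the memcpy() below overwrites the
			 * tail of this chunk with the selected key.
			 */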
pkt_len = residue_len; 1668 1669 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1670 if (!skb) { 1671 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1672 return -ENOMEM; 1673 } 1674 skb_put_data(skb, section, pkt_len); 1675 1676 if (copy_key) 1677 memcpy(skb->data + pkt_len - info->key_len, 1678 info->key_addr, info->key_len); 1679 1680 ret = rtw89_h2c_tx(rtwdev, skb, true); 1681 if (ret) { 1682 rtw89_err(rtwdev, "failed to send h2c\n"); 1683 goto fail; 1684 } 1685 1686 section += pkt_len; 1687 residue_len -= pkt_len; 1688 } 1689 1690 return 0; 1691 fail: 1692 dev_kfree_skb_any(skb); 1693 1694 return ret; 1695 } 1696 1697 static enum rtw89_fwdl_check_type 1698 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1699 const struct rtw89_fw_suit *fw_suit) 1700 { 1701 switch (fw_suit->type) { 1702 case RTW89_FW_BBMCU0: 1703 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1704 case RTW89_FW_BBMCU1: 1705 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1706 default: 1707 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1708 } 1709 } 1710 1711 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1712 const struct rtw89_fw_suit *fw_suit, 1713 struct rtw89_fw_bin_info *info) 1714 { 1715 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1716 const struct rtw89_chip_info *chip = rtwdev->chip; 1717 enum rtw89_fwdl_check_type chk_type; 1718 u8 section_num = info->section_num; 1719 int ret; 1720 1721 while (section_num--) { 1722 ret = __rtw89_fw_download_main(rtwdev, section_info); 1723 if (ret) 1724 return ret; 1725 section_info++; 1726 } 1727 1728 if (chip->chip_gen == RTW89_CHIP_AX) 1729 return 0; 1730 1731 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1732 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1733 if (ret) { 1734 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1735 fw_suit->type); 1736 return ret; 1737 } 1738 1739 return 0; 1740 } 1741 1742 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1743 { 1744 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1745 u32 addr = R_AX_DBG_PORT_SEL; 1746 u32 val32; 1747 u16 index; 1748 1749 if (chip_gen == RTW89_CHIP_BE) { 1750 addr = R_BE_WLCPU_PORT_PC; 1751 goto dump; 1752 } 1753 1754 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1755 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1756 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1757 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1758 1759 dump: 1760 for (index = 0; index < 15; index++) { 1761 val32 = rtw89_read32(rtwdev, addr); 1762 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1763 fsleep(10); 1764 } 1765 } 1766 1767 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1768 { 1769 u32 val32; 1770 1771 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1772 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1773 1774 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1775 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1776 1777 rtw89_fw_prog_cnt_dump(rtwdev); 1778 } 1779 1780 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1781 struct rtw89_fw_suit *fw_suit) 1782 { 1783 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1784 struct rtw89_fw_bin_info info = {}; 1785 int ret; 1786 1787 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1788 if (ret) { 1789 rtw89_err(rtwdev, "parse fw header fail\n"); 1790 return ret; 1791 } 1792 1793 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1794 1795 if (rtwdev->chip->chip_id == RTL8922A && 1796 (fw_suit->type 
== RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1797 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1798 1799 ret = mac->fwdl_check_path_ready(rtwdev, true); 1800 if (ret) { 1801 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1802 return ret; 1803 } 1804 1805 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1806 if (ret) 1807 return ret; 1808 1809 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1810 if (ret) 1811 return ret; 1812 1813 return 0; 1814 } 1815 1816 static 1817 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1818 bool include_bb) 1819 { 1820 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1821 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1822 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1823 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1824 int ret; 1825 int i; 1826 1827 mac->disable_cpu(rtwdev); 1828 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1829 if (ret) 1830 return ret; 1831 1832 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1833 if (ret) 1834 goto fwdl_err; 1835 1836 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1837 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1838 1839 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1840 if (ret) 1841 goto fwdl_err; 1842 } 1843 1844 fw_info->h2c_seq = 0; 1845 fw_info->rec_seq = 0; 1846 fw_info->h2c_counter = 0; 1847 fw_info->c2h_counter = 0; 1848 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1849 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1850 1851 mdelay(5); 1852 1853 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1854 if (ret) { 1855 rtw89_warn(rtwdev, "download firmware fail\n"); 1856 goto fwdl_err; 1857 } 1858 1859 return ret; 1860 1861 fwdl_err: 1862 rtw89_fw_dl_fail_dump(rtwdev); 1863 return ret; 1864 } 1865 1866 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1867 bool include_bb) 1868 { 1869 int retry; 1870 int ret; 1871 1872 for (retry = 0; retry < 5; retry++) { 1873 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1874 if (!ret) 1875 return 0; 1876 } 1877 1878 return ret; 1879 } 1880 1881 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1882 { 1883 struct rtw89_fw_info *fw = &rtwdev->fw; 1884 1885 wait_for_completion(&fw->req.completion); 1886 if (!fw->req.firmware) 1887 return -EINVAL; 1888 1889 return 0; 1890 } 1891 1892 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1893 struct rtw89_fw_req_info *req, 1894 const char *fw_name, bool nowarn) 1895 { 1896 int ret; 1897 1898 if (req->firmware) { 1899 rtw89_debug(rtwdev, RTW89_DBG_FW, 1900 "full firmware has been early requested\n"); 1901 complete_all(&req->completion); 1902 return 0; 1903 } 1904 1905 if (nowarn) 1906 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1907 else 1908 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1909 1910 complete_all(&req->completion); 1911 1912 return ret; 1913 } 1914 1915 void rtw89_load_firmware_work(struct work_struct *work) 1916 { 1917 struct rtw89_dev *rtwdev = 1918 container_of(work, struct rtw89_dev, load_firmware_work); 1919 const struct rtw89_chip_info *chip = rtwdev->chip; 1920 char fw_name[64]; 1921 1922 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1923 chip->fw_basename, rtwdev->fw.fw_format); 1924 1925 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1926 } 1927 1928 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1929 { 1930 if (!tbl) 1931 return; 1932 1933 
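	/* tbl->regs was kcalloc()'ed in rtw89_build_phy_tbl_from_elm(), so
	 * release it before freeing the table that owns it.
	 */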
kfree(tbl->regs); 1934 kfree(tbl); 1935 } 1936 1937 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1938 { 1939 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1940 int i; 1941 1942 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1943 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1944 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1945 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1946 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1947 1948 kfree(elm_info->txpwr_trk); 1949 kfree(elm_info->rfk_log_fmt); 1950 } 1951 1952 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1953 { 1954 struct rtw89_fw_info *fw = &rtwdev->fw; 1955 1956 cancel_work_sync(&rtwdev->load_firmware_work); 1957 1958 if (fw->req.firmware) { 1959 release_firmware(fw->req.firmware); 1960 1961 /* assign NULL back in case rtw89_free_ieee80211_hw() 1962 * try to release the same one again. 1963 */ 1964 fw->req.firmware = NULL; 1965 } 1966 1967 kfree(fw->log.fmts); 1968 rtw89_unload_firmware_elements(rtwdev); 1969 } 1970 1971 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1972 { 1973 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1974 u32 i; 1975 1976 if (fmt_id > fw_log->last_fmt_id) 1977 return 0; 1978 1979 for (i = 0; i < fw_log->fmt_count; i++) { 1980 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1981 return i; 1982 } 1983 return 0; 1984 } 1985 1986 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1987 { 1988 struct rtw89_fw_log *log = &rtwdev->fw.log; 1989 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1990 struct rtw89_fw_suit *suit = &log->suit; 1991 const void *fmts_ptr, *fmts_end_ptr; 1992 u32 fmt_count; 1993 int i; 1994 1995 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1996 fmt_count = le32_to_cpu(suit_hdr->count); 1997 log->fmt_ids = suit_hdr->ids; 1998 fmts_ptr = &suit_hdr->ids[fmt_count]; 1999 fmts_end_ptr = suit->data + suit->size; 2000 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 2001 if (!log->fmts) 2002 return -ENOMEM; 2003 2004 for (i = 0; i < fmt_count; i++) { 2005 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 2006 if (!fmts_ptr) 2007 break; 2008 2009 (*log->fmts)[i] = fmts_ptr; 2010 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 2011 log->fmt_count++; 2012 fmts_ptr += strlen(fmts_ptr); 2013 } 2014 2015 return 0; 2016 } 2017 2018 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 2019 { 2020 struct rtw89_fw_log *log = &rtwdev->fw.log; 2021 struct rtw89_fw_suit *suit = &log->suit; 2022 2023 if (!suit || !suit->data) { 2024 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 2025 return -EINVAL; 2026 } 2027 if (log->fmts) 2028 return 0; 2029 2030 return rtw89_fw_log_create_fmts_dict(rtwdev); 2031 } 2032 2033 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 2034 const struct rtw89_fw_c2h_log_fmt *log_fmt, 2035 u32 fmt_idx, u8 para_int, bool raw_data) 2036 { 2037 const char *(*fmts)[] = rtwdev->fw.log.fmts; 2038 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 2039 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 2040 int i; 2041 2042 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 2043 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 2044 log_fmt->argc); 2045 return; 2046 } 2047 2048 if (para_int) 2049 for (i = 0 ; i < log_fmt->argc; i++) 2050 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 2051 2052 if (raw_data) { 2053 if (para_int) 2054 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2055 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 
2056 para_int, log_fmt->argc, (int)sizeof(args), args); 2057 else 2058 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2059 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 2060 para_int, log_fmt->argc, log_fmt->u.raw); 2061 } else { 2062 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 2063 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 2064 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 2065 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 2066 args[0xf]); 2067 } 2068 2069 rtw89_info(rtwdev, "C2H log: %s", str_buf); 2070 } 2071 2072 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 2073 { 2074 const struct rtw89_fw_c2h_log_fmt *log_fmt; 2075 u8 para_int; 2076 u32 fmt_idx; 2077 2078 if (len < RTW89_C2H_HEADER_LEN) { 2079 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 2080 return; 2081 } 2082 2083 buf += RTW89_C2H_HEADER_LEN; 2084 len -= RTW89_C2H_HEADER_LEN; 2085 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 2086 2087 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 2088 goto plain_log; 2089 2090 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 2091 goto plain_log; 2092 2093 if (!rtwdev->fw.log.fmts) 2094 return; 2095 2096 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 2097 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 2098 2099 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 2100 rtw89_info(rtwdev, "C2H log: %s%s", 2101 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 2102 else if (fmt_idx != 0 && para_int) 2103 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 2104 else 2105 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 2106 return; 2107 2108 plain_log: 2109 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 2110 2111 } 2112 2113 #define H2C_CAM_LEN 60 2114 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 2115 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 2116 { 2117 struct sk_buff *skb; 2118 int ret; 2119 2120 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 2121 if (!skb) { 2122 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2123 return -ENOMEM; 2124 } 2125 skb_put(skb, H2C_CAM_LEN); 2126 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 2127 skb->data); 2128 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 2129 2130 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2131 H2C_CAT_MAC, 2132 H2C_CL_MAC_ADDR_CAM_UPDATE, 2133 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 2134 H2C_CAM_LEN); 2135 2136 ret = rtw89_h2c_tx(rtwdev, skb, false); 2137 if (ret) { 2138 rtw89_err(rtwdev, "failed to send h2c\n"); 2139 goto fail; 2140 } 2141 2142 return 0; 2143 fail: 2144 dev_kfree_skb_any(skb); 2145 2146 return ret; 2147 } 2148 2149 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 2150 struct rtw89_vif_link *rtwvif_link, 2151 struct rtw89_sta_link *rtwsta_link) 2152 { 2153 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 2154 u32 len = sizeof(*h2c); 2155 struct sk_buff *skb; 2156 int ret; 2157 2158 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2159 if (!skb) { 2160 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2161 return -ENOMEM; 2162 } 2163 skb_put(skb, len); 2164 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 2165 2166 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 2167 2168 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2169 H2C_CAT_MAC, 2170 
H2C_CL_MAC_FR_EXCHG, 2171 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 2172 len); 2173 2174 ret = rtw89_h2c_tx(rtwdev, skb, false); 2175 if (ret) { 2176 rtw89_err(rtwdev, "failed to send h2c\n"); 2177 goto fail; 2178 } 2179 2180 return 0; 2181 fail: 2182 dev_kfree_skb_any(skb); 2183 2184 return ret; 2185 } 2186 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 2187 2188 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 2189 struct rtw89_vif_link *rtwvif_link, 2190 struct rtw89_sta_link *rtwsta_link) 2191 { 2192 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2193 u32 len = sizeof(*h2c); 2194 struct sk_buff *skb; 2195 int ret; 2196 2197 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2198 if (!skb) { 2199 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2200 return -ENOMEM; 2201 } 2202 skb_put(skb, len); 2203 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2204 2205 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 2206 2207 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2208 H2C_CAT_MAC, 2209 H2C_CL_MAC_FR_EXCHG, 2210 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2211 len); 2212 2213 ret = rtw89_h2c_tx(rtwdev, skb, false); 2214 if (ret) { 2215 rtw89_err(rtwdev, "failed to send h2c\n"); 2216 goto fail; 2217 } 2218 2219 return 0; 2220 fail: 2221 dev_kfree_skb_any(skb); 2222 2223 return ret; 2224 } 2225 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 2226 2227 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2228 struct rtw89_vif_link *rtwvif_link, 2229 struct rtw89_sta_link *rtwsta_link) 2230 { 2231 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2232 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2233 u32 len = sizeof(*h2c); 2234 struct sk_buff *skb; 2235 int ret; 2236 2237 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2238 if (!skb) { 2239 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2240 return -ENOMEM; 2241 } 2242 skb_put(skb, len); 2243 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2244 2245 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2246 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2247 2248 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2249 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2250 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2251 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2252 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2253 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2254 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2255 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2256 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2257 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2258 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2259 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2260 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2261 2262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2263 H2C_CAT_MAC, 2264 H2C_CL_MAC_FR_EXCHG, 2265 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2266 len); 2267 2268 ret = rtw89_h2c_tx(rtwdev, skb, false); 2269 if (ret) { 2270 rtw89_err(rtwdev, "failed to send h2c\n"); 2271 goto fail; 2272 } 2273 2274 return 0; 2275 fail: 2276 dev_kfree_skb_any(skb); 2277 2278 return ret; 2279 } 2280 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2281 2282 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2283 struct rtw89_vif_link *rtwvif_link, 2284 struct rtw89_sta_link *rtwsta_link, 2285 bool valid, struct ieee80211_ampdu_params *params) 2286 { 2287 const struct rtw89_chip_info *chip = rtwdev->chip; 2288 struct rtw89_h2c_ba_cam *h2c; 2289 u8 macid = rtwsta_link->mac_id; 2290 u32 len = sizeof(*h2c); 2291 struct sk_buff 
*skb; 2292 u8 entry_idx; 2293 int ret; 2294 2295 ret = valid ? 2296 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2297 &entry_idx) : 2298 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2299 &entry_idx); 2300 if (ret) { 2301 /* it still works even if we don't have static BA CAM, because 2302 * hardware can create dynamic BA CAM automatically. 2303 */ 2304 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2305 "failed to %s entry tid=%d for h2c ba cam\n", 2306 valid ? "alloc" : "free", params->tid); 2307 return 0; 2308 } 2309 2310 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2311 if (!skb) { 2312 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2313 return -ENOMEM; 2314 } 2315 skb_put(skb, len); 2316 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2317 2318 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2319 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2320 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2321 else 2322 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2323 if (!valid) 2324 goto end; 2325 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2326 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2327 if (params->buf_size > 64) 2328 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2329 else 2330 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2331 /* If init req is set, hw will set the ssn */ 2332 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2333 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2334 2335 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2336 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2337 le32_encode_bits(rtwvif_link->mac_idx, 2338 RTW89_H2C_BA_CAM_W1_BAND); 2339 } 2340 2341 end: 2342 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2343 H2C_CAT_MAC, 2344 H2C_CL_BA_CAM, 2345 H2C_FUNC_MAC_BA_CAM, 0, 1, 2346 len); 2347 2348 ret = rtw89_h2c_tx(rtwdev, skb, false); 2349 if (ret) { 2350 rtw89_err(rtwdev, "failed to send h2c\n"); 2351 goto fail; 2352 } 2353 2354 return 0; 2355 fail: 2356 dev_kfree_skb_any(skb); 2357 2358 return ret; 2359 } 2360 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2361 2362 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2363 u8 entry_idx, u8 uid) 2364 { 2365 struct rtw89_h2c_ba_cam *h2c; 2366 u32 len = sizeof(*h2c); 2367 struct sk_buff *skb; 2368 int ret; 2369 2370 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2371 if (!skb) { 2372 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2373 return -ENOMEM; 2374 } 2375 skb_put(skb, len); 2376 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2377 2378 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2379 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2380 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2381 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2382 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2383 2384 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2385 H2C_CAT_MAC, 2386 H2C_CL_BA_CAM, 2387 H2C_FUNC_MAC_BA_CAM, 0, 1, 2388 len); 2389 2390 ret = rtw89_h2c_tx(rtwdev, skb, false); 2391 if (ret) { 2392 rtw89_err(rtwdev, "failed to send h2c\n"); 2393 goto fail; 2394 } 2395 2396 return 0; 2397 fail: 2398 dev_kfree_skb_any(skb); 2399 2400 return ret; 2401 } 2402 2403 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2404 { 2405 const struct rtw89_chip_info *chip = rtwdev->chip; 2406 u8 entry_idx = chip->bacam_num; 2407 
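	/* Note: dynamic BA CAM entries are assumed to sit right after the
	 * chip's bacam_num static entries, so the first dynamic index starts
	 * there and each entry gets a firmware-side uid counted up from 0.
	 */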
u8 uid = 0; 2408 int i; 2409 2410 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2411 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2412 entry_idx++; 2413 uid++; 2414 } 2415 } 2416 2417 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2418 struct rtw89_vif_link *rtwvif_link, 2419 struct rtw89_sta_link *rtwsta_link, 2420 bool valid, struct ieee80211_ampdu_params *params) 2421 { 2422 const struct rtw89_chip_info *chip = rtwdev->chip; 2423 struct rtw89_h2c_ba_cam_v1 *h2c; 2424 u8 macid = rtwsta_link->mac_id; 2425 u32 len = sizeof(*h2c); 2426 struct sk_buff *skb; 2427 u8 entry_idx; 2428 u8 bmap_size; 2429 int ret; 2430 2431 ret = valid ? 2432 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2433 &entry_idx) : 2434 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2435 &entry_idx); 2436 if (ret) { 2437 /* it still works even if we don't have static BA CAM, because 2438 * hardware can create dynamic BA CAM automatically. 2439 */ 2440 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2441 "failed to %s entry tid=%d for h2c ba cam\n", 2442 valid ? "alloc" : "free", params->tid); 2443 return 0; 2444 } 2445 2446 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2447 if (!skb) { 2448 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2449 return -ENOMEM; 2450 } 2451 skb_put(skb, len); 2452 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2453 2454 if (params->buf_size > 512) 2455 bmap_size = 10; 2456 else if (params->buf_size > 256) 2457 bmap_size = 8; 2458 else if (params->buf_size > 64) 2459 bmap_size = 4; 2460 else 2461 bmap_size = 0; 2462 2463 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2464 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2465 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2466 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2467 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2468 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2469 2470 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2471 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2472 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2473 le32_encode_bits(!!rtwvif_link->mac_idx, 2474 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2475 2476 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2477 H2C_CAT_MAC, 2478 H2C_CL_BA_CAM, 2479 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2480 len); 2481 2482 ret = rtw89_h2c_tx(rtwdev, skb, false); 2483 if (ret) { 2484 rtw89_err(rtwdev, "failed to send h2c\n"); 2485 goto fail; 2486 } 2487 2488 return 0; 2489 fail: 2490 dev_kfree_skb_any(skb); 2491 2492 return ret; 2493 } 2494 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2495 2496 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2497 u8 offset, u8 mac_idx) 2498 { 2499 struct rtw89_h2c_ba_cam_init *h2c; 2500 u32 len = sizeof(*h2c); 2501 struct sk_buff *skb; 2502 int ret; 2503 2504 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2505 if (!skb) { 2506 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2507 return -ENOMEM; 2508 } 2509 skb_put(skb, len); 2510 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2511 2512 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2513 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2514 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2515 2516 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2517 H2C_CAT_MAC, 2518 H2C_CL_BA_CAM, 2519 
H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2520 len); 2521 2522 ret = rtw89_h2c_tx(rtwdev, skb, false); 2523 if (ret) { 2524 rtw89_err(rtwdev, "failed to send h2c\n"); 2525 goto fail; 2526 } 2527 2528 return 0; 2529 fail: 2530 dev_kfree_skb_any(skb); 2531 2532 return ret; 2533 } 2534 2535 #define H2C_LOG_CFG_LEN 12 2536 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2537 { 2538 struct sk_buff *skb; 2539 u32 comp = 0; 2540 int ret; 2541 2542 if (enable) 2543 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2544 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2545 BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN); 2546 2547 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2548 if (!skb) { 2549 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2550 return -ENOMEM; 2551 } 2552 2553 skb_put(skb, H2C_LOG_CFG_LEN); 2554 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2555 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2556 SET_LOG_CFG_COMP(skb->data, comp); 2557 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2558 2559 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2560 H2C_CAT_MAC, 2561 H2C_CL_FW_INFO, 2562 H2C_FUNC_LOG_CFG, 0, 0, 2563 H2C_LOG_CFG_LEN); 2564 2565 ret = rtw89_h2c_tx(rtwdev, skb, false); 2566 if (ret) { 2567 rtw89_err(rtwdev, "failed to send h2c\n"); 2568 goto fail; 2569 } 2570 2571 return 0; 2572 fail: 2573 dev_kfree_skb_any(skb); 2574 2575 return ret; 2576 } 2577 2578 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2579 struct rtw89_vif_link *rtwvif_link) 2580 { 2581 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2582 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2583 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2584 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2585 struct rtw89_eapol_2_of_2 *eapol_pkt; 2586 struct ieee80211_bss_conf *bss_conf; 2587 struct ieee80211_hdr_3addr *hdr; 2588 struct sk_buff *skb; 2589 u8 key_des_ver; 2590 2591 if (rtw_wow->ptk_alg == 3) 2592 key_des_ver = 1; 2593 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2594 key_des_ver = 2; 2595 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2596 key_des_ver = 3; 2597 else 2598 key_des_ver = 0; 2599 2600 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2601 if (!skb) 2602 return NULL; 2603 2604 hdr = skb_put_zero(skb, sizeof(*hdr)); 2605 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2606 IEEE80211_FCTL_TODS | 2607 IEEE80211_FCTL_PROTECTED); 2608 2609 rcu_read_lock(); 2610 2611 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2612 2613 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2614 ether_addr_copy(hdr->addr2, bss_conf->addr); 2615 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2616 2617 rcu_read_unlock(); 2618 2619 skb_put_zero(skb, sec_hdr_len); 2620 2621 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2622 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2623 eapol_pkt->key_des_ver = key_des_ver; 2624 2625 return skb; 2626 } 2627 2628 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2629 struct rtw89_vif_link *rtwvif_link) 2630 { 2631 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2632 struct ieee80211_bss_conf *bss_conf; 2633 struct ieee80211_hdr_3addr *hdr; 2634 struct rtw89_sa_query *sa_query; 2635 struct sk_buff *skb; 2636 2637 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2638 if (!skb) 2639 return NULL; 2640 2641 hdr = skb_put_zero(skb, sizeof(*hdr)); 2642 hdr->frame_control = 
cpu_to_le16(IEEE80211_FTYPE_MGMT | 2643 IEEE80211_STYPE_ACTION | 2644 IEEE80211_FCTL_PROTECTED); 2645 2646 rcu_read_lock(); 2647 2648 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2649 2650 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2651 ether_addr_copy(hdr->addr2, bss_conf->addr); 2652 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2653 2654 rcu_read_unlock(); 2655 2656 skb_put_zero(skb, sec_hdr_len); 2657 2658 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2659 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2660 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2661 2662 return skb; 2663 } 2664 2665 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2666 struct rtw89_vif_link *rtwvif_link) 2667 { 2668 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2669 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2670 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2671 struct ieee80211_hdr_3addr *hdr; 2672 struct rtw89_arp_rsp *arp_skb; 2673 struct arphdr *arp_hdr; 2674 struct sk_buff *skb; 2675 __le16 fc; 2676 2677 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2678 if (!skb) 2679 return NULL; 2680 2681 hdr = skb_put_zero(skb, sizeof(*hdr)); 2682 2683 if (rtw_wow->ptk_alg) 2684 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2685 IEEE80211_FCTL_PROTECTED); 2686 else 2687 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2688 2689 hdr->frame_control = fc; 2690 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2691 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2692 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2693 2694 skb_put_zero(skb, sec_hdr_len); 2695 2696 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2697 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2698 arp_skb->llc_type = htons(ETH_P_ARP); 2699 2700 arp_hdr = &arp_skb->arp_hdr; 2701 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2702 arp_hdr->ar_pro = htons(ETH_P_IP); 2703 arp_hdr->ar_hln = ETH_ALEN; 2704 arp_hdr->ar_pln = 4; 2705 arp_hdr->ar_op = htons(ARPOP_REPLY); 2706 2707 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2708 arp_skb->sender_ip = rtwvif->ip_addr; 2709 2710 return skb; 2711 } 2712 2713 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2714 struct rtw89_vif_link *rtwvif_link, 2715 enum rtw89_fw_pkt_ofld_type type, 2716 u8 *id) 2717 { 2718 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2719 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2720 struct rtw89_pktofld_info *info; 2721 struct sk_buff *skb; 2722 int ret; 2723 2724 info = kzalloc(sizeof(*info), GFP_KERNEL); 2725 if (!info) 2726 return -ENOMEM; 2727 2728 switch (type) { 2729 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2730 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2731 break; 2732 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2733 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2734 break; 2735 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2736 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2737 break; 2738 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2739 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2740 break; 2741 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2742 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2743 break; 2744 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2745 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2746 break; 2747 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2748 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2749 break; 2750 default: 2751 goto err; 2752 } 2753 2754 if (!skb) 2755 goto err; 2756 2757 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2758 kfree_skb(skb); 2759 2760 if (ret) 2761 goto err; 2762 2763 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2764 *id = info->id; 2765 return 0; 2766 2767 err: 2768 kfree(info); 2769 return -ENOMEM; 2770 } 2771 2772 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2773 struct rtw89_vif_link *rtwvif_link, 2774 bool notify_fw) 2775 { 2776 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2777 struct rtw89_pktofld_info *info, *tmp; 2778 2779 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2780 if (notify_fw) 2781 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2782 else 2783 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2784 list_del(&info->list); 2785 kfree(info); 2786 } 2787 } 2788 2789 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2790 { 2791 struct rtw89_vif_link *rtwvif_link; 2792 struct rtw89_vif *rtwvif; 2793 unsigned int link_id; 2794 2795 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2796 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2797 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2798 notify_fw); 2799 } 2800 2801 #define H2C_GENERAL_PKT_LEN 6 2802 #define H2C_GENERAL_PKT_ID_UND 0xff 2803 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2804 struct rtw89_vif_link *rtwvif_link, u8 macid) 2805 { 2806 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2807 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2808 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2809 struct sk_buff *skb; 2810 int ret; 2811 2812 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2813 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2814 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2815 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2816 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2817 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2818 2819 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2820 if (!skb) { 2821 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2822 return -ENOMEM; 2823 } 2824 skb_put(skb, H2C_GENERAL_PKT_LEN); 2825 SET_GENERAL_PKT_MACID(skb->data, macid); 2826 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2827 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2828 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2829 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2830 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2831 2832 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2833 H2C_CAT_MAC, 2834 H2C_CL_FW_INFO, 2835 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2836 H2C_GENERAL_PKT_LEN); 2837 2838 ret = rtw89_h2c_tx(rtwdev, skb, false); 2839 if (ret) { 2840 rtw89_err(rtwdev, "failed to send h2c\n"); 2841 goto fail; 2842 } 2843 2844 return 0; 2845 fail: 2846 dev_kfree_skb_any(skb); 2847 2848 return ret; 2849 } 2850 2851 #define H2C_LPS_PARM_LEN 8 2852 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2853 struct rtw89_lps_parm *lps_param) 2854 { 2855 struct sk_buff *skb; 2856 bool done_ack; 2857 int ret; 2858 2859 if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw)) 2860 done_ack = false; 2861 else 2862 done_ack = !lps_param->psmode; 2863 2864 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2865 if (!skb) { 2866 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2867 return -ENOMEM; 2868 } 2869 skb_put(skb, H2C_LPS_PARM_LEN); 2870 2871 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2872 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2873 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2874 SET_LPS_PARM_RLBM(skb->data, 1); 2875 SET_LPS_PARM_SMARTPS(skb->data, 1); 2876 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2877 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2878 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2879 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2880 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2881 2882 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2883 H2C_CAT_MAC, 2884 H2C_CL_MAC_PS, 2885 H2C_FUNC_MAC_LPS_PARM, 0, done_ack, 2886 H2C_LPS_PARM_LEN); 2887 2888 ret = rtw89_h2c_tx(rtwdev, skb, false); 2889 if (ret) { 2890 rtw89_err(rtwdev, "failed to send h2c\n"); 2891 goto fail; 2892 } 2893 2894 return 0; 2895 fail: 2896 dev_kfree_skb_any(skb); 2897 2898 return ret; 2899 } 2900 2901 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2902 { 2903 const struct rtw89_chip_info *chip = rtwdev->chip; 2904 const struct rtw89_chan *chan; 2905 struct rtw89_vif_link *rtwvif_link; 2906 struct rtw89_h2c_lps_ch_info *h2c; 2907 u32 len = sizeof(*h2c); 2908 unsigned int link_id; 2909 struct sk_buff *skb; 2910 bool no_chan = true; 2911 u8 phy_idx; 2912 u32 done; 2913 int ret; 2914 2915 if (chip->chip_gen != RTW89_CHIP_BE) 2916 return 0; 2917 2918 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2919 if (!skb) { 2920 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2921 return -ENOMEM; 2922 } 2923 skb_put(skb, len); 2924 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2925 2926 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2927 phy_idx = rtwvif_link->phy_idx; 2928 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2929 continue; 2930 2931 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2932 no_chan = false; 2933 2934 h2c->info[phy_idx].central_ch = chan->channel; 2935 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2936 h2c->info[phy_idx].band = chan->band_type; 2937 h2c->info[phy_idx].bw = chan->band_width; 2938 } 2939 2940 if (no_chan) { 2941 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2942 ret = -ENOENT; 2943 goto fail; 2944 } 2945 2946 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2947 2948 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2949 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2950 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2951 2952 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2953 ret = rtw89_h2c_tx(rtwdev, skb, false); 2954 if (ret) { 2955 rtw89_err(rtwdev, "failed to send h2c\n"); 
2956 goto fail; 2957 } 2958 2959 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2960 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2961 if (ret) 2962 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2963 2964 return 0; 2965 fail: 2966 dev_kfree_skb_any(skb); 2967 2968 return ret; 2969 } 2970 2971 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2972 struct rtw89_vif *rtwvif) 2973 { 2974 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2975 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2976 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12}; 2977 const struct rtw89_chip_info *chip = rtwdev->chip; 2978 struct rtw89_efuse *efuse = &rtwdev->efuse; 2979 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2980 struct rtw89_vif_link *rtwvif_link; 2981 const struct rtw89_chan *chan; 2982 u8 bw_idx = RTW89_BB_BW_20_40; 2983 u32 len = sizeof(*h2c); 2984 unsigned int link_id; 2985 struct sk_buff *skb; 2986 u8 beacon_bw_ofst; 2987 u8 gain_band; 2988 u32 done; 2989 u8 path; 2990 int ret; 2991 int i; 2992 2993 if (chip->chip_gen != RTW89_CHIP_BE) 2994 return 0; 2995 2996 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2997 if (!skb) { 2998 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2999 return -ENOMEM; 3000 } 3001 skb_put(skb, len); 3002 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 3003 3004 h2c->fmt_id = 0x3; 3005 3006 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 3007 h2c->rfe_type = efuse->rfe_type; 3008 3009 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 3010 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 3011 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3012 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 3013 3014 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 3015 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 3016 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 3017 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 3018 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 3019 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 3020 else 3021 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 3022 3023 /* Fill BW20 RX gain table for beacon mode */ 3024 for (i = 0; i < TIA_GAIN_NUM; i++) { 3025 h2c->tia_gain[rtwvif_link->phy_idx][i] = 3026 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 3027 } 3028 3029 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) { 3030 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx]; 3031 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst; 3032 } 3033 3034 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 3035 gain->lna_gain[gain_band][bw_idx][path], 3036 LNA_GAIN_NUM); 3037 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx], 3038 gain->tia_lna_op1db[gain_band][bw_idx][path], 3039 LNA_GAIN_NUM + 1); 3040 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx], 3041 gain->lna_op1db[gain_band][bw_idx][path], 3042 LNA_GAIN_NUM); 3043 } 3044 3045 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3046 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 3047 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 3048 3049 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 3050 ret = rtw89_h2c_tx(rtwdev, skb, false); 3051 if (ret) { 3052 rtw89_err(rtwdev, "failed to send h2c\n"); 3053 goto fail; 3054 } 3055 3056 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 3057 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 3058 if (ret) 3059 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done 
polling timeout\n"); 3060 3061 return 0; 3062 fail: 3063 dev_kfree_skb_any(skb); 3064 3065 return ret; 3066 } 3067 3068 #define H2C_P2P_ACT_LEN 20 3069 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 3070 struct rtw89_vif_link *rtwvif_link, 3071 struct ieee80211_p2p_noa_desc *desc, 3072 u8 act, u8 noa_id, u8 ctwindow_oppps) 3073 { 3074 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 3075 struct sk_buff *skb; 3076 u8 *cmd; 3077 int ret; 3078 3079 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 3080 if (!skb) { 3081 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3082 return -ENOMEM; 3083 } 3084 skb_put(skb, H2C_P2P_ACT_LEN); 3085 cmd = skb->data; 3086 3087 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 3088 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 3089 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 3090 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 3091 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 3092 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 3093 if (desc) { 3094 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 3095 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 3096 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 3097 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 3098 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 3099 } 3100 3101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3102 H2C_CAT_MAC, H2C_CL_MAC_PS, 3103 H2C_FUNC_P2P_ACT, 0, 0, 3104 H2C_P2P_ACT_LEN); 3105 3106 ret = rtw89_h2c_tx(rtwdev, skb, false); 3107 if (ret) { 3108 rtw89_err(rtwdev, "failed to send h2c\n"); 3109 goto fail; 3110 } 3111 3112 return 0; 3113 fail: 3114 dev_kfree_skb_any(skb); 3115 3116 return ret; 3117 } 3118 3119 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 3120 struct sk_buff *skb) 3121 { 3122 const struct rtw89_chip_info *chip = rtwdev->chip; 3123 struct rtw89_hal *hal = &rtwdev->hal; 3124 u8 ntx_path; 3125 u8 map_b; 3126 3127 if (chip->rf_path_num == 1) { 3128 ntx_path = RF_A; 3129 map_b = 0; 3130 } else { 3131 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; 3132 map_b = ntx_path == RF_AB ? 1 : 0; 3133 } 3134 3135 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 3136 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 3137 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 3138 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 3139 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 3140 } 3141 3142 #define H2C_CMC_TBL_LEN 68 3143 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 3144 struct rtw89_vif_link *rtwvif_link, 3145 struct rtw89_sta_link *rtwsta_link) 3146 { 3147 const struct rtw89_chip_info *chip = rtwdev->chip; 3148 u8 macid = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3149 struct sk_buff *skb; 3150 int ret; 3151 3152 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3153 if (!skb) { 3154 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3155 return -ENOMEM; 3156 } 3157 skb_put(skb, H2C_CMC_TBL_LEN); 3158 SET_CTRL_INFO_MACID(skb->data, macid); 3159 SET_CTRL_INFO_OPERATION(skb->data, 1); 3160 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3161 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 3162 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3163 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 3164 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 3165 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 3166 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 3167 } 3168 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 3169 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 3170 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3171 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3172 3173 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3174 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3175 chip->h2c_cctl_func_id, 0, 1, 3176 H2C_CMC_TBL_LEN); 3177 3178 ret = rtw89_h2c_tx(rtwdev, skb, false); 3179 if (ret) { 3180 rtw89_err(rtwdev, "failed to send h2c\n"); 3181 goto fail; 3182 } 3183 3184 return 0; 3185 fail: 3186 dev_kfree_skb_any(skb); 3187 3188 return ret; 3189 } 3190 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 3191 3192 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3193 struct rtw89_vif_link *rtwvif_link, 3194 struct rtw89_sta_link *rtwsta_link) 3195 { 3196 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3197 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3198 u32 len = sizeof(*h2c); 3199 struct sk_buff *skb; 3200 int ret; 3201 3202 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3203 if (!skb) { 3204 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3205 return -ENOMEM; 3206 } 3207 skb_put(skb, len); 3208 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3209 3210 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3211 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3212 3213 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 3214 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 3215 3216 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 3217 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 3218 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3219 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 3220 3221 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 3222 3223 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 3224 3225 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3226 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 3227 3228 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3229 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3230 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3231 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3232 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3233 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 3234 3235 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 3236 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 3237 3238 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 3239 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 3240 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 3241 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 3242 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 3243 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 3244 3245 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 3246 3247 h2c->w14 = le32_encode_bits(0, 
CCTLINFO_G7_W14_VO_CURR_RATE) | 3248 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3249 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3250 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3251 3252 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3253 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3254 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3255 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3256 3257 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3258 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3259 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3260 len); 3261 3262 ret = rtw89_h2c_tx(rtwdev, skb, false); 3263 if (ret) { 3264 rtw89_err(rtwdev, "failed to send h2c\n"); 3265 goto fail; 3266 } 3267 3268 return 0; 3269 fail: 3270 dev_kfree_skb_any(skb); 3271 3272 return ret; 3273 } 3274 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3275 3276 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3277 struct ieee80211_link_sta *link_sta, 3278 u8 *pads) 3279 { 3280 bool ppe_th; 3281 u8 ppe16, ppe8; 3282 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3283 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3284 u8 ru_bitmap; 3285 u8 n, idx, sh; 3286 u16 ppe; 3287 int i; 3288 3289 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3290 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3291 if (!ppe_th) { 3292 u8 pad; 3293 3294 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3295 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3296 3297 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3298 pads[i] = pad; 3299 3300 return; 3301 } 3302 3303 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3304 n = hweight8(ru_bitmap); 3305 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3306 3307 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3308 if (!(ru_bitmap & BIT(i))) { 3309 pads[i] = 1; 3310 continue; 3311 } 3312 3313 idx = n >> 3; 3314 sh = n & 7; 3315 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3316 3317 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3318 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3319 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3320 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3321 3322 if (ppe16 != 7 && ppe8 == 7) 3323 pads[i] = RTW89_PE_DURATION_16; 3324 else if (ppe8 != 7) 3325 pads[i] = RTW89_PE_DURATION_8; 3326 else 3327 pads[i] = RTW89_PE_DURATION_0; 3328 } 3329 } 3330 3331 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3332 struct rtw89_vif_link *rtwvif_link, 3333 struct rtw89_sta_link *rtwsta_link) 3334 { 3335 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3336 const struct rtw89_chip_info *chip = rtwdev->chip; 3337 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3338 rtwvif_link->chanctx_idx); 3339 struct ieee80211_link_sta *link_sta; 3340 struct sk_buff *skb; 3341 u8 pads[RTW89_PPE_BW_NUM]; 3342 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3343 u16 lowest_rate; 3344 int ret; 3345 3346 memset(pads, 0, sizeof(pads)); 3347 3348 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3349 if (!skb) { 3350 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3351 return -ENOMEM; 3352 } 3353 3354 rcu_read_lock(); 3355 3356 if (rtwsta_link) 3357 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3358 3359 if (rtwsta_link && link_sta->he_cap.has_he) 3360 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3361 3362 if (vif->p2p) 3363 lowest_rate = RTW89_HW_RATE_OFDM6; 3364 else if (chan->band_type == RTW89_BAND_2G) 3365 lowest_rate = RTW89_HW_RATE_CCK1; 3366 else 3367 lowest_rate = RTW89_HW_RATE_OFDM6; 3368 3369 skb_put(skb, H2C_CMC_TBL_LEN); 3370 SET_CTRL_INFO_MACID(skb->data, mac_id); 3371 SET_CTRL_INFO_OPERATION(skb->data, 1); 3372 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3373 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3374 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3375 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3376 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3377 if (vif->type == NL80211_IFTYPE_STATION) 3378 SET_CMC_TBL_ULDL(skb->data, 1); 3379 else 3380 SET_CMC_TBL_ULDL(skb->data, 0); 3381 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3382 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3383 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3384 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3385 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3386 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3387 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3388 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3389 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3390 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3391 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3392 } 3393 if (rtwsta_link) 3394 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3395 link_sta->he_cap.has_he); 3396 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3397 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3398 3399 rcu_read_unlock(); 3400 3401 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3402 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3403 chip->h2c_cctl_func_id, 0, 1, 3404 H2C_CMC_TBL_LEN); 3405 3406 ret = rtw89_h2c_tx(rtwdev, skb, false); 3407 if (ret) { 3408 rtw89_err(rtwdev, "failed to send h2c\n"); 3409 goto fail; 3410 } 3411 3412 return 0; 3413 fail: 3414 dev_kfree_skb_any(skb); 3415 3416 return ret; 3417 } 3418 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3419 3420 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3421 struct ieee80211_link_sta *link_sta, 3422 u8 *pads) 3423 { 3424 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3425 u16 ppe_thres_hdr; 3426 u8 ppe16, ppe8; 3427 u8 n, idx, sh; 3428 u8 ru_bitmap; 3429 bool ppe_th; 3430 u16 ppe; 3431 int i; 3432 3433 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3434 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3435 if (!ppe_th) { 3436 u8 pad; 3437 3438 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3439 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3440 3441 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3442 pads[i] = pad; 3443 3444 return; 3445 } 3446 3447 ppe_thres_hdr = 
get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3448 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3449 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3450 n = hweight8(ru_bitmap); 3451 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3452 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3453 3454 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3455 if (!(ru_bitmap & BIT(i))) { 3456 pads[i] = 1; 3457 continue; 3458 } 3459 3460 idx = n >> 3; 3461 sh = n & 7; 3462 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3463 3464 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3465 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3466 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3467 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3468 3469 if (ppe16 != 7 && ppe8 == 7) 3470 pads[i] = RTW89_PE_DURATION_16_20; 3471 else if (ppe8 != 7) 3472 pads[i] = RTW89_PE_DURATION_8; 3473 else 3474 pads[i] = RTW89_PE_DURATION_0; 3475 } 3476 } 3477 3478 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3479 struct rtw89_vif_link *rtwvif_link, 3480 struct rtw89_sta_link *rtwsta_link) 3481 { 3482 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3483 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3484 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3485 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3486 struct ieee80211_bss_conf *bss_conf; 3487 struct ieee80211_link_sta *link_sta; 3488 u8 pads[RTW89_PPE_BW_NUM]; 3489 u32 len = sizeof(*h2c); 3490 struct sk_buff *skb; 3491 u16 lowest_rate; 3492 int ret; 3493 3494 memset(pads, 0, sizeof(pads)); 3495 3496 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3497 if (!skb) { 3498 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3499 return -ENOMEM; 3500 } 3501 3502 rcu_read_lock(); 3503 3504 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3505 3506 if (rtwsta_link) { 3507 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3508 3509 if (link_sta->eht_cap.has_eht) 3510 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3511 else if (link_sta->he_cap.has_he) 3512 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3513 } 3514 3515 if (vif->p2p) 3516 lowest_rate = RTW89_HW_RATE_OFDM6; 3517 else if (chan->band_type == RTW89_BAND_2G) 3518 lowest_rate = RTW89_HW_RATE_CCK1; 3519 else 3520 lowest_rate = RTW89_HW_RATE_OFDM6; 3521 3522 skb_put(skb, len); 3523 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3524 3525 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3526 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3527 3528 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3529 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3530 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3531 CCTLINFO_G7_W0_DISDATAFB); 3532 3533 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3534 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3535 3536 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3537 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3538 3539 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3540 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3541 3542 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3543 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3544 3545 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3546 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3547 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3548 
} 3549 3550 if (bss_conf->eht_support) { 3551 u16 punct = bss_conf->chanreq.oper.punctured; 3552 3553 h2c->w4 |= le32_encode_bits(~punct, 3554 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3555 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3556 } 3557 3558 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3559 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3560 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3561 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3562 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3563 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3564 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3565 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3566 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3567 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3568 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3569 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3570 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3571 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3572 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3573 3574 h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) | 3575 le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, 3576 CCTLINFO_G7_W6_ULDL); 3577 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL); 3578 3579 if (rtwsta_link) { 3580 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3581 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3582 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3583 } 3584 3585 rcu_read_unlock(); 3586 3587 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3588 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3589 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3590 len); 3591 3592 ret = rtw89_h2c_tx(rtwdev, skb, false); 3593 if (ret) { 3594 rtw89_err(rtwdev, "failed to send h2c\n"); 3595 goto fail; 3596 } 3597 3598 return 0; 3599 fail: 3600 dev_kfree_skb_any(skb); 3601 3602 return ret; 3603 } 3604 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3605 3606 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3607 struct rtw89_vif_link *rtwvif_link, 3608 struct rtw89_sta_link *rtwsta_link) 3609 { 3610 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3611 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3612 u32 len = sizeof(*h2c); 3613 struct sk_buff *skb; 3614 u16 agg_num = 0; 3615 u8 ba_bmap = 0; 3616 int ret; 3617 u8 tid; 3618 3619 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3620 if (!skb) { 3621 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3622 return -ENOMEM; 3623 } 3624 skb_put(skb, len); 3625 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3626 3627 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3628 if (agg_num == 0) 3629 agg_num = rtwsta->ampdu_params[tid].agg_num; 3630 else 3631 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3632 } 3633 3634 if (agg_num <= 0x20) 3635 ba_bmap = 3; 3636 else if (agg_num > 0x20 && agg_num <= 0x40) 3637 ba_bmap = 0; 3638 else if (agg_num > 0x40 && agg_num <= 0x80) 3639 ba_bmap = 1; 3640 else if (agg_num > 0x80 && agg_num <= 0x100) 3641 ba_bmap = 2; 3642 else if (agg_num > 0x100 && agg_num <= 0x200) 3643 ba_bmap = 4; 3644 else if (agg_num > 0x200 && agg_num <= 0x400) 3645 ba_bmap = 5; 3646 3647 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3648 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3649 3650 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3651 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3652 3653 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3654 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3655 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3656 len); 3657 3658 ret = 
rtw89_h2c_tx(rtwdev, skb, false); 3659 if (ret) { 3660 rtw89_err(rtwdev, "failed to send h2c\n"); 3661 goto fail; 3662 } 3663 3664 return 0; 3665 fail: 3666 dev_kfree_skb_any(skb); 3667 3668 return ret; 3669 } 3670 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3671 3672 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3673 struct rtw89_sta_link *rtwsta_link) 3674 { 3675 const struct rtw89_chip_info *chip = rtwdev->chip; 3676 struct sk_buff *skb; 3677 int ret; 3678 3679 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3680 if (!skb) { 3681 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3682 return -ENOMEM; 3683 } 3684 skb_put(skb, H2C_CMC_TBL_LEN); 3685 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3686 SET_CTRL_INFO_OPERATION(skb->data, 1); 3687 if (rtwsta_link->cctl_tx_time) { 3688 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3689 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3690 } 3691 if (rtwsta_link->cctl_tx_retry_limit) { 3692 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3693 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3694 } 3695 3696 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3697 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3698 chip->h2c_cctl_func_id, 0, 1, 3699 H2C_CMC_TBL_LEN); 3700 3701 ret = rtw89_h2c_tx(rtwdev, skb, false); 3702 if (ret) { 3703 rtw89_err(rtwdev, "failed to send h2c\n"); 3704 goto fail; 3705 } 3706 3707 return 0; 3708 fail: 3709 dev_kfree_skb_any(skb); 3710 3711 return ret; 3712 } 3713 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl); 3714 3715 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3716 struct rtw89_sta_link *rtwsta_link) 3717 { 3718 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3719 u32 len = sizeof(*h2c); 3720 struct sk_buff *skb; 3721 int ret; 3722 3723 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3724 if (!skb) { 3725 rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n"); 3726 return -ENOMEM; 3727 } 3728 skb_put(skb, len); 3729 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3730 3731 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3732 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3733 3734 if (rtwsta_link->cctl_tx_time) { 3735 h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3736 h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3737 3738 h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time, 3739 CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3740 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3741 } 3742 if (rtwsta_link->cctl_tx_retry_limit) { 3743 h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) | 3744 le32_encode_bits(rtwsta_link->data_tx_cnt_lmt, 3745 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3746 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL | 3747 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3748 } 3749 3750 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3751 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3752 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3753 len); 3754 3755 ret = rtw89_h2c_tx(rtwdev, skb, false); 3756 if (ret) { 3757 rtw89_err(rtwdev, "failed to send h2c\n"); 3758 goto fail; 3759 } 3760 3761 return 0; 3762 fail: 3763 dev_kfree_skb_any(skb); 3764 3765 return ret; 3766 } 3767 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7); 3768 3769 int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3770 struct rtw89_vif_link *rtwvif_link, 3771 u16 punctured) 3772 { 3773 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3774 u32 len = sizeof(*h2c); 3775 struct sk_buff *skb; 3776 int ret; 3777 3778 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3779 if (!skb) { 3780 rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n"); 3781 return -ENOMEM; 3782 } 3783 3784 skb_put(skb, len); 3785 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3786 3787 h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) | 3788 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3789 3790 h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3791 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3792 3793 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3794 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3795 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3796 len); 3797 3798 ret = rtw89_h2c_tx(rtwdev, skb, false); 3799 if (ret) { 3800 rtw89_err(rtwdev, "failed to send h2c\n"); 3801 goto fail; 3802 } 3803 3804 return 0; 3805 fail: 3806 dev_kfree_skb_any(skb); 3807 3808 return ret; 3809 } 3810 EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7); 3811 3812 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3813 struct rtw89_sta_link *rtwsta_link) 3814 { 3815 const struct rtw89_chip_info *chip = rtwdev->chip; 3816 struct sk_buff *skb; 3817 int ret; 3818 3819 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3820 return 0; 3821 3822 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3823 if (!skb) { 3824 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3825 return -ENOMEM; 3826 } 3827 skb_put(skb, H2C_CMC_TBL_LEN); 3828 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3829 SET_CTRL_INFO_OPERATION(skb->data, 1); 3830 3831 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3832 3833 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3834 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3835 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3836 H2C_CMC_TBL_LEN); 3837 3838 ret = rtw89_h2c_tx(rtwdev, skb, false); 3839 if (ret) { 3840 rtw89_err(rtwdev, "failed to send h2c\n"); 3841 goto fail; 3842 } 3843 3844 return 0; 3845 fail: 3846 dev_kfree_skb_any(skb); 3847 3848 return ret; 3849 } 3850 3851 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3852 struct rtw89_vif_link *rtwvif_link) 3853 { 3854 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3855 rtwvif_link->chanctx_idx); 3856 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3857 struct rtw89_h2c_bcn_upd *h2c; 3858 struct sk_buff *skb_beacon; 3859 struct ieee80211_hdr *hdr; 3860 u32 len = sizeof(*h2c); 3861 struct sk_buff *skb; 3862 int bcn_total_len; 3863 u16 beacon_rate; 3864 u16 tim_offset; 3865 void *noa_data; 3866 u8 noa_len; 3867 int ret; 3868 3869 if (vif->p2p) 3870 beacon_rate = RTW89_HW_RATE_OFDM6; 3871 else if (chan->band_type == RTW89_BAND_2G) 3872 beacon_rate = RTW89_HW_RATE_CCK1; 3873 else 3874 beacon_rate = RTW89_HW_RATE_OFDM6; 3875 3876 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3877 NULL, 0); 3878 if (!skb_beacon) { 3879 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3880 return -ENOMEM; 3881 } 3882 3883 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3884 if (noa_len && 3885 (noa_len <= skb_tailroom(skb_beacon) || 3886 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3887 skb_put_data(skb_beacon, noa_data, noa_len); 3888 } 3889 3890 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3891 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3892 3893 bcn_total_len = len + skb_beacon->len; 3894 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3895 if (!skb) { 3896 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3897 dev_kfree_skb_any(skb_beacon); 3898 return -ENOMEM;
3899 } 3900 skb_put(skb, len); 3901 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3902 3903 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3904 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3905 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3906 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3907 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3908 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3909 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3910 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3911 3912 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3913 dev_kfree_skb_any(skb_beacon); 3914 3915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3916 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3917 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3918 bcn_total_len); 3919 3920 ret = rtw89_h2c_tx(rtwdev, skb, false); 3921 if (ret) { 3922 rtw89_err(rtwdev, "failed to send h2c\n"); 3923 dev_kfree_skb_any(skb); 3924 return ret; 3925 } 3926 3927 return 0; 3928 } 3929 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3930 3931 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3932 struct rtw89_vif_link *rtwvif_link) 3933 { 3934 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3935 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3936 struct rtw89_h2c_bcn_upd_be *h2c; 3937 struct sk_buff *skb_beacon; 3938 struct ieee80211_hdr *hdr; 3939 u32 len = sizeof(*h2c); 3940 struct sk_buff *skb; 3941 int bcn_total_len; 3942 u16 beacon_rate; 3943 u16 tim_offset; 3944 void *noa_data; 3945 u8 noa_len; 3946 int ret; 3947 3948 if (vif->p2p) 3949 beacon_rate = RTW89_HW_RATE_OFDM6; 3950 else if (chan->band_type == RTW89_BAND_2G) 3951 beacon_rate = RTW89_HW_RATE_CCK1; 3952 else 3953 beacon_rate = RTW89_HW_RATE_OFDM6; 3954 3955 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3956 NULL, 0); 3957 if (!skb_beacon) { 3958 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3959 return -ENOMEM; 3960 } 3961 3962 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3963 if (noa_len && 3964 (noa_len <= skb_tailroom(skb_beacon) || 3965 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3966 skb_put_data(skb_beacon, noa_data, noa_len); 3967 } 3968 3969 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3970 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3971 3972 bcn_total_len = len + skb_beacon->len; 3973 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3974 if (!skb) { 3975 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3976 dev_kfree_skb_any(skb_beacon); 3977 return -ENOMEM; 3978 } 3979 skb_put(skb, len); 3980 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3981 3982 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3983 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3984 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3985 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3986 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3987 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3988 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3989 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3990 3991 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3992 dev_kfree_skb_any(skb_beacon); 3993 3994
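	/* Descriptive note: the raw beacon frame is appended right after the
	 * fixed H2C fields above, so the H2C packet header below is sized with
	 * bcn_total_len (fixed fields plus beacon) rather than sizeof(*h2c).
	 */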
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3995 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3996 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3997 bcn_total_len); 3998 3999 ret = rtw89_h2c_tx(rtwdev, skb, false); 4000 if (ret) { 4001 rtw89_err(rtwdev, "failed to send h2c\n"); 4002 goto fail; 4003 } 4004 4005 return 0; 4006 4007 fail: 4008 dev_kfree_skb_any(skb); 4009 4010 return ret; 4011 } 4012 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 4013 4014 int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev, 4015 struct rtw89_vif_link *rtwvif_link, u32 offset) 4016 { 4017 struct rtw89_h2c_tbtt_tuning *h2c; 4018 u32 len = sizeof(*h2c); 4019 struct sk_buff *skb; 4020 int ret; 4021 4022 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4023 if (!skb) { 4024 rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n"); 4025 return -ENOMEM; 4026 } 4027 skb_put(skb, len); 4028 h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data; 4029 4030 h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) | 4031 le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT); 4032 h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT); 4033 4034 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4035 H2C_CAT_MAC, H2C_CL_MAC_PS, 4036 H2C_FUNC_TBTT_TUNING, 0, 0, 4037 len); 4038 4039 ret = rtw89_h2c_tx(rtwdev, skb, false); 4040 if (ret) { 4041 rtw89_err(rtwdev, "failed to send h2c\n"); 4042 goto fail; 4043 } 4044 4045 return 0; 4046 fail: 4047 dev_kfree_skb_any(skb); 4048 4049 return ret; 4050 } 4051 4052 int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4053 { 4054 #define RTW89_BCN_TO_VAL_MIN 4 4055 #define RTW89_BCN_TO_VAL_MAX 64 4056 #define RTW89_DTIM_TO_VAL_MIN 7 4057 #define RTW89_DTIM_TO_VAL_MAX 15 4058 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 4059 struct rtw89_h2c_pwr_lvl *h2c; 4060 u32 len = sizeof(*h2c); 4061 struct sk_buff *skb; 4062 u8 bcn_to_val; 4063 int ret; 4064 4065 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4066 if (!skb) { 4067 rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n"); 4068 return -ENOMEM; 4069 } 4070 skb_put(skb, len); 4071 h2c = (struct rtw89_h2c_pwr_lvl *)skb->data; 4072 4073 bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout, 4074 RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX); 4075 4076 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) | 4077 le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) | 4078 le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) | 4079 le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) | 4080 le32_encode_bits(RTW89_DTIM_TO_VAL_MIN, 4081 RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL); 4082 4083 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4084 H2C_CAT_MAC, H2C_CL_MAC_PS, 4085 H2C_FUNC_PS_POWER_LEVEL, 0, 0, 4086 len); 4087 4088 ret = rtw89_h2c_tx(rtwdev, skb, false); 4089 if (ret) { 4090 rtw89_err(rtwdev, "failed to send h2c\n"); 4091 goto fail; 4092 } 4093 4094 return 0; 4095 fail: 4096 dev_kfree_skb_any(skb); 4097 4098 return ret; 4099 } 4100 4101 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 4102 struct rtw89_vif_link *rtwvif_link, 4103 struct rtw89_sta_link *rtwsta_link, 4104 enum rtw89_upd_mode upd_mode) 4105 { 4106 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 4107 struct rtw89_h2c_role_maintain *h2c; 4108 u32 len = sizeof(*h2c); 4109 struct sk_buff *skb; 4110 u8 self_role; 4111 int ret; 4112 4113 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 4114 if (rtwsta_link) 4115 self_role = RTW89_SELF_ROLE_AP_CLIENT; 4116 else 4117 self_role = rtwvif_link->self_role; 4118 } else { 4119 self_role = rtwvif_link->self_role; 4120 } 4121 4122 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4123 if (!skb) { 4124 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4125 return -ENOMEM; 4126 } 4127 skb_put(skb, len); 4128 h2c = (struct rtw89_h2c_role_maintain *)skb->data; 4129 4130 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) | 4131 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) | 4132 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) | 4133 le32_encode_bits(rtwvif_link->wifi_role, 4134 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) | 4135 le32_encode_bits(rtwvif_link->mac_idx, 4136 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) | 4137 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT); 4138 4139 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4140 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4141 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 4142 len); 4143 4144 ret = rtw89_h2c_tx(rtwdev, skb, false); 4145 if (ret) { 4146 rtw89_err(rtwdev, "failed to send h2c\n"); 4147 goto fail; 4148 } 4149 4150 return 0; 4151 fail: 4152 dev_kfree_skb_any(skb); 4153 4154 return ret; 4155 } 4156 4157 static enum rtw89_fw_sta_type 4158 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4159 struct rtw89_sta_link *rtwsta_link) 4160 { 4161 struct ieee80211_bss_conf *bss_conf; 4162 struct ieee80211_link_sta *link_sta; 4163 enum rtw89_fw_sta_type type; 4164 4165 rcu_read_lock(); 4166 4167 if (!rtwsta_link) 4168 goto by_vif; 4169 4170 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 4171 4172 if (link_sta->eht_cap.has_eht) 4173 type = RTW89_FW_BE_STA; 4174 else if (link_sta->he_cap.has_he) 4175 type = RTW89_FW_AX_STA; 4176 else 4177 type = RTW89_FW_N_AC_STA; 4178 4179 goto out; 4180 4181 by_vif: 4182 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4183 4184 if (bss_conf->eht_support) 4185 type = RTW89_FW_BE_STA; 4186 else if (bss_conf->he_support) 4187 type = RTW89_FW_AX_STA; 4188 else 4189 type = RTW89_FW_N_AC_STA; 4190 4191 out: 4192 rcu_read_unlock(); 4193 4194 return type; 4195 } 4196 4197 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4198 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 4199 { 4200 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 4201 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4202 bool is_mld = ieee80211_vif_is_mld(vif); 4203 u8 self_role = rtwvif_link->self_role; 4204 enum rtw89_fw_sta_type sta_type; 4205 u8 net_type = rtwvif_link->net_type; 4206 struct rtw89_h2c_join_v1 *h2c_v1; 4207 struct rtw89_h2c_join *h2c; 4208 u32 len = sizeof(*h2c); 4209 bool format_v1 = false; 4210 struct sk_buff *skb; 4211 u8 main_mac_id; 4212 bool init_ps; 4213 int ret; 4214 4215 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 4216 len = sizeof(*h2c_v1); 4217 format_v1 = true; 4218 } 4219 4220 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 4221 self_role = RTW89_SELF_ROLE_AP_CLIENT; 4222 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 4223 } 4224 4225 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4226 if (!skb) { 4227 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4228 return -ENOMEM; 4229 } 4230 skb_put(skb, len); 4231 h2c = (struct rtw89_h2c_join *)skb->data; 4232 4233 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 4234 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 4235 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 4236 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 4237 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 4238 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 4239 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 4240 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 4241 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 4242 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 4243 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 4244 le32_encode_bits(rtwvif_link->wifi_role, 4245 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 4246 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 4247 4248 if (!format_v1) 4249 goto done; 4250 4251 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 4252 4253 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 4254 init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif); 4255 4256 if (rtwsta_link) 4257 main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta); 4258 else 4259 main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif); 4260 4261 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) | 4262 le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) | 4263 le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) | 4264 le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR, 4265 RTW89_H2C_JOININFO_W1_MLO_MODE) | 4266 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) | 4267 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) | 4268 le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) | 4269 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US, 4270 RTW89_H2C_JOININFO_W1_EMLSR_PADDING) | 4271 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US, 4272 RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) | 4273 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) | 4274 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT); 4275 4276 h2c_v1->w2 = 0; 4277 4278 done: 4279 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4280 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4281 H2C_FUNC_MAC_JOININFO, 0, 1, 4282 len); 4283 4284 ret = rtw89_h2c_tx(rtwdev, skb, false); 4285 if (ret) { 4286 rtw89_err(rtwdev, "failed to send h2c\n"); 4287 goto fail; 4288 } 4289 4290 return 0; 4291 fail: 4292 dev_kfree_skb_any(skb); 4293 4294 return ret; 4295 } 4296 4297 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 4298 { 4299 struct rtw89_h2c_notify_dbcc *h2c; 4300 u32 len = sizeof(*h2c); 4301 struct sk_buff *skb; 4302 int ret; 4303 4304 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4305 if (!skb) { 4306 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 4307 return -ENOMEM; 4308 } 4309 skb_put(skb, len); 4310 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 4311 4312 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 4313 4314 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4315 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4316 H2C_FUNC_NOTIFY_DBCC, 0, 1, 4317 len); 4318 4319 ret = rtw89_h2c_tx(rtwdev, skb, false); 
4320 if (ret) { 4321 rtw89_err(rtwdev, "failed to send h2c\n"); 4322 goto fail; 4323 } 4324 4325 return 0; 4326 fail: 4327 dev_kfree_skb_any(skb); 4328 4329 return ret; 4330 } 4331 4332 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 4333 bool pause) 4334 { 4335 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 4336 struct rtw89_fw_macid_pause_grp *h2c; 4337 __le32 set = cpu_to_le32(BIT(sh)); 4338 u8 h2c_macid_pause_id; 4339 struct sk_buff *skb; 4340 u32 len; 4341 int ret; 4342 4343 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 4344 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 4345 len = sizeof(*h2c_new); 4346 } else { 4347 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 4348 len = sizeof(*h2c); 4349 } 4350 4351 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4352 if (!skb) { 4353 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 4354 return -ENOMEM; 4355 } 4356 skb_put(skb, len); 4357 4358 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 4359 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 4360 4361 h2c_new->n[0].pause_mask_grp[grp] = set; 4362 h2c_new->n[0].sleep_mask_grp[grp] = set; 4363 if (pause) { 4364 h2c_new->n[0].pause_grp[grp] = set; 4365 h2c_new->n[0].sleep_grp[grp] = set; 4366 } 4367 } else { 4368 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 4369 4370 h2c->mask_grp[grp] = set; 4371 if (pause) 4372 h2c->pause_grp[grp] = set; 4373 } 4374 4375 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4376 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4377 h2c_macid_pause_id, 1, 0, 4378 len); 4379 4380 ret = rtw89_h2c_tx(rtwdev, skb, false); 4381 if (ret) { 4382 rtw89_err(rtwdev, "failed to send h2c\n"); 4383 goto fail; 4384 } 4385 4386 return 0; 4387 fail: 4388 dev_kfree_skb_any(skb); 4389 4390 return ret; 4391 } 4392 4393 #define H2C_EDCA_LEN 12 4394 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4395 u8 ac, u32 val) 4396 { 4397 struct sk_buff *skb; 4398 int ret; 4399 4400 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 4401 if (!skb) { 4402 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 4403 return -ENOMEM; 4404 } 4405 skb_put(skb, H2C_EDCA_LEN); 4406 RTW89_SET_EDCA_SEL(skb->data, 0); 4407 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 4408 RTW89_SET_EDCA_WMM(skb->data, 0); 4409 RTW89_SET_EDCA_AC(skb->data, ac); 4410 RTW89_SET_EDCA_PARAM(skb->data, val); 4411 4412 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4413 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4414 H2C_FUNC_USR_EDCA, 0, 1, 4415 H2C_EDCA_LEN); 4416 4417 ret = rtw89_h2c_tx(rtwdev, skb, false); 4418 if (ret) { 4419 rtw89_err(rtwdev, "failed to send h2c\n"); 4420 goto fail; 4421 } 4422 4423 return 0; 4424 fail: 4425 dev_kfree_skb_any(skb); 4426 4427 return ret; 4428 } 4429 4430 #define H2C_TSF32_TOGL_LEN 4 4431 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 4432 struct rtw89_vif_link *rtwvif_link, 4433 bool en) 4434 { 4435 struct sk_buff *skb; 4436 u16 early_us = en ? 
2000 : 0; 4437 u8 *cmd; 4438 int ret; 4439 4440 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 4441 if (!skb) { 4442 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 4443 return -ENOMEM; 4444 } 4445 skb_put(skb, H2C_TSF32_TOGL_LEN); 4446 cmd = skb->data; 4447 4448 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 4449 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 4450 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 4451 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 4452 4453 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4454 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4455 H2C_FUNC_TSF32_TOGL, 0, 0, 4456 H2C_TSF32_TOGL_LEN); 4457 4458 ret = rtw89_h2c_tx(rtwdev, skb, false); 4459 if (ret) { 4460 rtw89_err(rtwdev, "failed to send h2c\n"); 4461 goto fail; 4462 } 4463 4464 return 0; 4465 fail: 4466 dev_kfree_skb_any(skb); 4467 4468 return ret; 4469 } 4470 4471 #define H2C_OFLD_CFG_LEN 8 4472 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4473 { 4474 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4475 struct sk_buff *skb; 4476 int ret; 4477 4478 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4479 if (!skb) { 4480 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4481 return -ENOMEM; 4482 } 4483 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4484 4485 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4486 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4487 H2C_FUNC_OFLD_CFG, 0, 1, 4488 H2C_OFLD_CFG_LEN); 4489 4490 ret = rtw89_h2c_tx(rtwdev, skb, false); 4491 if (ret) { 4492 rtw89_err(rtwdev, "failed to send h2c\n"); 4493 goto fail; 4494 } 4495 4496 return 0; 4497 fail: 4498 dev_kfree_skb_any(skb); 4499 4500 return ret; 4501 } 4502 4503 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4504 { 4505 struct rtw89_h2c_tx_duty *h2c; 4506 u32 len = sizeof(*h2c); 4507 struct sk_buff *skb; 4508 u16 pause, active; 4509 int ret; 4510 4511 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4512 if (!skb) { 4513 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4514 return -ENOMEM; 4515 } 4516 4517 skb_put(skb, len); 4518 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4519 4520 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4521 4522 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4523 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4524 } else { 4525 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4526 pause = 100 - active; 4527 4528 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4529 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4530 } 4531 4532 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4533 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4534 H2C_FUNC_TX_DUTY, 0, 0, len); 4535 4536 ret = rtw89_h2c_tx(rtwdev, skb, false); 4537 if (ret) { 4538 rtw89_err(rtwdev, "failed to send h2c\n"); 4539 goto fail; 4540 } 4541 4542 return 0; 4543 fail: 4544 dev_kfree_skb_any(skb); 4545 4546 return ret; 4547 } 4548 4549 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4550 struct rtw89_vif_link *rtwvif_link, 4551 bool connect) 4552 { 4553 struct ieee80211_bss_conf *bss_conf; 4554 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4555 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4556 struct rtw89_h2c_bcnfltr *h2c; 4557 u32 len = sizeof(*h2c); 4558 struct sk_buff *skb; 4559 u8 max_cnt, cnt; 4560 int ret; 4561 4562 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4563 return -EINVAL; 4564 4565 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 
4566 return -EINVAL; 4567 4568 rcu_read_lock(); 4569 4570 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4571 4572 if (bss_conf->cqm_rssi_hyst) 4573 hyst = bss_conf->cqm_rssi_hyst; 4574 if (bss_conf->cqm_rssi_thold) 4575 thold = bss_conf->cqm_rssi_thold; 4576 4577 rcu_read_unlock(); 4578 4579 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4580 if (!skb) { 4581 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4582 return -ENOMEM; 4583 } 4584 4585 skb_put(skb, len); 4586 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4587 4588 if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw)) 4589 max_cnt = BIT(7) - 1; 4590 else 4591 max_cnt = BIT(4) - 1; 4592 4593 cnt = min(RTW89_BCN_LOSS_CNT, max_cnt); 4594 4595 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4596 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4597 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4598 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4599 RTW89_H2C_BCNFLTR_W0_MODE) | 4600 le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) | 4601 le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) | 4602 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4603 le32_encode_bits(thold + MAX_RSSI, 4604 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4605 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4606 4607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4608 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4609 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4610 4611 ret = rtw89_h2c_tx(rtwdev, skb, false); 4612 if (ret) { 4613 rtw89_err(rtwdev, "failed to send h2c\n"); 4614 goto fail; 4615 } 4616 4617 return 0; 4618 fail: 4619 dev_kfree_skb_any(skb); 4620 4621 return ret; 4622 } 4623 4624 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4625 struct rtw89_rx_phy_ppdu *phy_ppdu) 4626 { 4627 struct rtw89_h2c_ofld_rssi *h2c; 4628 u32 len = sizeof(*h2c); 4629 struct sk_buff *skb; 4630 s8 rssi; 4631 int ret; 4632 4633 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4634 return -EINVAL; 4635 4636 if (!phy_ppdu) 4637 return -EINVAL; 4638 4639 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4640 if (!skb) { 4641 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4642 return -ENOMEM; 4643 } 4644 4645 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4646 skb_put(skb, len); 4647 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4648 4649 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4650 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4651 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4652 4653 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4654 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4655 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4656 4657 ret = rtw89_h2c_tx(rtwdev, skb, false); 4658 if (ret) { 4659 rtw89_err(rtwdev, "failed to send h2c\n"); 4660 goto fail; 4661 } 4662 4663 return 0; 4664 fail: 4665 dev_kfree_skb_any(skb); 4666 4667 return ret; 4668 } 4669 4670 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4671 { 4672 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4673 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4674 struct rtw89_h2c_ofld *h2c; 4675 u32 len = sizeof(*h2c); 4676 struct sk_buff *skb; 4677 int ret; 4678 4679 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4680 return -EINVAL; 4681 4682 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4683 if (!skb) { 4684 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4685 return 
-ENOMEM; 4686 } 4687 4688 skb_put(skb, len); 4689 h2c = (struct rtw89_h2c_ofld *)skb->data; 4690 4691 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4692 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4693 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4694 4695 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4696 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4697 H2C_FUNC_OFLD_TP, 0, 1, len); 4698 4699 ret = rtw89_h2c_tx(rtwdev, skb, false); 4700 if (ret) { 4701 rtw89_err(rtwdev, "failed to send h2c\n"); 4702 goto fail; 4703 } 4704 4705 return 0; 4706 fail: 4707 dev_kfree_skb_any(skb); 4708 4709 return ret; 4710 } 4711 4712 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4713 { 4714 const struct rtw89_chip_info *chip = rtwdev->chip; 4715 struct rtw89_h2c_ra_v1 *h2c_v1; 4716 struct rtw89_h2c_ra *h2c; 4717 u32 len = sizeof(*h2c); 4718 bool format_v1 = false; 4719 struct sk_buff *skb; 4720 int ret; 4721 4722 if (chip->chip_gen == RTW89_CHIP_BE) { 4723 len = sizeof(*h2c_v1); 4724 format_v1 = true; 4725 } 4726 4727 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4728 if (!skb) { 4729 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4730 return -ENOMEM; 4731 } 4732 skb_put(skb, len); 4733 h2c = (struct rtw89_h2c_ra *)skb->data; 4734 rtw89_debug(rtwdev, RTW89_DBG_RA, 4735 "ra cmd msk: %llx ", ra->ra_mask); 4736 4737 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4738 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4739 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4740 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4741 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4742 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4743 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4744 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4745 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4746 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4747 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4748 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4749 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4750 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4751 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4752 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4753 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4754 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4755 4756 if (!format_v1) 4757 goto csi; 4758 4759 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4760 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4761 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4762 4763 csi: 4764 if (!csi) 4765 goto done; 4766 4767 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4768 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4769 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4770 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4771 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4772 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4773 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4774 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4775 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4776 
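	/* Descriptive note: when @csi is false the BFEE/CSI fix-ups above are
	 * skipped via the earlier "goto done", so those w2/w3 fields keep
	 * their defaults.
	 */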
4777 done: 4778 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4779 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4780 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4781 len); 4782 4783 ret = rtw89_h2c_tx(rtwdev, skb, false); 4784 if (ret) { 4785 rtw89_err(rtwdev, "failed to send h2c\n"); 4786 goto fail; 4787 } 4788 4789 return 0; 4790 fail: 4791 dev_kfree_skb_any(skb); 4792 4793 return ret; 4794 } 4795 4796 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4797 { 4798 struct rtw89_btc *btc = &rtwdev->btc; 4799 struct rtw89_btc_dm *dm = &btc->dm; 4800 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4801 struct rtw89_btc_module *module = &init_info->module; 4802 struct rtw89_btc_ant_info *ant = &module->ant; 4803 struct rtw89_h2c_cxinit *h2c; 4804 u32 len = sizeof(*h2c); 4805 struct sk_buff *skb; 4806 int ret; 4807 4808 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4809 if (!skb) { 4810 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4811 return -ENOMEM; 4812 } 4813 skb_put(skb, len); 4814 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4815 4816 h2c->hdr.type = type; 4817 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4818 4819 h2c->ant_type = ant->type; 4820 h2c->ant_num = ant->num; 4821 h2c->ant_iso = ant->isolation; 4822 h2c->ant_info = 4823 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4824 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4825 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4826 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4827 4828 h2c->mod_rfe = module->rfe_type; 4829 h2c->mod_cv = module->cv; 4830 h2c->mod_info = 4831 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4832 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4833 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4834 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4835 h2c->mod_adie_kt = module->kt_ver_adie; 4836 h2c->wl_gch = init_info->wl_guard_ch; 4837 4838 h2c->info = 4839 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4840 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4841 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4842 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4843 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4844 4845 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4846 H2C_CAT_OUTSRC, BTFC_SET, 4847 SET_DRV_INFO, 0, 0, 4848 len); 4849 4850 ret = rtw89_h2c_tx(rtwdev, skb, false); 4851 if (ret) { 4852 rtw89_err(rtwdev, "failed to send h2c\n"); 4853 goto fail; 4854 } 4855 4856 return 0; 4857 fail: 4858 dev_kfree_skb_any(skb); 4859 4860 return ret; 4861 } 4862 4863 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4864 { 4865 struct rtw89_btc *btc = &rtwdev->btc; 4866 struct rtw89_btc_dm *dm = &btc->dm; 4867 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4868 struct rtw89_h2c_cxinit_v7 *h2c; 4869 u32 len = sizeof(*h2c); 4870 struct sk_buff *skb; 4871 int ret; 4872 4873 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4874 if (!skb) { 4875 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4876 return -ENOMEM; 4877 } 4878 skb_put(skb, len); 4879 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4880 4881 h2c->hdr.type = type; 4882 h2c->hdr.ver = btc->ver->fcxinit; 4883 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4884 h2c->init = *init_info; 4885 4886 
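	/* Descriptive note: unlike the older per-field cxdrv encodings, the v7
	 * format copies the whole init_info structure verbatim; only the cx
	 * header (type/ver/len) is filled in by hand.
	 */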
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4887 H2C_CAT_OUTSRC, BTFC_SET, 4888 SET_DRV_INFO, 0, 0, 4889 len); 4890 4891 ret = rtw89_h2c_tx(rtwdev, skb, false); 4892 if (ret) { 4893 rtw89_err(rtwdev, "failed to send h2c\n"); 4894 goto fail; 4895 } 4896 4897 return 0; 4898 fail: 4899 dev_kfree_skb_any(skb); 4900 4901 return ret; 4902 } 4903 4904 #define PORT_DATA_OFFSET 4 4905 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4906 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4907 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4908 4909 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4910 { 4911 struct rtw89_btc *btc = &rtwdev->btc; 4912 const struct rtw89_btc_ver *ver = btc->ver; 4913 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4914 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4915 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4916 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4917 struct sk_buff *skb; 4918 u32 len; 4919 u8 offset = 0; 4920 u8 *cmd; 4921 int ret; 4922 int i; 4923 4924 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4925 4926 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4927 if (!skb) { 4928 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4929 return -ENOMEM; 4930 } 4931 skb_put(skb, len); 4932 cmd = skb->data; 4933 4934 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4935 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4936 4937 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4938 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4939 4940 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4941 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4942 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4943 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4944 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4945 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4946 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4947 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4948 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4949 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4950 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4951 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4952 4953 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4954 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4955 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4956 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4957 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4958 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4959 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4960 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4961 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4962 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4963 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4964 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4965 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4966 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4967 } 4968 4969 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4970 H2C_CAT_OUTSRC, BTFC_SET, 4971 SET_DRV_INFO, 0, 0, 4972 len); 4973 4974 ret = rtw89_h2c_tx(rtwdev, skb, false); 4975 if (ret) { 4976 rtw89_err(rtwdev, "failed to send h2c\n"); 4977 goto fail; 4978 
} 4979 4980 return 0; 4981 fail: 4982 dev_kfree_skb_any(skb); 4983 4984 return ret; 4985 } 4986 4987 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4988 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4989 4990 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4991 { 4992 struct rtw89_btc *btc = &rtwdev->btc; 4993 const struct rtw89_btc_ver *ver = btc->ver; 4994 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4995 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4996 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4997 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4998 struct sk_buff *skb; 4999 u32 len; 5000 u8 *cmd, offset; 5001 int ret; 5002 int i; 5003 5004 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 5005 5006 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5007 if (!skb) { 5008 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5009 return -ENOMEM; 5010 } 5011 skb_put(skb, len); 5012 cmd = skb->data; 5013 5014 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5015 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5016 5017 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5018 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5019 5020 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5021 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5022 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5023 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5024 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5025 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5026 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5027 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5028 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5029 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5030 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5031 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5032 5033 offset = PORT_DATA_OFFSET; 5034 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5035 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 5036 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 5037 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 5038 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 5039 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 5040 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 5041 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 5042 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 5043 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 5044 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 5045 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 5046 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 5047 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 5048 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 5049 } 5050 5051 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5052 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 5053 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 5054 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5055 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5056 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5057 
RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5058 5059 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5060 H2C_CAT_OUTSRC, BTFC_SET, 5061 SET_DRV_INFO, 0, 0, 5062 len); 5063 5064 ret = rtw89_h2c_tx(rtwdev, skb, false); 5065 if (ret) { 5066 rtw89_err(rtwdev, "failed to send h2c\n"); 5067 goto fail; 5068 } 5069 5070 return 0; 5071 fail: 5072 dev_kfree_skb_any(skb); 5073 5074 return ret; 5075 } 5076 5077 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 5078 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 5079 5080 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 5081 { 5082 struct rtw89_btc *btc = &rtwdev->btc; 5083 const struct rtw89_btc_ver *ver = btc->ver; 5084 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5085 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 5086 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5087 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 5088 struct sk_buff *skb; 5089 u32 len; 5090 u8 *cmd, offset; 5091 int ret; 5092 int i; 5093 5094 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 5095 5096 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5097 if (!skb) { 5098 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5099 return -ENOMEM; 5100 } 5101 skb_put(skb, len); 5102 cmd = skb->data; 5103 5104 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5105 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5106 5107 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5108 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5109 5110 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5111 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5112 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5113 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5114 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5115 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5116 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5117 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5118 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5119 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5120 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5121 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5122 5123 offset = PORT_DATA_OFFSET; 5124 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5125 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 5126 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 5127 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 5128 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 5129 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 5130 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 5131 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 5132 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 5133 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 5134 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 5135 } 5136 5137 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5138 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 5139 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 5140 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5141 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5142 
RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5143 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5144 5145 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5146 H2C_CAT_OUTSRC, BTFC_SET, 5147 SET_DRV_INFO, 0, 0, 5148 len); 5149 5150 ret = rtw89_h2c_tx(rtwdev, skb, false); 5151 if (ret) { 5152 rtw89_err(rtwdev, "failed to send h2c\n"); 5153 goto fail; 5154 } 5155 5156 return 0; 5157 fail: 5158 dev_kfree_skb_any(skb); 5159 5160 return ret; 5161 } 5162 5163 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 5164 { 5165 struct rtw89_btc *btc = &rtwdev->btc; 5166 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 5167 struct rtw89_h2c_cxrole_v7 *h2c; 5168 u32 len = sizeof(*h2c); 5169 struct sk_buff *skb; 5170 int ret; 5171 5172 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5173 if (!skb) { 5174 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5175 return -ENOMEM; 5176 } 5177 skb_put(skb, len); 5178 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 5179 5180 h2c->hdr.type = type; 5181 h2c->hdr.ver = btc->ver->fwlrole; 5182 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5183 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5184 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5185 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5186 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5187 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 5188 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 5189 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 5190 5191 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5192 H2C_CAT_OUTSRC, BTFC_SET, 5193 SET_DRV_INFO, 0, 0, 5194 len); 5195 5196 ret = rtw89_h2c_tx(rtwdev, skb, false); 5197 if (ret) { 5198 rtw89_err(rtwdev, "failed to send h2c\n"); 5199 goto fail; 5200 } 5201 5202 return 0; 5203 fail: 5204 dev_kfree_skb_any(skb); 5205 5206 return ret; 5207 } 5208 5209 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 5210 { 5211 struct rtw89_btc *btc = &rtwdev->btc; 5212 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 5213 struct rtw89_h2c_cxrole_v8 *h2c; 5214 u32 len = sizeof(*h2c); 5215 struct sk_buff *skb; 5216 int ret; 5217 5218 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5219 if (!skb) { 5220 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5221 return -ENOMEM; 5222 } 5223 skb_put(skb, len); 5224 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 5225 5226 h2c->hdr.type = type; 5227 h2c->hdr.ver = btc->ver->fwlrole; 5228 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5229 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5230 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5231 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5232 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5233 5234 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5235 H2C_CAT_OUTSRC, BTFC_SET, 5236 SET_DRV_INFO, 0, 0, 5237 len); 5238 5239 ret = rtw89_h2c_tx(rtwdev, skb, false); 5240 if (ret) { 5241 rtw89_err(rtwdev, "failed to send h2c\n"); 5242 goto fail; 5243 } 5244 5245 return 0; 5246 fail: 5247 dev_kfree_skb_any(skb); 5248 5249 return ret; 5250 } 5251 5252 int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type) 5253 { 5254 struct rtw89_btc *btc = &rtwdev->btc; 5255 struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info; 5256 struct rtw89_h2c_cxosi *h2c; 5257 u32 len = sizeof(*h2c); 5258 struct sk_buff *skb; 5259 int ret; 5260 5261 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5262 if (!skb) { 5263 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n"); 5264 return -ENOMEM; 5265 } 5266 skb_put(skb, len); 5267 h2c = (struct rtw89_h2c_cxosi *)skb->data; 5268 5269 h2c->hdr.type = type; 5270 h2c->hdr.ver = btc->ver->fcxosi; 5271 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5272 h2c->osi = *osi; 5273 5274 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5275 H2C_CAT_OUTSRC, BTFC_SET, 5276 SET_DRV_INFO, 0, 0, 5277 len); 5278 5279 ret = rtw89_h2c_tx(rtwdev, skb, false); 5280 if (ret) { 5281 rtw89_err(rtwdev, "failed to send h2c\n"); 5282 goto fail; 5283 } 5284 5285 return 0; 5286 fail: 5287 dev_kfree_skb_any(skb); 5288 5289 return ret; 5290 } 5291 5292 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 5293 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 5294 { 5295 struct rtw89_btc *btc = &rtwdev->btc; 5296 const struct rtw89_btc_ver *ver = btc->ver; 5297 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 5298 struct sk_buff *skb; 5299 u8 *cmd; 5300 int ret; 5301 5302 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 5303 if (!skb) { 5304 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5305 return -ENOMEM; 5306 } 5307 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 5308 cmd = skb->data; 5309 5310 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5311 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 5312 5313 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 5314 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 5315 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 5316 if (ver->fcxctrl == 0) 5317 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 5318 5319 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5320 H2C_CAT_OUTSRC, BTFC_SET, 5321 SET_DRV_INFO, 0, 0, 5322 H2C_LEN_CXDRVINFO_CTRL); 5323 5324 ret = rtw89_h2c_tx(rtwdev, skb, false); 5325 if (ret) { 5326 rtw89_err(rtwdev, "failed to send h2c\n"); 5327 goto fail; 5328 } 5329 5330 return 0; 5331 fail: 5332 dev_kfree_skb_any(skb); 5333 5334 return ret; 5335 } 5336 5337 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 5338 { 5339 struct rtw89_btc *btc = &rtwdev->btc; 5340 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 5341 struct rtw89_h2c_cxctrl_v7 *h2c; 5342 u32 len = sizeof(*h2c); 5343 struct sk_buff *skb; 5344 int ret; 5345 5346 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5347 if (!skb) { 5348 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 5349 return -ENOMEM; 5350 } 5351 skb_put(skb, len); 5352 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 5353 5354 h2c->hdr.type = type; 5355 h2c->hdr.ver = btc->ver->fcxctrl; 5356 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 5357 h2c->ctrl = *ctrl; 5358 5359 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5360 H2C_CAT_OUTSRC, BTFC_SET, 5361 SET_DRV_INFO, 0, 0, len); 5362 5363 ret = rtw89_h2c_tx(rtwdev, skb, false); 5364 if (ret) { 5365 rtw89_err(rtwdev, "failed to send h2c\n"); 5366 goto fail; 5367 } 5368 5369 return 0; 5370 fail: 5371 dev_kfree_skb_any(skb); 5372 5373 return ret; 5374 } 5375 5376 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 5377 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 5378 { 5379 struct rtw89_btc *btc = &rtwdev->btc; 5380 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 5381 struct sk_buff *skb; 5382 u8 *cmd; 5383 int ret; 5384 5385 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 5386 if (!skb) { 5387 
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 5388 return -ENOMEM; 5389 } 5390 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 5391 cmd = skb->data; 5392 5393 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5394 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 5395 5396 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 5397 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 5398 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 5399 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 5400 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 5401 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 5402 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 5403 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 5404 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 5405 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 5406 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 5407 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 5408 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 5409 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 5410 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 5411 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 5412 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 5413 5414 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5415 H2C_CAT_OUTSRC, BTFC_SET, 5416 SET_DRV_INFO, 0, 0, 5417 H2C_LEN_CXDRVINFO_TRX); 5418 5419 ret = rtw89_h2c_tx(rtwdev, skb, false); 5420 if (ret) { 5421 rtw89_err(rtwdev, "failed to send h2c\n"); 5422 goto fail; 5423 } 5424 5425 return 0; 5426 fail: 5427 dev_kfree_skb_any(skb); 5428 5429 return ret; 5430 } 5431 5432 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 5433 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 5434 { 5435 struct rtw89_btc *btc = &rtwdev->btc; 5436 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5437 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 5438 struct sk_buff *skb; 5439 u8 *cmd; 5440 int ret; 5441 5442 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 5443 if (!skb) { 5444 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5445 return -ENOMEM; 5446 } 5447 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 5448 cmd = skb->data; 5449 5450 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5451 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 5452 5453 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 5454 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 5455 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 5456 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 5457 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 5458 5459 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5460 H2C_CAT_OUTSRC, BTFC_SET, 5461 SET_DRV_INFO, 0, 0, 5462 H2C_LEN_CXDRVINFO_RFK); 5463 5464 ret = rtw89_h2c_tx(rtwdev, skb, false); 5465 if (ret) { 5466 rtw89_err(rtwdev, "failed to send h2c\n"); 5467 goto fail; 5468 } 5469 5470 return 0; 5471 fail: 5472 dev_kfree_skb_any(skb); 5473 5474 return ret; 5475 } 5476 5477 #define H2C_LEN_PKT_OFLD 4 5478 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 5479 { 5480 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5481 struct sk_buff *skb; 5482 unsigned int cond; 5483 u8 *cmd; 5484 int ret; 5485 5486 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 5487 if (!skb) { 5488 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5489 return -ENOMEM; 5490 } 5491 skb_put(skb, H2C_LEN_PKT_OFLD); 5492 cmd = skb->data; 5493 5494 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 5495 
RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 5496 5497 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5498 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5499 H2C_FUNC_PACKET_OFLD, 1, 1, 5500 H2C_LEN_PKT_OFLD); 5501 5502 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 5503 5504 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5505 if (ret < 0) { 5506 rtw89_debug(rtwdev, RTW89_DBG_FW, 5507 "failed to del pkt ofld: id %d, ret %d\n", 5508 id, ret); 5509 return ret; 5510 } 5511 5512 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 5513 return 0; 5514 } 5515 5516 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5517 struct sk_buff *skb_ofld) 5518 { 5519 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5520 struct sk_buff *skb; 5521 unsigned int cond; 5522 u8 *cmd; 5523 u8 alloc_id; 5524 int ret; 5525 5526 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5527 RTW89_MAX_PKT_OFLD_NUM); 5528 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5529 return -ENOSPC; 5530 5531 *id = alloc_id; 5532 5533 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5534 if (!skb) { 5535 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5536 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5537 return -ENOMEM; 5538 } 5539 skb_put(skb, H2C_LEN_PKT_OFLD); 5540 cmd = skb->data; 5541 5542 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5543 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5544 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5545 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5546 5547 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5548 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5549 H2C_FUNC_PACKET_OFLD, 1, 1, 5550 H2C_LEN_PKT_OFLD + skb_ofld->len); 5551 5552 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5553 5554 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5555 if (ret < 0) { 5556 rtw89_debug(rtwdev, RTW89_DBG_FW, 5557 "failed to add pkt ofld: id %d, ret %d\n", 5558 alloc_id, ret); 5559 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5560 return ret; 5561 } 5562 5563 return 0; 5564 } 5565 5566 static 5567 int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num, 5568 struct list_head *chan_list) 5569 { 5570 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5571 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5572 struct rtw89_h2c_chinfo_elem *elem; 5573 struct rtw89_mac_chinfo_ax *ch_info; 5574 struct rtw89_h2c_chinfo *h2c; 5575 struct sk_buff *skb; 5576 unsigned int cond; 5577 int skb_len; 5578 int ret; 5579 5580 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5581 5582 skb_len = struct_size(h2c, elem, ch_num); 5583 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5584 if (!skb) { 5585 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5586 return -ENOMEM; 5587 } 5588 skb_put(skb, sizeof(*h2c)); 5589 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5590 5591 h2c->ch_num = ch_num; 5592 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5593 5594 list_for_each_entry(ch_info, chan_list, list) { 5595 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5596 5597 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5598 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5599 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5600 le32_encode_bits(ch_info->pri_ch, 
RTW89_H2C_CHINFO_W0_PRI_CH); 5601 5602 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5603 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5604 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5605 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5606 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5607 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5608 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5609 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5610 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5611 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5612 5613 if (scan_info->extra_op.set) 5614 elem->w1 |= le32_encode_bits(ch_info->macid_tx, 5615 RTW89_H2C_CHINFO_W1_MACID_TX); 5616 5617 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5618 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5619 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5620 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5621 5622 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5623 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5624 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5625 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5626 } 5627 5628 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5629 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5630 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5631 5632 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5633 5634 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5635 if (ret) { 5636 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5637 return ret; 5638 } 5639 5640 return 0; 5641 } 5642 5643 static 5644 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5645 struct list_head *chan_list, 5646 struct rtw89_vif_link *rtwvif_link) 5647 { 5648 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5649 struct rtw89_h2c_chinfo_elem_be *elem; 5650 struct rtw89_mac_chinfo_be *ch_info; 5651 struct rtw89_h2c_chinfo_be *h2c; 5652 struct sk_buff *skb; 5653 unsigned int cond; 5654 u8 ver = U8_MAX; 5655 int skb_len; 5656 int ret; 5657 5658 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5659 5660 skb_len = struct_size(h2c, elem, ch_num); 5661 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5662 if (!skb) { 5663 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5664 return -ENOMEM; 5665 } 5666 5667 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5668 ver = 0; 5669 5670 skb_put(skb, sizeof(*h2c)); 5671 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5672 5673 h2c->ch_num = ch_num; 5674 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5675 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5676 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5677 5678 list_for_each_entry(ch_info, chan_list, list) { 5679 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5680 5681 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5682 le32_encode_bits(ch_info->central_ch, 5683 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5684 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5685 5686 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5687 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5688 
le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5689 le32_encode_bits(ch_info->pause_data, 5690 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5691 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5692 le32_encode_bits(ch_info->rand_seq_num, 5693 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5694 le32_encode_bits(ch_info->notify_action, 5695 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5696 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 5697 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5698 le32_encode_bits(ch_info->leave_crit, 5699 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5700 le32_encode_bits(ch_info->chkpt_timer, 5701 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5702 5703 elem->w2 = le32_encode_bits(ch_info->leave_time, 5704 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5705 le32_encode_bits(ch_info->leave_th, 5706 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5707 le32_encode_bits(ch_info->tx_pkt_ctrl, 5708 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5709 5710 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5711 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5712 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5713 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5714 5715 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5716 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5717 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5718 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5719 5720 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5721 le32_encode_bits(ch_info->fw_probe0_ssids, 5722 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5723 5724 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5725 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5726 le32_encode_bits(ch_info->fw_probe0_bssids, 5727 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5728 if (ver == 0) 5729 elem->w0 |= 5730 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5731 else 5732 elem->w7 = le32_encode_bits(ch_info->period, 5733 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5734 } 5735 5736 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5737 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5738 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5739 5740 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5741 5742 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5743 if (ret) { 5744 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5745 return ret; 5746 } 5747 5748 return 0; 5749 } 5750 5751 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5752 struct rtw89_scan_option *option, 5753 struct rtw89_vif_link *rtwvif_link, 5754 bool wowlan) 5755 { 5756 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5757 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5758 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5759 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5760 struct rtw89_h2c_scanofld *h2c; 5761 u32 len = sizeof(*h2c); 5762 struct sk_buff *skb; 5763 unsigned int cond; 5764 u64 tsf = 0; 5765 int ret; 5766 5767 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5768 if (!skb) { 5769 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5770 return -ENOMEM; 5771 } 5772 skb_put(skb, len); 5773 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5774 5775 if (option->delay) { 5776 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5777 if (ret) { 5778 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", 
ret); 5779 scan_mode = RTW89_SCAN_IMMEDIATE; 5780 } else { 5781 scan_mode = RTW89_SCAN_DELAY; 5782 tsf += (u64)option->delay * 1000; 5783 } 5784 } 5785 5786 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5787 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5788 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5789 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 5790 5791 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5792 le32_encode_bits(option->target_ch_mode, 5793 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5794 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5795 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5796 5797 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5798 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5799 5800 if (option->target_ch_mode) { 5801 h2c->w1 |= le32_encode_bits(op->band_width, 5802 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5803 le32_encode_bits(op->primary_channel, 5804 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5805 le32_encode_bits(op->channel, 5806 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5807 h2c->w0 |= le32_encode_bits(op->band_type, 5808 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5809 } 5810 5811 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5812 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5813 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5814 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5815 5816 if (scan_info->extra_op.set) 5817 h2c->w6 = le32_encode_bits(scan_info->extra_op.macid, 5818 RTW89_H2C_SCANOFLD_W6_SECOND_MACID); 5819 5820 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5821 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5822 H2C_FUNC_SCANOFLD, 1, 1, 5823 len); 5824 5825 if (option->enable) 5826 cond = RTW89_SCANOFLD_WAIT_COND_START; 5827 else 5828 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5829 5830 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5831 if (ret) { 5832 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5833 return ret; 5834 } 5835 5836 return 0; 5837 } 5838 5839 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5840 struct rtw89_scan_option *option) 5841 { 5842 struct ieee80211_supported_band *sband; 5843 struct ieee80211_channel *chan; 5844 u8 i, idx; 5845 5846 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5847 if (!sband) { 5848 option->prohib_chan = U64_MAX; 5849 return; 5850 } 5851 5852 for (i = 0; i < sband->n_channels; i++) { 5853 chan = &sband->channels[i]; 5854 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5855 idx = (chan->hw_value - 1) / 4; 5856 option->prohib_chan |= BIT(idx); 5857 } 5858 } 5859 } 5860 5861 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5862 struct rtw89_scan_option *option, 5863 struct rtw89_vif_link *rtwvif_link, 5864 bool wowlan) 5865 { 5866 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5867 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5868 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 5869 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5870 struct cfg80211_scan_request *req = rtwvif->scan_req; 5871 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5872 struct rtw89_hw_scan_extra_op scan_op[2] = {}; 5873 struct rtw89_chan *op = &scan_info->op_chan; 5874 struct rtw89_h2c_scanofld_be_opch *opch; 5875 struct rtw89_pktofld_info *pkt_info; 5876 struct rtw89_h2c_scanofld_be *h2c; 5877 struct ieee80211_vif *vif; 5878 struct sk_buff *skb; 5879 u8 
macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5880 u8 opch_size = sizeof(*opch) * option->num_opch; 5881 enum rtw89_scan_be_opmode opmode; 5882 u8 probe_id[NUM_NL80211_BANDS]; 5883 u8 scan_offload_ver = U8_MAX; 5884 u8 cfg_len = sizeof(*h2c); 5885 unsigned int cond; 5886 u8 ap_idx = U8_MAX; 5887 u8 ver = U8_MAX; 5888 u8 policy_val; 5889 void *ptr; 5890 u8 txbcn; 5891 int ret; 5892 u32 len; 5893 u8 i; 5894 5895 scan_op[0].macid = rtwvif_link->mac_id; 5896 scan_op[0].port = rtwvif_link->port; 5897 scan_op[0].chan = *op; 5898 vif = rtwvif_to_vif(rtwvif_link->rtwvif); 5899 if (vif->type == NL80211_IFTYPE_AP) 5900 ap_idx = 0; 5901 5902 if (ext->set) { 5903 scan_op[1] = *ext; 5904 vif = rtwvif_to_vif(ext->rtwvif_link->rtwvif); 5905 if (vif->type == NL80211_IFTYPE_AP) 5906 ap_idx = 1; 5907 } 5908 5909 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5910 5911 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5912 cfg_len = offsetofend(typeof(*h2c), w8); 5913 scan_offload_ver = 0; 5914 } 5915 5916 len = cfg_len + macc_role_size + opch_size; 5917 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5918 if (!skb) { 5919 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5920 return -ENOMEM; 5921 } 5922 5923 skb_put(skb, len); 5924 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5925 ptr = skb->data; 5926 5927 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5928 5929 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5930 ver = 0; 5931 5932 if (!wowlan) { 5933 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5934 if (pkt_info->wildcard_6ghz) { 5935 /* Provide wildcard as template */ 5936 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5937 break; 5938 } 5939 } 5940 } 5941 5942 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5943 le32_encode_bits(option->scan_mode, 5944 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5945 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5946 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5947 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5948 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5949 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5950 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5951 5952 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5953 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5954 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5955 5956 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5957 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5958 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5959 5960 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5961 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5962 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5963 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5964 5965 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5966 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5967 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5968 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5969 le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5970 5971 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5972 5973 h2c->w6 = 
le32_encode_bits(option->prohib_chan, 5974 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5975 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5976 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5977 if (!wowlan && req->no_cck) { 5978 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5979 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5980 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5981 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5982 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5983 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5984 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5985 } 5986 5987 if (scan_offload_ver == 0) 5988 goto flex_member; 5989 5990 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5991 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5992 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5993 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5994 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5995 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5996 5997 flex_member: 5998 ptr += cfg_len; 5999 6000 for (i = 0; i < option->num_macc_role; i++) { 6001 macc_role = ptr; 6002 macc_role->w0 = 6003 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 6004 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 6005 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 6006 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 6007 ptr += sizeof(*macc_role); 6008 } 6009 6010 for (i = 0; i < option->num_opch; i++) { 6011 bool is_ap_idx = i == ap_idx; 6012 6013 opmode = is_ap_idx ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV; 6014 policy_val = is_ap_idx ? 2 : RTW89_OFF_CHAN_TIME / 10; 6015 txbcn = is_ap_idx ? 1 : 0; 6016 6017 opch = ptr; 6018 opch->w0 = le32_encode_bits(scan_op[i].macid, 6019 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 6020 le32_encode_bits(option->band, 6021 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 6022 le32_encode_bits(scan_op[i].port, 6023 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 6024 le32_encode_bits(opmode, 6025 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 6026 le32_encode_bits(true, 6027 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 6028 le32_encode_bits(policy_val, 6029 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 6030 6031 opch->w1 = le32_encode_bits(scan_op[i].chan.band_type, 6032 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 6033 le32_encode_bits(scan_op[i].chan.band_width, 6034 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 6035 le32_encode_bits(0x3, 6036 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 6037 le32_encode_bits(scan_op[i].chan.primary_channel, 6038 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 6039 le32_encode_bits(scan_op[i].chan.channel, 6040 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 6041 6042 opch->w2 = le32_encode_bits(0, 6043 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 6044 le32_encode_bits(0, 6045 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 6046 le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 
1 : 2, 6047 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) | 6048 le32_encode_bits(txbcn, 6049 RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN); 6050 6051 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6052 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 6053 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6054 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 6055 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6056 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 6057 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6058 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 6059 6060 if (ver == 0) 6061 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 6062 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 6063 else 6064 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 6065 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 6066 ptr += sizeof(*opch); 6067 } 6068 6069 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6070 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 6071 H2C_FUNC_SCANOFLD_BE, 1, 1, 6072 len); 6073 6074 if (option->enable) 6075 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 6076 else 6077 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 6078 6079 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6080 if (ret) { 6081 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 6082 return ret; 6083 } 6084 6085 return 0; 6086 } 6087 6088 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 6089 struct rtw89_fw_h2c_rf_reg_info *info, 6090 u16 len, u8 page) 6091 { 6092 struct sk_buff *skb; 6093 u8 class = info->rf_path == RF_PATH_A ? 6094 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 6095 int ret; 6096 6097 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6098 if (!skb) { 6099 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 6100 return -ENOMEM; 6101 } 6102 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 6103 6104 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6105 H2C_CAT_OUTSRC, class, page, 0, 0, 6106 len); 6107 6108 ret = rtw89_h2c_tx(rtwdev, skb, false); 6109 if (ret) { 6110 rtw89_err(rtwdev, "failed to send h2c\n"); 6111 goto fail; 6112 } 6113 6114 return 0; 6115 fail: 6116 dev_kfree_skb_any(skb); 6117 6118 return ret; 6119 } 6120 6121 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 6122 { 6123 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 6124 struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0; 6125 struct rtw89_fw_h2c_rf_get_mccch *mccch; 6126 u32 len = sizeof(*mccch); 6127 struct sk_buff *skb; 6128 u8 ver = U8_MAX; 6129 int ret; 6130 u8 idx; 6131 6132 if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) { 6133 len = sizeof(*mccch_v0); 6134 ver = 0; 6135 } 6136 6137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6138 if (!skb) { 6139 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 6140 return -ENOMEM; 6141 } 6142 skb_put(skb, len); 6143 6144 idx = rfk_mcc->table_idx; 6145 if (ver == 0) { 6146 mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data; 6147 mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 6148 mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 6149 mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]); 6150 mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]); 6151 mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 6152 mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 6153 } else { 6154 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 6155 mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]); 6156 mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]); 6157 mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]); 6158 mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]); 6159 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 
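		/* Non-v0 layout: each table's channel value is mirrored into
		 * both halves of the pair (ch_0_0/ch_0_1, ch_1_0/ch_1_1)
		 * above, and only current_channel is reported here.
		 */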
6160 } 6161 6162 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6163 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 6164 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 6165 len); 6166 6167 ret = rtw89_h2c_tx(rtwdev, skb, false); 6168 if (ret) { 6169 rtw89_err(rtwdev, "failed to send h2c\n"); 6170 goto fail; 6171 } 6172 6173 return 0; 6174 fail: 6175 dev_kfree_skb_any(skb); 6176 6177 return ret; 6178 } 6179 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 6180 6181 int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev, 6182 enum rtw89_chanctx_idx chanctx_idx, 6183 u8 mcc_role_idx, u8 pd_val, bool en) 6184 { 6185 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 6186 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 6187 struct rtw89_h2c_mcc_dig *h2c; 6188 u32 len = sizeof(*h2c); 6189 struct sk_buff *skb; 6190 int ret; 6191 6192 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6193 if (!skb) { 6194 rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n"); 6195 return -ENOMEM; 6196 } 6197 skb_put(skb, len); 6198 h2c = (struct rtw89_h2c_mcc_dig *)skb->data; 6199 6200 h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) | 6201 le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) | 6202 le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) | 6203 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) | 6204 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) | 6205 le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) | 6206 le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE); 6207 h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg, 6208 RTW89_H2C_MCC_DIG_W1_ADDR_LSB) | 6209 le32_encode_bits(dig_regs->seg0_pd_reg >> 8, 6210 RTW89_H2C_MCC_DIG_W1_ADDR_MSB) | 6211 le32_encode_bits(dig_regs->pd_lower_bound_mask, 6212 RTW89_H2C_MCC_DIG_W1_BMASK_LSB) | 6213 le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8, 6214 RTW89_H2C_MCC_DIG_W1_BMASK_MSB); 6215 h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB); 6216 6217 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6218 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 6219 H2C_FUNC_FW_MCC_DIG, 0, 0, len); 6220 6221 ret = rtw89_h2c_tx(rtwdev, skb, false); 6222 if (ret) { 6223 rtw89_err(rtwdev, "failed to send h2c\n"); 6224 goto fail; 6225 } 6226 6227 return 0; 6228 fail: 6229 dev_kfree_skb_any(skb); 6230 6231 return ret; 6232 } 6233 6234 int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 6235 { 6236 const struct rtw89_chip_info *chip = rtwdev->chip; 6237 struct rtw89_vif_link *rtwvif_link; 6238 struct rtw89_h2c_rf_ps_info *h2c; 6239 const struct rtw89_chan *chan; 6240 u32 len = sizeof(*h2c); 6241 unsigned int link_id; 6242 struct sk_buff *skb; 6243 int ret; 6244 u8 path; 6245 u32 val; 6246 6247 if (chip->chip_gen != RTW89_CHIP_BE) 6248 return 0; 6249 6250 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6251 if (!skb) { 6252 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n"); 6253 return -ENOMEM; 6254 } 6255 skb_put(skb, len); 6256 h2c = (struct rtw89_h2c_rf_ps_info *)skb->data; 6257 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6258 6259 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 6260 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 6261 path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx); 6262 val = rtw89_chip_chan_to_rf18_val(rtwdev, chan); 6263 6264 if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) { 6265 rtw89_err(rtwdev, "unsupported rf path (%d)\n", path); 6266 ret = -ENOENT; 6267 goto fail; 6268 } 6269 6270 h2c->rf18[path] 
= cpu_to_le32(val); 6271 h2c->pri_ch[path] = chan->primary_channel; 6272 } 6273 6274 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6275 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 6276 H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0, 6277 sizeof(*h2c)); 6278 6279 ret = rtw89_h2c_tx(rtwdev, skb, false); 6280 if (ret) { 6281 rtw89_err(rtwdev, "failed to send h2c\n"); 6282 goto fail; 6283 } 6284 6285 return 0; 6286 fail: 6287 dev_kfree_skb_any(skb); 6288 6289 return ret; 6290 } 6291 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info); 6292 6293 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 6294 enum rtw89_phy_idx phy_idx) 6295 { 6296 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 6297 struct rtw89_fw_h2c_rfk_pre_info_common *common; 6298 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 6299 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 6300 struct rtw89_fw_h2c_rfk_pre_info *h2c; 6301 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 6302 u32 len = sizeof(*h2c); 6303 struct sk_buff *skb; 6304 u8 ver = U8_MAX; 6305 u8 tbl, path; 6306 u32 val32; 6307 int ret; 6308 6309 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 6310 len = sizeof(*h2c_v1); 6311 ver = 1; 6312 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 6313 len = sizeof(*h2c_v0); 6314 ver = 0; 6315 } 6316 6317 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6318 if (!skb) { 6319 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 6320 return -ENOMEM; 6321 } 6322 skb_put(skb, len); 6323 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 6324 common = &h2c->base_v1.common; 6325 6326 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6327 6328 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 6329 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 6330 6331 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 6332 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6333 common->dbcc.ch[path][tbl] = 6334 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 6335 common->dbcc.band[path][tbl] = 6336 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 6337 } 6338 } 6339 6340 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6341 tbl_sel[path] = rfk_mcc->data[path].table_idx; 6342 6343 common->tbl.cur_ch[path] = 6344 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 6345 common->tbl.cur_band[path] = 6346 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 6347 6348 if (ver <= 1) 6349 continue; 6350 6351 h2c->cur_bandwidth[path] = 6352 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 6353 } 6354 6355 common->phy_idx = cpu_to_le32(phy_idx); 6356 6357 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 6358 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 6359 6360 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 6361 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 6362 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 6363 6364 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 6365 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 6366 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 6367 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 6368 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 6369 h2c_v0->rfmod0 = cpu_to_le32(val32); 6370 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 6371 h2c_v0->rfmod1 = cpu_to_le32(val32); 6372 6373 if (rtw89_is_mlo_1_1(rtwdev)) 6374 h2c_v0->mlo_1_1 = cpu_to_le32(1); 6375 6376 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 6377 6378 goto done; 
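		/* Skip the v1 base handling below; the v0 layout carries its
		 * own mlo_1_1 and rfe_type fields filled in above.
		 */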
6379 } 6380 6381 if (rtw89_is_mlo_1_1(rtwdev)) { 6382 h2c_v1 = &h2c->base_v1; 6383 h2c_v1->mlo_1_1 = cpu_to_le32(1); 6384 } 6385 done: 6386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6387 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6388 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 6389 len); 6390 6391 ret = rtw89_h2c_tx(rtwdev, skb, false); 6392 if (ret) { 6393 rtw89_err(rtwdev, "failed to send h2c\n"); 6394 goto fail; 6395 } 6396 6397 return 0; 6398 fail: 6399 dev_kfree_skb_any(skb); 6400 6401 return ret; 6402 } 6403 6404 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6405 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 6406 { 6407 struct rtw89_efuse *efuse = &rtwdev->efuse; 6408 struct rtw89_hal *hal = &rtwdev->hal; 6409 struct rtw89_h2c_rf_tssi *h2c; 6410 u32 len = sizeof(*h2c); 6411 struct sk_buff *skb; 6412 int ret; 6413 6414 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6415 if (!skb) { 6416 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 6417 return -ENOMEM; 6418 } 6419 skb_put(skb, len); 6420 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 6421 6422 h2c->len = cpu_to_le16(len); 6423 h2c->phy = phy_idx; 6424 h2c->ch = chan->channel; 6425 h2c->bw = chan->band_width; 6426 h2c->band = chan->band_type; 6427 h2c->hwtx_en = true; 6428 h2c->cv = hal->cv; 6429 h2c->tssi_mode = tssi_mode; 6430 h2c->rfe_type = efuse->rfe_type; 6431 6432 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 6433 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 6434 6435 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6436 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6437 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 6438 6439 ret = rtw89_h2c_tx(rtwdev, skb, false); 6440 if (ret) { 6441 rtw89_err(rtwdev, "failed to send h2c\n"); 6442 goto fail; 6443 } 6444 6445 return 0; 6446 fail: 6447 dev_kfree_skb_any(skb); 6448 6449 return ret; 6450 } 6451 6452 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6453 const struct rtw89_chan *chan) 6454 { 6455 struct rtw89_hal *hal = &rtwdev->hal; 6456 struct rtw89_h2c_rf_iqk_v0 *h2c_v0; 6457 struct rtw89_h2c_rf_iqk *h2c; 6458 u32 len = sizeof(*h2c); 6459 struct sk_buff *skb; 6460 u8 ver = U8_MAX; 6461 int ret; 6462 6463 if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) { 6464 len = sizeof(*h2c_v0); 6465 ver = 0; 6466 } 6467 6468 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6469 if (!skb) { 6470 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 6471 return -ENOMEM; 6472 } 6473 skb_put(skb, len); 6474 6475 if (ver == 0) { 6476 h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data; 6477 6478 h2c_v0->phy_idx = cpu_to_le32(phy_idx); 6479 h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en); 6480 6481 goto done; 6482 } 6483 6484 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 6485 6486 h2c->len = sizeof(*h2c); 6487 h2c->ktype = 0; 6488 h2c->phy = phy_idx; 6489 h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx); 6490 h2c->band = chan->band_type; 6491 h2c->bw = chan->band_width; 6492 h2c->ch = chan->channel; 6493 h2c->cv = hal->cv; 6494 6495 done: 6496 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6497 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6498 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 6499 6500 ret = rtw89_h2c_tx(rtwdev, skb, false); 6501 if (ret) { 6502 rtw89_err(rtwdev, "failed to send h2c\n"); 6503 goto fail; 6504 } 6505 6506 return 0; 6507 fail: 6508 dev_kfree_skb_any(skb); 6509 6510 return ret; 6511 } 6512 6513 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy_idx, 6514 const struct rtw89_chan *chan) 6515 { 6516 struct rtw89_h2c_rf_dpk *h2c; 6517 u32 len = sizeof(*h2c); 6518 struct sk_buff *skb; 6519 int ret; 6520 6521 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6522 if (!skb) { 6523 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 6524 return -ENOMEM; 6525 } 6526 skb_put(skb, len); 6527 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 6528 6529 h2c->len = len; 6530 h2c->phy = phy_idx; 6531 h2c->dpk_enable = true; 6532 h2c->kpath = RF_AB; 6533 h2c->cur_band = chan->band_type; 6534 h2c->cur_bw = chan->band_width; 6535 h2c->cur_ch = chan->channel; 6536 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6537 6538 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6539 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6540 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 6541 6542 ret = rtw89_h2c_tx(rtwdev, skb, false); 6543 if (ret) { 6544 rtw89_err(rtwdev, "failed to send h2c\n"); 6545 goto fail; 6546 } 6547 6548 return 0; 6549 fail: 6550 dev_kfree_skb_any(skb); 6551 6552 return ret; 6553 } 6554 6555 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6556 const struct rtw89_chan *chan) 6557 { 6558 struct rtw89_hal *hal = &rtwdev->hal; 6559 struct rtw89_h2c_rf_txgapk *h2c; 6560 u32 len = sizeof(*h2c); 6561 struct sk_buff *skb; 6562 int ret; 6563 6564 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6565 if (!skb) { 6566 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 6567 return -ENOMEM; 6568 } 6569 skb_put(skb, len); 6570 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 6571 6572 h2c->len = len; 6573 h2c->ktype = 2; 6574 h2c->phy = phy_idx; 6575 h2c->kpath = RF_AB; 6576 h2c->band = chan->band_type; 6577 h2c->bw = chan->band_width; 6578 h2c->ch = chan->channel; 6579 h2c->cv = hal->cv; 6580 6581 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6582 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6583 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 6584 6585 ret = rtw89_h2c_tx(rtwdev, skb, false); 6586 if (ret) { 6587 rtw89_err(rtwdev, "failed to send h2c\n"); 6588 goto fail; 6589 } 6590 6591 return 0; 6592 fail: 6593 dev_kfree_skb_any(skb); 6594 6595 return ret; 6596 } 6597 6598 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6599 const struct rtw89_chan *chan) 6600 { 6601 struct rtw89_h2c_rf_dack *h2c; 6602 u32 len = sizeof(*h2c); 6603 struct sk_buff *skb; 6604 int ret; 6605 6606 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6607 if (!skb) { 6608 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 6609 return -ENOMEM; 6610 } 6611 skb_put(skb, len); 6612 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 6613 6614 h2c->len = cpu_to_le32(len); 6615 h2c->phy = cpu_to_le32(phy_idx); 6616 h2c->type = cpu_to_le32(0); 6617 6618 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6619 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6620 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 6621 6622 ret = rtw89_h2c_tx(rtwdev, skb, false); 6623 if (ret) { 6624 rtw89_err(rtwdev, "failed to send h2c\n"); 6625 goto fail; 6626 } 6627 6628 return 0; 6629 fail: 6630 dev_kfree_skb_any(skb); 6631 6632 return ret; 6633 } 6634 6635 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6636 const struct rtw89_chan *chan, bool is_chl_k) 6637 { 6638 struct rtw89_h2c_rf_rxdck_v0 *v0; 6639 struct rtw89_h2c_rf_rxdck *h2c; 6640 u32 len = sizeof(*h2c); 6641 struct sk_buff *skb; 6642 int ver = -1; 6643 int ret; 6644 6645 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 
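		/* Firmware advertising RFK_RXDCK_V0 expects the v0 layout;
		 * the is_chl_k field is only filled in for the newer layout
		 * further below.
		 */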
6646 len = sizeof(*v0); 6647 ver = 0; 6648 } 6649 6650 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6651 if (!skb) { 6652 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 6653 return -ENOMEM; 6654 } 6655 skb_put(skb, len); 6656 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 6657 6658 v0->len = len; 6659 v0->phy = phy_idx; 6660 v0->is_afe = false; 6661 v0->kpath = RF_AB; 6662 v0->cur_band = chan->band_type; 6663 v0->cur_bw = chan->band_width; 6664 v0->cur_ch = chan->channel; 6665 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6666 6667 if (ver == 0) 6668 goto hdr; 6669 6670 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 6671 h2c->is_chl_k = is_chl_k; 6672 6673 hdr: 6674 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6675 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6676 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 6677 6678 ret = rtw89_h2c_tx(rtwdev, skb, false); 6679 if (ret) { 6680 rtw89_err(rtwdev, "failed to send h2c\n"); 6681 goto fail; 6682 } 6683 6684 return 0; 6685 fail: 6686 dev_kfree_skb_any(skb); 6687 6688 return ret; 6689 } 6690 6691 int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable) 6692 { 6693 struct rtw89_h2c_rf_tas *h2c; 6694 u32 len = sizeof(*h2c); 6695 struct sk_buff *skb; 6696 int ret; 6697 6698 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6699 if (!skb) { 6700 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n"); 6701 return -ENOMEM; 6702 } 6703 skb_put(skb, len); 6704 h2c = (struct rtw89_h2c_rf_tas *)skb->data; 6705 6706 h2c->enable = cpu_to_le32(enable); 6707 6708 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6709 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6710 H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len); 6711 6712 ret = rtw89_h2c_tx(rtwdev, skb, false); 6713 if (ret) { 6714 rtw89_err(rtwdev, "failed to send h2c\n"); 6715 goto fail; 6716 } 6717 6718 return 0; 6719 fail: 6720 dev_kfree_skb_any(skb); 6721 6722 return ret; 6723 } 6724 6725 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 6726 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 6727 bool rack, bool dack) 6728 { 6729 struct sk_buff *skb; 6730 int ret; 6731 6732 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6733 if (!skb) { 6734 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6735 return -ENOMEM; 6736 } 6737 skb_put_data(skb, buf, len); 6738 6739 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6740 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6741 len); 6742 6743 ret = rtw89_h2c_tx(rtwdev, skb, false); 6744 if (ret) { 6745 rtw89_err(rtwdev, "failed to send h2c\n"); 6746 goto fail; 6747 } 6748 6749 return 0; 6750 fail: 6751 dev_kfree_skb_any(skb); 6752 6753 return ret; 6754 } 6755 6756 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6757 { 6758 struct sk_buff *skb; 6759 int ret; 6760 6761 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6762 if (!skb) { 6763 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6764 return -ENOMEM; 6765 } 6766 skb_put_data(skb, buf, len); 6767 6768 ret = rtw89_h2c_tx(rtwdev, skb, false); 6769 if (ret) { 6770 rtw89_err(rtwdev, "failed to send h2c\n"); 6771 goto fail; 6772 } 6773 6774 return 0; 6775 fail: 6776 dev_kfree_skb_any(skb); 6777 6778 return ret; 6779 } 6780 6781 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6782 { 6783 struct rtw89_early_h2c *early_h2c; 6784 6785 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6786 6787 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6788 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 
6789 } 6790 } 6791 6792 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6793 { 6794 struct rtw89_early_h2c *early_h2c, *tmp; 6795 6796 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6797 list_del(&early_h2c->list); 6798 kfree(early_h2c->h2c); 6799 kfree(early_h2c); 6800 } 6801 } 6802 6803 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6804 { 6805 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6806 6807 __rtw89_fw_free_all_early_h2c(rtwdev); 6808 } 6809 6810 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6811 { 6812 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6813 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6814 6815 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6816 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6817 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6818 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6819 } 6820 6821 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6822 struct sk_buff *c2h) 6823 { 6824 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6825 u8 category = attr->category; 6826 u8 class = attr->class; 6827 u8 func = attr->func; 6828 6829 switch (category) { 6830 default: 6831 return false; 6832 case RTW89_C2H_CAT_MAC: 6833 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6834 case RTW89_C2H_CAT_OUTSRC: 6835 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6836 } 6837 } 6838 6839 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6840 { 6841 rtw89_fw_c2h_parse_attr(c2h); 6842 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6843 goto enqueue; 6844 6845 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6846 dev_kfree_skb_any(c2h); 6847 return; 6848 6849 enqueue: 6850 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6851 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work); 6852 } 6853 6854 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6855 struct sk_buff *skb) 6856 { 6857 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6858 u8 category = attr->category; 6859 u8 class = attr->class; 6860 u8 func = attr->func; 6861 u16 len = attr->len; 6862 bool dump = true; 6863 6864 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6865 return; 6866 6867 switch (category) { 6868 case RTW89_C2H_CAT_TEST: 6869 break; 6870 case RTW89_C2H_CAT_MAC: 6871 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6872 if (class == RTW89_MAC_C2H_CLASS_INFO && 6873 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6874 dump = false; 6875 break; 6876 case RTW89_C2H_CAT_OUTSRC: 6877 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6878 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6879 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6880 else 6881 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6882 break; 6883 } 6884 6885 if (dump) 6886 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6887 } 6888 6889 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work) 6890 { 6891 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6892 c2h_work); 6893 struct sk_buff *skb, *tmp; 6894 6895 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6896 6897 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6898 skb_unlink(skb, &rtwdev->c2h_queue); 6899 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6900 dev_kfree_skb_any(skb); 6901 } 6902 } 6903 6904 void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev) 6905 { 6906 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6907 
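	/* Drop queued scan C2H events whose sequence number no longer
	 * matches the current scan. The walk is bounded by the queue length
	 * sampled below, so entries added after the snapshot are not
	 * examined.
	 */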
struct sk_buff *skb, *tmp; 6908 int limit; 6909 6910 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6911 6912 limit = skb_queue_len(&rtwdev->c2h_queue); 6913 6914 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6915 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6916 6917 if (--limit < 0) 6918 return; 6919 6920 if (!attr->is_scan_event || attr->scan_seq == scan_info->seq) 6921 continue; 6922 6923 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 6924 "purge obsoleted scan event with seq=%d (cur=%d)\n", 6925 attr->scan_seq, scan_info->seq); 6926 6927 skb_unlink(skb, &rtwdev->c2h_queue); 6928 dev_kfree_skb_any(skb); 6929 } 6930 } 6931 6932 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6933 struct rtw89_mac_h2c_info *info) 6934 { 6935 const struct rtw89_chip_info *chip = rtwdev->chip; 6936 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6937 const u32 *h2c_reg = chip->h2c_regs; 6938 u8 i, val, len; 6939 int ret; 6940 6941 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 6942 rtwdev, chip->h2c_ctrl_reg); 6943 if (ret) { 6944 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6945 return ret; 6946 } 6947 6948 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6949 sizeof(info->u.h2creg[0])); 6950 6951 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6952 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6953 6954 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6955 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6956 6957 fw_info->h2c_counter++; 6958 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6959 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6960 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6961 6962 return 0; 6963 } 6964 6965 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6966 struct rtw89_mac_c2h_info *info) 6967 { 6968 const struct rtw89_chip_info *chip = rtwdev->chip; 6969 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6970 const u32 *c2h_reg = chip->c2h_regs; 6971 u32 timeout; 6972 u8 i, val; 6973 int ret; 6974 6975 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6976 6977 if (rtwdev->hci.type == RTW89_HCI_TYPE_USB) 6978 timeout = RTW89_C2H_TIMEOUT_USB; 6979 else 6980 timeout = RTW89_C2H_TIMEOUT; 6981 6982 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6983 timeout, false, rtwdev, 6984 chip->c2h_ctrl_reg); 6985 if (ret) { 6986 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6987 return ret; 6988 } 6989 6990 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6991 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6992 6993 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6994 6995 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6996 info->content_len = 6997 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6998 RTW89_C2HREG_HDR_LEN; 6999 7000 fw_info->c2h_counter++; 7001 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 7002 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 7003 7004 return 0; 7005 } 7006 7007 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 7008 struct rtw89_mac_h2c_info *h2c_info, 7009 struct rtw89_mac_c2h_info *c2h_info) 7010 { 7011 int ret; 7012 7013 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 7014 lockdep_assert_wiphy(rtwdev->hw->wiphy); 7015 7016 if (!h2c_info && !c2h_info) 7017 return -EINVAL; 7018 7019 if (!h2c_info) 7020 goto recv_c2h; 7021 7022 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 7023 if (ret) 7024 return ret; 7025 7026 recv_c2h: 7027 if (!c2h_info) 7028 return 0; 7029 7030 
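	/* Poll the C2H registers for the firmware's reply and decode it. */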
ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 7031 if (ret) 7032 return ret; 7033 7034 return 0; 7035 } 7036 7037 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 7038 { 7039 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 7040 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 7041 return; 7042 } 7043 7044 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 7045 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 7046 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 7047 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 7048 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 7049 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 7050 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 7051 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 7052 7053 rtw89_fw_prog_cnt_dump(rtwdev); 7054 } 7055 7056 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev) 7057 { 7058 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 7059 struct rtw89_pktofld_info *info, *tmp; 7060 u8 idx; 7061 7062 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 7063 if (!(rtwdev->chip->support_bands & BIT(idx))) 7064 continue; 7065 7066 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 7067 if (test_bit(info->id, rtwdev->pkt_offload)) 7068 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 7069 list_del(&info->list); 7070 kfree(info); 7071 } 7072 } 7073 } 7074 7075 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev, 7076 struct rtw89_vif_link *rtwvif_link) 7077 { 7078 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7079 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7080 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7081 7082 mac->free_chan_list(rtwdev); 7083 rtw89_hw_scan_release_pkt_list(rtwdev); 7084 7085 rtwvif->scan_req = NULL; 7086 rtwvif->scan_ies = NULL; 7087 scan_info->scanning_vif = NULL; 7088 scan_info->abort = false; 7089 scan_info->connected = false; 7090 scan_info->delay = 0; 7091 } 7092 7093 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 7094 struct cfg80211_scan_request *req, 7095 struct rtw89_pktofld_info *info, 7096 enum nl80211_band band, u8 ssid_idx) 7097 { 7098 if (band != NL80211_BAND_6GHZ) 7099 return false; 7100 7101 if (req->ssids[ssid_idx].ssid_len) { 7102 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 7103 req->ssids[ssid_idx].ssid_len); 7104 info->ssid_len = req->ssids[ssid_idx].ssid_len; 7105 return false; 7106 } else { 7107 info->wildcard_6ghz = true; 7108 return true; 7109 } 7110 } 7111 7112 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 7113 struct rtw89_vif_link *rtwvif_link, 7114 struct sk_buff *skb, u8 ssid_idx) 7115 { 7116 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7117 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7118 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 7119 struct cfg80211_scan_request *req = rtwvif->scan_req; 7120 struct rtw89_pktofld_info *info; 7121 struct sk_buff *new; 7122 int ret = 0; 7123 u8 band; 7124 7125 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 7126 if (!(rtwdev->chip->support_bands & BIT(band))) 7127 continue; 7128 7129 new = skb_copy(skb, GFP_KERNEL); 7130 if (!new) { 7131 ret = -ENOMEM; 7132 goto out; 7133 } 7134 skb_put_data(new, ies->ies[band], ies->len[band]); 7135 skb_put_data(new, ies->common_ies, ies->common_ie_len); 7136 7137 info = kzalloc(sizeof(*info), GFP_KERNEL); 7138 if (!info) { 7139 ret = -ENOMEM; 7140 kfree_skb(new); 7141 
goto out; 7142 } 7143 7144 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 7145 7146 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 7147 if (ret) { 7148 kfree_skb(new); 7149 kfree(info); 7150 goto out; 7151 } 7152 7153 list_add_tail(&info->list, &scan_info->pkt_list[band]); 7154 kfree_skb(new); 7155 } 7156 out: 7157 return ret; 7158 } 7159 7160 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 7161 struct rtw89_vif_link *rtwvif_link, 7162 const u8 *mac_addr) 7163 { 7164 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7165 struct cfg80211_scan_request *req = rtwvif->scan_req; 7166 struct sk_buff *skb; 7167 u8 num = req->n_ssids, i; 7168 int ret; 7169 7170 for (i = 0; i < num; i++) { 7171 skb = ieee80211_probereq_get(rtwdev->hw, mac_addr, 7172 req->ssids[i].ssid, 7173 req->ssids[i].ssid_len, 7174 req->ie_len); 7175 if (!skb) 7176 return -ENOMEM; 7177 7178 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 7179 kfree_skb(skb); 7180 7181 if (ret) 7182 return ret; 7183 } 7184 7185 return 0; 7186 } 7187 7188 static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev, 7189 struct ieee80211_scan_ies *ies, 7190 struct cfg80211_scan_request *req, 7191 struct rtw89_mac_chinfo_ax *ch_info) 7192 { 7193 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7194 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 7195 struct cfg80211_scan_6ghz_params *params; 7196 struct rtw89_pktofld_info *info, *tmp; 7197 struct ieee80211_hdr *hdr; 7198 struct sk_buff *skb; 7199 bool found; 7200 int ret = 0; 7201 u8 i; 7202 7203 if (!req->n_6ghz_params) 7204 return 0; 7205 7206 for (i = 0; i < req->n_6ghz_params; i++) { 7207 params = &req->scan_6ghz_params[i]; 7208 7209 if (req->channels[params->channel_idx]->hw_value != 7210 ch_info->pri_ch) 7211 continue; 7212 7213 found = false; 7214 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 7215 if (ether_addr_equal(tmp->bssid, params->bssid)) { 7216 found = true; 7217 break; 7218 } 7219 } 7220 if (found) 7221 continue; 7222 7223 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 7224 NULL, 0, req->ie_len); 7225 if (!skb) 7226 return -ENOMEM; 7227 7228 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 7229 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 7230 hdr = (struct ieee80211_hdr *)skb->data; 7231 ether_addr_copy(hdr->addr3, params->bssid); 7232 7233 info = kzalloc(sizeof(*info), GFP_KERNEL); 7234 if (!info) { 7235 ret = -ENOMEM; 7236 kfree_skb(skb); 7237 goto out; 7238 } 7239 7240 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 7241 if (ret) { 7242 kfree_skb(skb); 7243 kfree(info); 7244 goto out; 7245 } 7246 7247 ether_addr_copy(info->bssid, params->bssid); 7248 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 7249 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 7250 7251 ch_info->tx_pkt = true; 7252 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7253 7254 kfree_skb(skb); 7255 } 7256 7257 out: 7258 return ret; 7259 } 7260 7261 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 7262 int chan_type, int ssid_num, 7263 struct rtw89_mac_chinfo_ax *ch_info) 7264 { 7265 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7266 struct rtw89_pktofld_info *info; 7267 u8 probe_count = 0; 7268 7269 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7270 ch_info->bw = RTW89_SCAN_WIDTH; 7271 ch_info->tx_pkt = true; 7272 ch_info->cfg_tx_pwr = false; 7273 
ch_info->tx_pwr_idx = 0; 7274 ch_info->tx_null = false; 7275 ch_info->pause_data = false; 7276 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7277 7278 if (ssid_num) { 7279 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 7280 if (info->channel_6ghz && 7281 ch_info->pri_ch != info->channel_6ghz) 7282 continue; 7283 else if (info->channel_6ghz && probe_count != 0) 7284 ch_info->period += RTW89_CHANNEL_TIME_6G; 7285 7286 if (info->wildcard_6ghz) 7287 continue; 7288 7289 ch_info->pkt_id[probe_count++] = info->id; 7290 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7291 break; 7292 } 7293 ch_info->num_pkt = probe_count; 7294 } 7295 7296 switch (chan_type) { 7297 case RTW89_CHAN_DFS: 7298 if (ch_info->ch_band != RTW89_BAND_6G) 7299 ch_info->period = max_t(u8, ch_info->period, 7300 RTW89_DFS_CHAN_TIME); 7301 ch_info->dwell_time = RTW89_DWELL_TIME; 7302 break; 7303 case RTW89_CHAN_ACTIVE: 7304 break; 7305 default: 7306 rtw89_err(rtwdev, "Channel type out of bound\n"); 7307 } 7308 } 7309 7310 static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type, 7311 int ssid_num, 7312 struct rtw89_mac_chinfo_ax *ch_info) 7313 { 7314 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7315 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7316 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7317 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7318 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 7319 struct cfg80211_scan_request *req = rtwvif->scan_req; 7320 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 7321 struct rtw89_pktofld_info *info; 7322 u8 band, probe_count = 0; 7323 int ret; 7324 7325 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7326 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7327 ch_info->bw = RTW89_SCAN_WIDTH; 7328 ch_info->tx_pkt = true; 7329 ch_info->cfg_tx_pwr = false; 7330 ch_info->tx_pwr_idx = 0; 7331 ch_info->tx_null = false; 7332 ch_info->pause_data = false; 7333 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7334 7335 if (ch_info->ch_band == RTW89_BAND_6G) { 7336 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7337 !ch_info->is_psc) { 7338 ch_info->tx_pkt = false; 7339 if (!req->duration_mandatory) 7340 ch_info->period -= RTW89_DWELL_TIME_6G; 7341 } 7342 } 7343 7344 ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info); 7345 if (ret) 7346 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 7347 7348 if (ssid_num) { 7349 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7350 7351 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7352 if (info->channel_6ghz && 7353 ch_info->pri_ch != info->channel_6ghz) 7354 continue; 7355 else if (info->channel_6ghz && probe_count != 0) 7356 ch_info->period += RTW89_CHANNEL_TIME_6G; 7357 7358 if (info->wildcard_6ghz) 7359 continue; 7360 7361 ch_info->pkt_id[probe_count++] = info->id; 7362 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7363 break; 7364 } 7365 ch_info->num_pkt = probe_count; 7366 } 7367 7368 switch (chan_type) { 7369 case RTW89_CHAN_OPERATE: 7370 ch_info->central_ch = op->channel; 7371 ch_info->pri_ch = op->primary_channel; 7372 ch_info->ch_band = op->band_type; 7373 ch_info->bw = op->band_width; 7374 ch_info->tx_null = true; 7375 ch_info->num_pkt = 0; 7376 break; 7377 case RTW89_CHAN_DFS: 7378 if (ch_info->ch_band != RTW89_BAND_6G) 7379 ch_info->period = max_t(u8, ch_info->period, 7380 RTW89_DFS_CHAN_TIME); 7381 ch_info->dwell_time = RTW89_DWELL_TIME; 7382 ch_info->pause_data = true; 7383 break; 7384 case RTW89_CHAN_ACTIVE: 7385 
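		/* Active (non-DFS) scan channels only need data traffic
		 * paused while dwelling; probe packets set above are kept.
		 */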
ch_info->pause_data = true; 7386 break; 7387 case RTW89_CHAN_EXTRA_OP: 7388 ch_info->central_ch = ext->chan.channel; 7389 ch_info->pri_ch = ext->chan.primary_channel; 7390 ch_info->ch_band = ext->chan.band_type; 7391 ch_info->bw = ext->chan.band_width; 7392 ch_info->tx_null = true; 7393 ch_info->num_pkt = 0; 7394 ch_info->macid_tx = true; 7395 break; 7396 default: 7397 rtw89_err(rtwdev, "Channel type out of bound\n"); 7398 } 7399 } 7400 7401 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7402 int ssid_num, 7403 struct rtw89_mac_chinfo_be *ch_info) 7404 { 7405 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7406 struct rtw89_pktofld_info *info; 7407 u8 probe_count = 0, i; 7408 7409 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7410 ch_info->bw = RTW89_SCAN_WIDTH; 7411 ch_info->tx_null = false; 7412 ch_info->pause_data = false; 7413 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7414 7415 if (ssid_num) { 7416 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 7417 ch_info->pkt_id[probe_count++] = info->id; 7418 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7419 break; 7420 } 7421 } 7422 7423 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7424 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7425 7426 switch (chan_type) { 7427 case RTW89_CHAN_DFS: 7428 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7429 ch_info->dwell_time = RTW89_DWELL_TIME; 7430 break; 7431 case RTW89_CHAN_ACTIVE: 7432 break; 7433 default: 7434 rtw89_warn(rtwdev, "Channel type out of bound\n"); 7435 break; 7436 } 7437 } 7438 7439 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7440 int ssid_num, 7441 struct rtw89_mac_chinfo_be *ch_info) 7442 { 7443 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7444 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7445 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7446 struct cfg80211_scan_request *req = rtwvif->scan_req; 7447 struct rtw89_pktofld_info *info; 7448 u8 band, probe_count = 0, i; 7449 7450 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7451 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7452 ch_info->bw = RTW89_SCAN_WIDTH; 7453 ch_info->tx_null = false; 7454 ch_info->pause_data = false; 7455 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7456 7457 if (ssid_num) { 7458 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7459 7460 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7461 if (info->channel_6ghz && 7462 ch_info->pri_ch != info->channel_6ghz) 7463 continue; 7464 7465 if (info->wildcard_6ghz) 7466 continue; 7467 7468 ch_info->pkt_id[probe_count++] = info->id; 7469 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7470 break; 7471 } 7472 } 7473 7474 if (ch_info->ch_band == RTW89_BAND_6G) { 7475 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7476 !ch_info->is_psc) { 7477 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7478 if (!req->duration_mandatory) 7479 ch_info->period -= RTW89_DWELL_TIME_6G; 7480 } 7481 } 7482 7483 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7484 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7485 7486 switch (chan_type) { 7487 case RTW89_CHAN_DFS: 7488 if (ch_info->ch_band != RTW89_BAND_6G) 7489 ch_info->period = 7490 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7491 ch_info->dwell_time = RTW89_DWELL_TIME; 7492 ch_info->pause_data = true; 7493 break; 7494 case RTW89_CHAN_ACTIVE: 7495 ch_info->pause_data = true; 7496 break; 7497 default: 7498 rtw89_warn(rtwdev, "Channel type out of bound\n"); 7499 
break; 7500 } 7501 } 7502 7503 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7504 struct rtw89_vif_link *rtwvif_link) 7505 { 7506 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7507 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7508 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7509 struct ieee80211_channel *channel; 7510 struct list_head chan_list; 7511 int list_len; 7512 enum rtw89_chan_type type; 7513 int ret = 0; 7514 u32 idx; 7515 7516 INIT_LIST_HEAD(&chan_list); 7517 for (idx = 0, list_len = 0; 7518 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 7519 idx++, list_len++) { 7520 channel = nd_config->channels[idx]; 7521 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7522 if (!ch_info) { 7523 ret = -ENOMEM; 7524 goto out; 7525 } 7526 7527 ch_info->period = RTW89_CHANNEL_TIME; 7528 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7529 ch_info->central_ch = channel->hw_value; 7530 ch_info->pri_ch = channel->hw_value; 7531 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7532 7533 if (channel->flags & 7534 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7535 type = RTW89_CHAN_DFS; 7536 else 7537 type = RTW89_CHAN_ACTIVE; 7538 7539 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 7540 list_add_tail(&ch_info->list, &chan_list); 7541 } 7542 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list); 7543 7544 out: 7545 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7546 list_del(&ch_info->list); 7547 kfree(ch_info); 7548 } 7549 7550 return ret; 7551 } 7552 7553 static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev, 7554 enum rtw89_chan_type type, 7555 struct list_head *chan_list, 7556 struct cfg80211_scan_request *req, 7557 int *off_chan_time) 7558 { 7559 struct rtw89_mac_chinfo_ax *tmp; 7560 7561 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 7562 if (!tmp) 7563 return -ENOMEM; 7564 7565 switch (type) { 7566 case RTW89_CHAN_OPERATE: 7567 tmp->period = req->duration_mandatory ? 
7568 req->duration : RTW89_CHANNEL_TIME; 7569 *off_chan_time = 0; 7570 break; 7571 case RTW89_CHAN_EXTRA_OP: 7572 tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP; 7573 /* still calc @off_chan_time for scan op */ 7574 *off_chan_time += tmp->period; 7575 break; 7576 default: 7577 kfree(tmp); 7578 return -EINVAL; 7579 } 7580 7581 rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp); 7582 list_add_tail(&tmp->list, chan_list); 7583 7584 return 0; 7585 } 7586 7587 int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev, 7588 struct rtw89_vif_link *rtwvif_link) 7589 { 7590 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7591 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7592 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7593 struct cfg80211_scan_request *req = rtwvif->scan_req; 7594 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7595 struct ieee80211_channel *channel; 7596 struct list_head chan_list; 7597 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 7598 enum rtw89_chan_type type; 7599 int off_chan_time = 0; 7600 int ret; 7601 u32 idx; 7602 7603 INIT_LIST_HEAD(&chan_list); 7604 7605 for (idx = 0; idx < req->n_channels; idx++) { 7606 channel = req->channels[idx]; 7607 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7608 if (!ch_info) { 7609 ret = -ENOMEM; 7610 goto out; 7611 } 7612 7613 if (req->duration) 7614 ch_info->period = req->duration; 7615 else if (channel->band == NL80211_BAND_6GHZ) 7616 ch_info->period = RTW89_CHANNEL_TIME_6G + 7617 RTW89_DWELL_TIME_6G; 7618 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7619 ch_info->period = RTW89_P2P_CHAN_TIME; 7620 else 7621 ch_info->period = RTW89_CHANNEL_TIME; 7622 7623 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7624 ch_info->central_ch = channel->hw_value; 7625 ch_info->pri_ch = channel->hw_value; 7626 ch_info->rand_seq_num = random_seq; 7627 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7628 7629 if (channel->flags & 7630 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7631 type = RTW89_CHAN_DFS; 7632 else 7633 type = RTW89_CHAN_ACTIVE; 7634 rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info); 7635 7636 if (!(scan_info->connected && 7637 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME)) 7638 goto next; 7639 7640 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE, 7641 &chan_list, req, &off_chan_time); 7642 if (ret) { 7643 kfree(ch_info); 7644 goto out; 7645 } 7646 7647 if (!ext->set) 7648 goto next; 7649 7650 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP, 7651 &chan_list, req, &off_chan_time); 7652 if (ret) { 7653 kfree(ch_info); 7654 goto out; 7655 } 7656 7657 next: 7658 list_add_tail(&ch_info->list, &chan_list); 7659 off_chan_time += ch_info->period; 7660 } 7661 7662 list_splice_tail(&chan_list, &scan_info->chan_list); 7663 return 0; 7664 7665 out: 7666 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7667 list_del(&ch_info->list); 7668 kfree(ch_info); 7669 } 7670 7671 return ret; 7672 } 7673 7674 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev) 7675 { 7676 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7677 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7678 7679 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7680 list_del(&ch_info->list); 7681 kfree(ch_info); 7682 } 7683 } 7684 7685 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7686 struct rtw89_vif_link *rtwvif_link) 7687 { 7688 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7689 struct 
rtw89_mac_chinfo_ax *ch_info, *tmp; 7690 unsigned int list_len = 0; 7691 struct list_head list; 7692 int ret; 7693 7694 INIT_LIST_HEAD(&list); 7695 7696 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7697 list_move_tail(&ch_info->list, &list); 7698 7699 list_len++; 7700 if (list_len == RTW89_SCAN_LIST_LIMIT_AX) 7701 break; 7702 } 7703 7704 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list); 7705 7706 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7707 list_del(&ch_info->list); 7708 kfree(ch_info); 7709 } 7710 7711 return ret; 7712 } 7713 7714 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7715 struct rtw89_vif_link *rtwvif_link) 7716 { 7717 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7718 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7719 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7720 struct ieee80211_channel *channel; 7721 struct list_head chan_list; 7722 enum rtw89_chan_type type; 7723 int list_len, ret; 7724 u32 idx; 7725 7726 INIT_LIST_HEAD(&chan_list); 7727 7728 for (idx = 0, list_len = 0; 7729 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 7730 idx++, list_len++) { 7731 channel = nd_config->channels[idx]; 7732 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7733 if (!ch_info) { 7734 ret = -ENOMEM; 7735 goto out; 7736 } 7737 7738 ch_info->period = RTW89_CHANNEL_TIME; 7739 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7740 ch_info->central_ch = channel->hw_value; 7741 ch_info->pri_ch = channel->hw_value; 7742 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7743 7744 if (channel->flags & 7745 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7746 type = RTW89_CHAN_DFS; 7747 else 7748 type = RTW89_CHAN_ACTIVE; 7749 7750 rtw89_pno_scan_add_chan_be(rtwdev, type, 7751 nd_config->n_match_sets, ch_info); 7752 list_add_tail(&ch_info->list, &chan_list); 7753 } 7754 7755 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 7756 rtwvif_link); 7757 7758 out: 7759 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7760 list_del(&ch_info->list); 7761 kfree(ch_info); 7762 } 7763 7764 return ret; 7765 } 7766 7767 int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev, 7768 struct rtw89_vif_link *rtwvif_link) 7769 { 7770 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7771 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7772 struct cfg80211_scan_request *req = rtwvif->scan_req; 7773 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7774 struct ieee80211_channel *channel; 7775 struct list_head chan_list; 7776 enum rtw89_chan_type type; 7777 bool random_seq; 7778 int ret; 7779 u32 idx; 7780 7781 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 7782 INIT_LIST_HEAD(&chan_list); 7783 7784 for (idx = 0; idx < req->n_channels; idx++) { 7785 channel = req->channels[idx]; 7786 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7787 if (!ch_info) { 7788 ret = -ENOMEM; 7789 goto out; 7790 } 7791 7792 if (req->duration) 7793 ch_info->period = req->duration; 7794 else if (channel->band == NL80211_BAND_6GHZ) 7795 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7796 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7797 ch_info->period = RTW89_P2P_CHAN_TIME; 7798 else 7799 ch_info->period = RTW89_CHANNEL_TIME; 7800 7801 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7802 ch_info->central_ch = channel->hw_value; 7803 ch_info->pri_ch = channel->hw_value; 7804 ch_info->rand_seq_num = random_seq; 7805 ch_info->is_psc 
= cfg80211_channel_is_psc(channel); 7806 7807 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7808 type = RTW89_CHAN_DFS; 7809 else 7810 type = RTW89_CHAN_ACTIVE; 7811 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 7812 7813 list_add_tail(&ch_info->list, &chan_list); 7814 } 7815 7816 list_splice_tail(&chan_list, &scan_info->chan_list); 7817 return 0; 7818 7819 out: 7820 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7821 list_del(&ch_info->list); 7822 kfree(ch_info); 7823 } 7824 7825 return ret; 7826 } 7827 7828 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev) 7829 { 7830 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7831 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7832 7833 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7834 list_del(&ch_info->list); 7835 kfree(ch_info); 7836 } 7837 } 7838 7839 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7840 struct rtw89_vif_link *rtwvif_link) 7841 { 7842 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7843 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7844 unsigned int list_len = 0; 7845 struct list_head list; 7846 int ret; 7847 7848 INIT_LIST_HEAD(&list); 7849 7850 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7851 list_move_tail(&ch_info->list, &list); 7852 7853 list_len++; 7854 if (list_len == RTW89_SCAN_LIST_LIMIT_BE) 7855 break; 7856 } 7857 7858 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list, 7859 rtwvif_link); 7860 7861 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7862 list_del(&ch_info->list); 7863 kfree(ch_info); 7864 } 7865 7866 return ret; 7867 } 7868 7869 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 7870 struct rtw89_vif_link *rtwvif_link, 7871 const u8 *mac_addr) 7872 { 7873 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7874 int ret; 7875 7876 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr); 7877 if (ret) { 7878 rtw89_err(rtwdev, "Update probe request failed\n"); 7879 goto out; 7880 } 7881 ret = mac->prep_chan_list(rtwdev, rtwvif_link); 7882 out: 7883 return ret; 7884 } 7885 7886 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev, 7887 struct rtw89_vif_link *rtwvif_link, 7888 u16 tu, bool scan) 7889 { 7890 struct ieee80211_p2p_noa_desc noa_desc = {}; 7891 struct ieee80211_bss_conf *bss_conf; 7892 u16 beacon_int; 7893 u64 tsf; 7894 int ret; 7895 7896 rcu_read_lock(); 7897 7898 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 7899 beacon_int = bss_conf->beacon_int; 7900 7901 rcu_read_unlock(); 7902 7903 tu += beacon_int * 3; 7904 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) 7905 rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000; 7906 7907 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 7908 if (ret) { 7909 rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); 7910 return; 7911 } 7912 7913 noa_desc.start_time = cpu_to_le32(tsf); 7914 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) { 7915 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7916 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7917 noa_desc.count = 1; 7918 } else { 7919 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000)); 7920 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000)); 7921 noa_desc.count = 255; 7922 } 7923 7924 rtw89_p2p_noa_renew(rtwvif_link); 7925 if (scan) 7926 rtw89_p2p_noa_append(rtwvif_link, &noa_desc); 7927 7928 
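/* Push the refreshed P2P NoA attribute to the firmware by updating the beacon template. */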
rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); 7929 } 7930 7931 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan) 7932 { 7933 const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 7934 const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7935 const struct rtw89_chip_info *chip = rtwdev->chip; 7936 struct rtw89_mac_chinfo_ax *chinfo_ax; 7937 struct rtw89_mac_chinfo_be *chinfo_be; 7938 struct rtw89_vif_link *rtwvif_link; 7939 struct list_head *pos, *tmp; 7940 struct ieee80211_vif *vif; 7941 struct rtw89_vif *rtwvif; 7942 u16 tu = 0; 7943 7944 lockdep_assert_wiphy(rtwdev->hw->wiphy); 7945 7946 if (!scan) 7947 goto update; 7948 7949 list_for_each_safe(pos, tmp, &scan_info->chan_list) { 7950 switch (chip->chip_gen) { 7951 case RTW89_CHIP_AX: 7952 chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list); 7953 tu += chinfo_ax->period; 7954 break; 7955 case RTW89_CHIP_BE: 7956 chinfo_be = list_entry(pos, typeof(*chinfo_be), list); 7957 tu += chinfo_be->period; 7958 break; 7959 default: 7960 rtw89_warn(rtwdev, "%s: invalid chip gen %d\n", 7961 __func__, chip->chip_gen); 7962 return; 7963 } 7964 } 7965 7966 if (unlikely(tu == 0)) { 7967 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 7968 "%s: cannot estimate needed TU\n", __func__); 7969 return; 7970 } 7971 7972 update: 7973 list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) { 7974 unsigned int link_id; 7975 7976 vif = rtwvif_to_vif(rtwvif); 7977 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 7978 continue; 7979 7980 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 7981 rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, 7982 tu, scan); 7983 } 7984 } 7985 7986 static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev, 7987 struct rtw89_vif *scan_rtwvif, 7988 const struct rtw89_chan *scan_op) 7989 { 7990 struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 7991 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7992 struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7993 struct rtw89_vif *tmp; 7994 7995 ext->set = false; 7996 if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw)) 7997 return; 7998 7999 list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) { 8000 const struct rtw89_chan *tmp_chan; 8001 struct rtw89_vif_link *tmp_link; 8002 8003 if (tmp == scan_rtwvif) 8004 continue; 8005 8006 tmp_link = rtw89_vif_get_link_inst(tmp, 0); 8007 if (unlikely(!tmp_link)) { 8008 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8009 "hw scan: no HW-0 link for extra op\n"); 8010 continue; 8011 } 8012 8013 tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx); 8014 *ext = (struct rtw89_hw_scan_extra_op){ 8015 .set = true, 8016 .macid = tmp_link->mac_id, 8017 .port = tmp_link->port, 8018 .chan = *tmp_chan, 8019 .rtwvif_link = tmp_link, 8020 }; 8021 8022 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8023 "hw scan: extra op: center %d primary %d\n", 8024 ext->chan.channel, ext->chan.primary_channel); 8025 break; 8026 } 8027 } 8028 8029 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 8030 struct rtw89_vif_link *rtwvif_link, 8031 struct ieee80211_scan_request *scan_req) 8032 { 8033 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 8034 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 8035 struct cfg80211_scan_request *req = &scan_req->req; 8036 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 8037 rtwvif_link->chanctx_idx); 8038 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 8039 struct rtw89_chanctx_pause_parm pause_parm = { 8040 .rsn = 
RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, 8041 .trigger = rtwvif_link, 8042 }; 8043 u32 rx_fltr = rtwdev->hal.rx_fltr; 8044 u8 mac_addr[ETH_ALEN]; 8045 u32 reg; 8046 int ret; 8047 8048 /* clone op and keep it during scan */ 8049 rtwdev->scan_info.op_chan = *chan; 8050 8051 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8052 "hw scan: op: center %d primary %d\n", 8053 chan->channel, chan->primary_channel); 8054 8055 rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan); 8056 8057 rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 8058 rtwdev->scan_info.scanning_vif = rtwvif_link; 8059 rtwdev->scan_info.abort = false; 8060 rtwdev->scan_info.delay = 0; 8061 rtwvif->scan_ies = &scan_req->ies; 8062 rtwvif->scan_req = req; 8063 8064 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 8065 get_random_mask_addr(mac_addr, req->mac_addr, 8066 req->mac_addr_mask); 8067 else 8068 ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 8069 8070 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr); 8071 if (ret) { 8072 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); 8073 return ret; 8074 } 8075 8076 ieee80211_stop_queues(rtwdev->hw); 8077 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 8078 8079 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 8080 8081 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 8082 rx_fltr &= ~B_AX_A_BC; 8083 rx_fltr &= ~B_AX_A_A1_MATCH; 8084 8085 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 8086 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr); 8087 8088 rtw89_chanctx_pause(rtwdev, &pause_parm); 8089 rtw89_phy_dig_suspend(rtwdev); 8090 8091 if (mode == RTW89_ENTITY_MODE_MCC) 8092 rtw89_hw_scan_update_beacon_noa(rtwdev, true); 8093 8094 return 0; 8095 } 8096 8097 struct rtw89_hw_scan_complete_cb_data { 8098 struct rtw89_vif_link *rtwvif_link; 8099 bool aborted; 8100 }; 8101 8102 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) 8103 { 8104 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 8105 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 8106 struct rtw89_hw_scan_complete_cb_data *cb_data = data; 8107 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link; 8108 struct cfg80211_scan_info info = { 8109 .aborted = cb_data->aborted, 8110 }; 8111 u32 reg; 8112 8113 if (!rtwvif_link) 8114 return -EINVAL; 8115 8116 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 8117 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 8118 8119 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 8120 ieee80211_scan_completed(rtwdev->hw, &info); 8121 ieee80211_wake_queues(rtwdev->hw); 8122 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 8123 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 8124 rtw89_phy_dig_resume(rtwdev, true); 8125 8126 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); 8127 8128 if (mode == RTW89_ENTITY_MODE_MCC) 8129 rtw89_hw_scan_update_beacon_noa(rtwdev, false); 8130 8131 return 0; 8132 } 8133 8134 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 8135 struct rtw89_vif_link *rtwvif_link, 8136 bool aborted) 8137 { 8138 struct rtw89_hw_scan_complete_cb_data cb_data = { 8139 .rtwvif_link = rtwvif_link, 8140 .aborted = aborted, 8141 }; 8142 const struct rtw89_chanctx_cb_parm cb_parm = { 8143 .cb = rtw89_hw_scan_complete_cb, 8144 .data = &cb_data, 8145 .caller = __func__, 8146 }; 8147 8148 /* The things here needs to be done after setting channel (for coex) 8149 * and before proceeding entity mode (for MCC). 
So, pass them in a 8150 * callback to keep the right sequence rather than doing them directly. 8151 */ 8152 rtw89_chanctx_proceed(rtwdev, &cb_parm); 8153 } 8154 8155 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 8156 struct rtw89_vif_link *rtwvif_link) 8157 { 8158 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8159 int ret; 8160 8161 scan_info->abort = true; 8162 8163 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 8164 if (ret) 8165 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 8166 8167 /* Indicate ieee80211_scan_completed() before returning, which is safe 8168 * because the scan abort command always waits for completion of 8169 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 8170 * work properly. 8171 */ 8172 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 8173 } 8174 8175 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 8176 { 8177 struct rtw89_vif_link *rtwvif_link; 8178 struct rtw89_vif *rtwvif; 8179 unsigned int link_id; 8180 8181 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 8182 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 8183 /* A non-zero BSSID implies the link is connected or attempting to connect */ 8184 if (!is_zero_ether_addr(rtwvif_link->bssid)) 8185 return true; 8186 } 8187 } 8188 8189 return false; 8190 } 8191 8192 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 8193 struct rtw89_vif_link *rtwvif_link, 8194 bool enable) 8195 { 8196 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 8197 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8198 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 8199 struct rtw89_scan_option opt = {0}; 8200 bool connected; 8201 int ret = 0; 8202 8203 if (!rtwvif_link) 8204 return -EINVAL; 8205 8206 connected = rtwdev->scan_info.connected; 8207 opt.enable = enable; 8208 opt.target_ch_mode = connected; 8209 opt.delay = rtwdev->scan_info.delay; 8210 if (enable) { 8211 ret = mac->add_chan_list(rtwdev, rtwvif_link); 8212 if (ret) 8213 goto out; 8214 } 8215 8216 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 8217 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 8218 opt.scan_mode = RTW89_SCAN_MODE_SA; 8219 opt.band = rtwvif_link->mac_idx; 8220 opt.num_macc_role = 0; 8221 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 8222 opt.num_opch = connected ? 1 : 0; 8223 if (connected && ext->set) 8224 opt.num_opch++; 8225 8226 opt.opch_end = connected ?
0 : RTW89_CHAN_INVALID; 8227 } 8228 8229 ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false); 8230 8231 out: 8232 return ret; 8233 } 8234 8235 #define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566 8236 #define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0 8237 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 8238 { 8239 struct rtw89_h2c_trig_cpu_except *h2c; 8240 u32 cpu_exception_type_def; 8241 u32 len = sizeof(*h2c); 8242 struct sk_buff *skb; 8243 int ret; 8244 8245 if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw)) 8246 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1; 8247 else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw)) 8248 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0; 8249 else 8250 return -EOPNOTSUPP; 8251 8252 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8253 if (!skb) { 8254 rtw89_err(rtwdev, 8255 "failed to alloc skb for fw cpu exception\n"); 8256 return -ENOMEM; 8257 } 8258 8259 skb_put(skb, len); 8260 h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data; 8261 8262 h2c->w0 = le32_encode_bits(cpu_exception_type_def, 8263 RTW89_H2C_CPU_EXCEPTION_TYPE); 8264 8265 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8266 H2C_CAT_TEST, 8267 H2C_CL_FW_STATUS_TEST, 8268 H2C_FUNC_CPU_EXCEPTION, 0, 0, 8269 len); 8270 8271 ret = rtw89_h2c_tx(rtwdev, skb, false); 8272 if (ret) { 8273 rtw89_err(rtwdev, "failed to send h2c\n"); 8274 dev_kfree_skb_any(skb); 8275 return ret; 8276 } 8277 8278 return 0; 8279 } 8280 8281 #define H2C_PKT_DROP_LEN 24 8282 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 8283 const struct rtw89_pkt_drop_params *params) 8284 { 8285 struct sk_buff *skb; 8286 int ret; 8287 8288 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 8289 if (!skb) { 8290 rtw89_err(rtwdev, 8291 "failed to alloc skb for packet drop\n"); 8292 return -ENOMEM; 8293 } 8294 8295 switch (params->sel) { 8296 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 8297 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 8298 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 8299 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 8300 case RTW89_PKT_DROP_SEL_BAND_ONCE: 8301 break; 8302 default: 8303 rtw89_debug(rtwdev, RTW89_DBG_FW, 8304 "H2C of pkt drop might not fully support sel: %d yet\n", 8305 params->sel); 8306 break; 8307 } 8308 8309 skb_put(skb, H2C_PKT_DROP_LEN); 8310 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 8311 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 8312 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 8313 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 8314 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 8315 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 8316 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 8317 params->macid_band_sel[0]); 8318 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 8319 params->macid_band_sel[1]); 8320 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 8321 params->macid_band_sel[2]); 8322 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 8323 params->macid_band_sel[3]); 8324 8325 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8326 H2C_CAT_MAC, 8327 H2C_CL_MAC_FW_OFLD, 8328 H2C_FUNC_PKT_DROP, 0, 0, 8329 H2C_PKT_DROP_LEN); 8330 8331 ret = rtw89_h2c_tx(rtwdev, skb, false); 8332 if (ret) { 8333 rtw89_err(rtwdev, "failed to send h2c\n"); 8334 goto fail; 8335 } 8336 8337 return 0; 8338 8339 fail: 8340 dev_kfree_skb_any(skb); 8341 return ret; 8342 } 8343 8344 #define H2C_KEEP_ALIVE_LEN 4 8345 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct 
rtw89_vif_link *rtwvif_link, 8346 bool enable) 8347 { 8348 struct sk_buff *skb; 8349 u8 pkt_id = 0; 8350 int ret; 8351 8352 if (enable) { 8353 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8354 RTW89_PKT_OFLD_TYPE_NULL_DATA, 8355 &pkt_id); 8356 if (ret) 8357 return -EPERM; 8358 } 8359 8360 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 8361 if (!skb) { 8362 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8363 return -ENOMEM; 8364 } 8365 8366 skb_put(skb, H2C_KEEP_ALIVE_LEN); 8367 8368 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 8369 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 8370 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 8371 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 8372 8373 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8374 H2C_CAT_MAC, 8375 H2C_CL_MAC_WOW, 8376 H2C_FUNC_KEEP_ALIVE, 0, 1, 8377 H2C_KEEP_ALIVE_LEN); 8378 8379 ret = rtw89_h2c_tx(rtwdev, skb, false); 8380 if (ret) { 8381 rtw89_err(rtwdev, "failed to send h2c\n"); 8382 goto fail; 8383 } 8384 8385 return 0; 8386 8387 fail: 8388 dev_kfree_skb_any(skb); 8389 8390 return ret; 8391 } 8392 8393 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8394 bool enable) 8395 { 8396 struct rtw89_h2c_arp_offload *h2c; 8397 u32 len = sizeof(*h2c); 8398 struct sk_buff *skb; 8399 u8 pkt_id = 0; 8400 int ret; 8401 8402 if (enable) { 8403 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8404 RTW89_PKT_OFLD_TYPE_ARP_RSP, 8405 &pkt_id); 8406 if (ret) 8407 return ret; 8408 } 8409 8410 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8411 if (!skb) { 8412 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 8413 return -ENOMEM; 8414 } 8415 8416 skb_put(skb, len); 8417 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 8418 8419 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 8420 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 8421 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 8422 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 8423 8424 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8425 H2C_CAT_MAC, 8426 H2C_CL_MAC_WOW, 8427 H2C_FUNC_ARP_OFLD, 0, 1, 8428 len); 8429 8430 ret = rtw89_h2c_tx(rtwdev, skb, false); 8431 if (ret) { 8432 rtw89_err(rtwdev, "failed to send h2c\n"); 8433 goto fail; 8434 } 8435 8436 return 0; 8437 8438 fail: 8439 dev_kfree_skb_any(skb); 8440 8441 return ret; 8442 } 8443 8444 #define H2C_DISCONNECT_DETECT_LEN 8 8445 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 8446 struct rtw89_vif_link *rtwvif_link, bool enable) 8447 { 8448 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8449 struct sk_buff *skb; 8450 u8 macid = rtwvif_link->mac_id; 8451 int ret; 8452 8453 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 8454 if (!skb) { 8455 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8456 return -ENOMEM; 8457 } 8458 8459 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 8460 8461 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 8462 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 8463 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 8464 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 8465 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 8466 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 8467 } 8468 8469 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8470 H2C_CAT_MAC, 8471 H2C_CL_MAC_WOW, 8472 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 8473 
H2C_DISCONNECT_DETECT_LEN); 8474 8475 ret = rtw89_h2c_tx(rtwdev, skb, false); 8476 if (ret) { 8477 rtw89_err(rtwdev, "failed to send h2c\n"); 8478 goto fail; 8479 } 8480 8481 return 0; 8482 8483 fail: 8484 dev_kfree_skb_any(skb); 8485 8486 return ret; 8487 } 8488 8489 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8490 bool enable) 8491 { 8492 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8493 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 8494 struct rtw89_h2c_cfg_nlo *h2c; 8495 u32 len = sizeof(*h2c); 8496 struct sk_buff *skb; 8497 int ret, i; 8498 8499 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8500 if (!skb) { 8501 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 8502 return -ENOMEM; 8503 } 8504 8505 skb_put(skb, len); 8506 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 8507 8508 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 8509 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 8510 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 8511 8512 if (enable) { 8513 h2c->nlo_cnt = nd_config->n_match_sets; 8514 for (i = 0 ; i < nd_config->n_match_sets; i++) { 8515 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 8516 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 8517 nd_config->match_sets[i].ssid.ssid_len); 8518 } 8519 } 8520 8521 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8522 H2C_CAT_MAC, 8523 H2C_CL_MAC_WOW, 8524 H2C_FUNC_NLO, 0, 1, 8525 len); 8526 8527 ret = rtw89_h2c_tx(rtwdev, skb, false); 8528 if (ret) { 8529 rtw89_err(rtwdev, "failed to send h2c\n"); 8530 goto fail; 8531 } 8532 8533 return 0; 8534 8535 fail: 8536 dev_kfree_skb_any(skb); 8537 return ret; 8538 } 8539 8540 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8541 bool enable) 8542 { 8543 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8544 struct rtw89_h2c_wow_global *h2c; 8545 u8 macid = rtwvif_link->mac_id; 8546 u32 len = sizeof(*h2c); 8547 struct sk_buff *skb; 8548 int ret; 8549 8550 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8551 if (!skb) { 8552 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 8553 return -ENOMEM; 8554 } 8555 8556 skb_put(skb, len); 8557 h2c = (struct rtw89_h2c_wow_global *)skb->data; 8558 8559 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 8560 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 8561 le32_encode_bits(rtw_wow->ptk_alg, 8562 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 8563 le32_encode_bits(rtw_wow->gtk_alg, 8564 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 8565 h2c->key_info = rtw_wow->key_info; 8566 8567 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8568 H2C_CAT_MAC, 8569 H2C_CL_MAC_WOW, 8570 H2C_FUNC_WOW_GLOBAL, 0, 1, 8571 len); 8572 8573 ret = rtw89_h2c_tx(rtwdev, skb, false); 8574 if (ret) { 8575 rtw89_err(rtwdev, "failed to send h2c\n"); 8576 goto fail; 8577 } 8578 8579 return 0; 8580 8581 fail: 8582 dev_kfree_skb_any(skb); 8583 8584 return ret; 8585 } 8586 8587 #define H2C_WAKEUP_CTRL_LEN 4 8588 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 8589 struct rtw89_vif_link *rtwvif_link, 8590 bool enable) 8591 { 8592 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8593 struct sk_buff *skb; 8594 u8 macid = rtwvif_link->mac_id; 8595 int ret; 8596 8597 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 8598 if (!skb) { 8599 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 8600 return -ENOMEM; 8601 } 8602 8603 skb_put(skb, 
H2C_WAKEUP_CTRL_LEN); 8604 8605 if (rtw_wow->pattern_cnt) 8606 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 8607 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 8608 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 8609 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 8610 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 8611 8612 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 8613 8614 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8615 H2C_CAT_MAC, 8616 H2C_CL_MAC_WOW, 8617 H2C_FUNC_WAKEUP_CTRL, 0, 1, 8618 H2C_WAKEUP_CTRL_LEN); 8619 8620 ret = rtw89_h2c_tx(rtwdev, skb, false); 8621 if (ret) { 8622 rtw89_err(rtwdev, "failed to send h2c\n"); 8623 goto fail; 8624 } 8625 8626 return 0; 8627 8628 fail: 8629 dev_kfree_skb_any(skb); 8630 8631 return ret; 8632 } 8633 8634 #define H2C_WOW_CAM_UPD_LEN 24 8635 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 8636 struct rtw89_wow_cam_info *cam_info) 8637 { 8638 struct sk_buff *skb; 8639 int ret; 8640 8641 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 8642 if (!skb) { 8643 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8644 return -ENOMEM; 8645 } 8646 8647 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 8648 8649 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 8650 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 8651 if (cam_info->valid) { 8652 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 8653 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 8654 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 8655 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 8656 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 8657 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 8658 cam_info->negative_pattern_match); 8659 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 8660 cam_info->skip_mac_hdr); 8661 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 8662 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 8663 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 8664 } 8665 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 8666 8667 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8668 H2C_CAT_MAC, 8669 H2C_CL_MAC_WOW, 8670 H2C_FUNC_WOW_CAM_UPD, 0, 1, 8671 H2C_WOW_CAM_UPD_LEN); 8672 8673 ret = rtw89_h2c_tx(rtwdev, skb, false); 8674 if (ret) { 8675 rtw89_err(rtwdev, "failed to send h2c\n"); 8676 goto fail; 8677 } 8678 8679 return 0; 8680 fail: 8681 dev_kfree_skb_any(skb); 8682 8683 return ret; 8684 } 8685 8686 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 8687 struct rtw89_vif_link *rtwvif_link, 8688 bool enable) 8689 { 8690 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8691 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 8692 struct rtw89_h2c_wow_gtk_ofld *h2c; 8693 u8 macid = rtwvif_link->mac_id; 8694 u32 len = sizeof(*h2c); 8695 u8 pkt_id_sa_query = 0; 8696 struct sk_buff *skb; 8697 u8 pkt_id_eapol = 0; 8698 int ret; 8699 8700 if (!rtw_wow->gtk_alg) 8701 return 0; 8702 8703 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8704 if (!skb) { 8705 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 8706 return -ENOMEM; 8707 } 8708 8709 skb_put(skb, len); 8710 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 8711 8712 if (!enable) 8713 goto hdr; 8714 8715 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8716 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 8717 &pkt_id_eapol); 8718 if (ret) 8719 goto fail; 8720 8721 if (gtk_info->igtk_keyid) { 8722 ret = 
rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8723 RTW89_PKT_OFLD_TYPE_SA_QUERY, 8724 &pkt_id_sa_query); 8725 if (ret) 8726 goto fail; 8727 } 8728 8729 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 8730 le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0, 8731 sizeof(gtk_info->txmickey)), 8732 RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 8733 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 8734 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 8735 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 8736 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 8737 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 8738 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 8739 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 8740 h2c->gtk_info = rtw_wow->gtk_info; 8741 8742 hdr: 8743 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8744 H2C_CAT_MAC, 8745 H2C_CL_MAC_WOW, 8746 H2C_FUNC_GTK_OFLD, 0, 1, 8747 len); 8748 8749 ret = rtw89_h2c_tx(rtwdev, skb, false); 8750 if (ret) { 8751 rtw89_err(rtwdev, "failed to send h2c\n"); 8752 goto fail; 8753 } 8754 return 0; 8755 fail: 8756 dev_kfree_skb_any(skb); 8757 8758 return ret; 8759 } 8760 8761 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8762 bool enable) 8763 { 8764 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 8765 struct rtw89_h2c_fwips *h2c; 8766 u32 len = sizeof(*h2c); 8767 struct sk_buff *skb; 8768 8769 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8770 if (!skb) { 8771 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 8772 return -ENOMEM; 8773 } 8774 skb_put(skb, len); 8775 h2c = (struct rtw89_h2c_fwips *)skb->data; 8776 8777 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 8778 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 8779 8780 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8781 H2C_CAT_MAC, 8782 H2C_CL_MAC_PS, 8783 H2C_FUNC_IPS_CFG, 0, 1, 8784 len); 8785 8786 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 8787 } 8788 8789 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 8790 { 8791 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 8792 struct rtw89_h2c_wow_aoac *h2c; 8793 u32 len = sizeof(*h2c); 8794 struct sk_buff *skb; 8795 8796 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8797 if (!skb) { 8798 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 8799 return -ENOMEM; 8800 } 8801 8802 skb_put(skb, len); 8803 8804 /* This H2C only notifies the firmware to generate an AOAC report C2H; 8805 * no parameters are needed. 8806 */ 8807 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8808 H2C_CAT_MAC, 8809 H2C_CL_MAC_WOW, 8810 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 8811 len); 8812 8813 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 8814 } 8815 8816 /* Return < 0 if failures happen while waiting for the condition. 8817 * Return 0 when waiting for the condition succeeds. 8818 * Return > 0 if the wait is considered unreachable due to driver/FW design, 8819 * where 1 means during SER.
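* Callers below, e.g. rtw89_fw_h2c_add_mcc(), return this value directly, so a positive return can surface to their callers while SER is in progress.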
8820 */ 8821 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 8822 struct rtw89_wait_info *wait, unsigned int cond) 8823 { 8824 struct rtw89_wait_response *prep; 8825 int ret = 0; 8826 8827 lockdep_assert_wiphy(rtwdev->hw->wiphy); 8828 8829 prep = rtw89_wait_for_cond_prep(wait, cond); 8830 if (IS_ERR(prep)) 8831 goto out; 8832 8833 ret = rtw89_h2c_tx(rtwdev, skb, false); 8834 if (ret) { 8835 rtw89_err(rtwdev, "failed to send h2c\n"); 8836 dev_kfree_skb_any(skb); 8837 ret = -EBUSY; 8838 goto out; 8839 } 8840 8841 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) { 8842 ret = 1; 8843 goto out; 8844 } 8845 8846 out: 8847 return rtw89_wait_for_cond_eval(wait, prep, ret); 8848 } 8849 8850 #define H2C_ADD_MCC_LEN 16 8851 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 8852 const struct rtw89_fw_mcc_add_req *p) 8853 { 8854 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8855 struct sk_buff *skb; 8856 unsigned int cond; 8857 8858 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 8859 if (!skb) { 8860 rtw89_err(rtwdev, 8861 "failed to alloc skb for add mcc\n"); 8862 return -ENOMEM; 8863 } 8864 8865 skb_put(skb, H2C_ADD_MCC_LEN); 8866 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 8867 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 8868 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 8869 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 8870 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 8871 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 8872 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 8873 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 8874 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 8875 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 8876 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 8877 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 8878 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 8879 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 8880 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 8881 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 8882 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 8883 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 8884 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 8885 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 8886 8887 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8888 H2C_CAT_MAC, 8889 H2C_CL_MCC, 8890 H2C_FUNC_ADD_MCC, 0, 0, 8891 H2C_ADD_MCC_LEN); 8892 8893 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 8894 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8895 } 8896 8897 #define H2C_START_MCC_LEN 12 8898 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 8899 const struct rtw89_fw_mcc_start_req *p) 8900 { 8901 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8902 struct sk_buff *skb; 8903 unsigned int cond; 8904 8905 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 8906 if (!skb) { 8907 rtw89_err(rtwdev, 8908 "failed to alloc skb for start mcc\n"); 8909 return -ENOMEM; 8910 } 8911 8912 skb_put(skb, H2C_START_MCC_LEN); 8913 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 8914 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 8915 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 8916 
RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 8917 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 8918 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 8919 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 8920 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 8921 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 8922 8923 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8924 H2C_CAT_MAC, 8925 H2C_CL_MCC, 8926 H2C_FUNC_START_MCC, 0, 0, 8927 H2C_START_MCC_LEN); 8928 8929 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 8930 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8931 } 8932 8933 #define H2C_STOP_MCC_LEN 4 8934 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 8935 bool prev_groups) 8936 { 8937 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8938 struct sk_buff *skb; 8939 unsigned int cond; 8940 8941 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 8942 if (!skb) { 8943 rtw89_err(rtwdev, 8944 "failed to alloc skb for stop mcc\n"); 8945 return -ENOMEM; 8946 } 8947 8948 skb_put(skb, H2C_STOP_MCC_LEN); 8949 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 8950 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 8951 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 8952 8953 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8954 H2C_CAT_MAC, 8955 H2C_CL_MCC, 8956 H2C_FUNC_STOP_MCC, 0, 0, 8957 H2C_STOP_MCC_LEN); 8958 8959 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 8960 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8961 } 8962 8963 #define H2C_DEL_MCC_GROUP_LEN 4 8964 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 8965 bool prev_groups) 8966 { 8967 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8968 struct sk_buff *skb; 8969 unsigned int cond; 8970 8971 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 8972 if (!skb) { 8973 rtw89_err(rtwdev, 8974 "failed to alloc skb for del mcc group\n"); 8975 return -ENOMEM; 8976 } 8977 8978 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 8979 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 8980 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 8981 8982 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8983 H2C_CAT_MAC, 8984 H2C_CL_MCC, 8985 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 8986 H2C_DEL_MCC_GROUP_LEN); 8987 8988 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 8989 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8990 } 8991 8992 #define H2C_RESET_MCC_GROUP_LEN 4 8993 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 8994 { 8995 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8996 struct sk_buff *skb; 8997 unsigned int cond; 8998 8999 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 9000 if (!skb) { 9001 rtw89_err(rtwdev, 9002 "failed to alloc skb for reset mcc group\n"); 9003 return -ENOMEM; 9004 } 9005 9006 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 9007 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 9008 9009 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9010 H2C_CAT_MAC, 9011 H2C_CL_MCC, 9012 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 9013 H2C_RESET_MCC_GROUP_LEN); 9014 9015 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 9016 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9017 } 9018 9019 #define H2C_MCC_REQ_TSF_LEN 4 9020 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 9021 const struct rtw89_fw_mcc_tsf_req *req, 9022 struct 
rtw89_mac_mcc_tsf_rpt *rpt) 9023 { 9024 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9025 struct rtw89_mac_mcc_tsf_rpt *tmp; 9026 struct sk_buff *skb; 9027 unsigned int cond; 9028 int ret; 9029 9030 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 9031 if (!skb) { 9032 rtw89_err(rtwdev, 9033 "failed to alloc skb for mcc req tsf\n"); 9034 return -ENOMEM; 9035 } 9036 9037 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 9038 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 9039 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 9040 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 9041 9042 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9043 H2C_CAT_MAC, 9044 H2C_CL_MCC, 9045 H2C_FUNC_MCC_REQ_TSF, 0, 0, 9046 H2C_MCC_REQ_TSF_LEN); 9047 9048 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 9049 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9050 if (ret) 9051 return ret; 9052 9053 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 9054 *rpt = *tmp; 9055 9056 return 0; 9057 } 9058 9059 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 9060 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 9061 u8 *bitmap) 9062 { 9063 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9064 struct sk_buff *skb; 9065 unsigned int cond; 9066 u8 map_len; 9067 u8 h2c_len; 9068 9069 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 9070 map_len = RTW89_MAX_MAC_ID_NUM / 8; 9071 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 9072 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 9073 if (!skb) { 9074 rtw89_err(rtwdev, 9075 "failed to alloc skb for mcc macid bitmap\n"); 9076 return -ENOMEM; 9077 } 9078 9079 skb_put(skb, h2c_len); 9080 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 9081 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 9082 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 9083 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 9084 9085 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9086 H2C_CAT_MAC, 9087 H2C_CL_MCC, 9088 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 9089 h2c_len); 9090 9091 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 9092 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9093 } 9094 9095 #define H2C_MCC_SYNC_LEN 4 9096 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 9097 u8 target, u8 offset) 9098 { 9099 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9100 struct sk_buff *skb; 9101 unsigned int cond; 9102 9103 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 9104 if (!skb) { 9105 rtw89_err(rtwdev, 9106 "failed to alloc skb for mcc sync\n"); 9107 return -ENOMEM; 9108 } 9109 9110 skb_put(skb, H2C_MCC_SYNC_LEN); 9111 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 9112 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 9113 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 9114 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 9115 9116 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9117 H2C_CAT_MAC, 9118 H2C_CL_MCC, 9119 H2C_FUNC_MCC_SYNC, 0, 0, 9120 H2C_MCC_SYNC_LEN); 9121 9122 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 9123 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9124 } 9125 9126 #define H2C_MCC_SET_DURATION_LEN 20 9127 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 9128 const struct rtw89_fw_mcc_duration *p) 9129 { 9130 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9131 struct sk_buff *skb; 9132 unsigned int cond; 9133 
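/* Fill the set-duration H2C with both MACIDs, the start TSF and the per-slot durations, then wait for the matching MCC done event from firmware. */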
9134 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 9135 if (!skb) { 9136 rtw89_err(rtwdev, 9137 "failed to alloc skb for mcc set duration\n"); 9138 return -ENOMEM; 9139 } 9140 9141 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 9142 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 9143 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 9144 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 9145 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 9146 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 9147 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 9148 p->start_tsf_low); 9149 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 9150 p->start_tsf_high); 9151 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 9152 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 9153 9154 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9155 H2C_CAT_MAC, 9156 H2C_CL_MCC, 9157 H2C_FUNC_MCC_SET_DURATION, 0, 0, 9158 H2C_MCC_SET_DURATION_LEN); 9159 9160 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 9161 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9162 } 9163 9164 static 9165 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 9166 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 9167 struct rtw89_h2c_mrc_add_slot *slot_h2c) 9168 { 9169 bool fill_h2c = !!slot_h2c; 9170 unsigned int i; 9171 9172 if (!fill_h2c) 9173 goto calc_len; 9174 9175 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 9176 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 9177 le32_encode_bits(slot_arg->courtesy_en, 9178 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 9179 le32_encode_bits(slot_arg->role_num, 9180 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 9181 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 9182 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 9183 le32_encode_bits(slot_arg->courtesy_target, 9184 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 9185 9186 for (i = 0; i < slot_arg->role_num; i++) { 9187 slot_h2c->roles[i].w0 = 9188 le32_encode_bits(slot_arg->roles[i].macid, 9189 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 9190 le32_encode_bits(slot_arg->roles[i].role_type, 9191 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 9192 le32_encode_bits(slot_arg->roles[i].is_master, 9193 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 9194 le32_encode_bits(slot_arg->roles[i].en_tx_null, 9195 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 9196 le32_encode_bits(false, 9197 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 9198 le32_encode_bits(false, 9199 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 9200 slot_h2c->roles[i].w1 = 9201 le32_encode_bits(slot_arg->roles[i].central_ch, 9202 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 9203 le32_encode_bits(slot_arg->roles[i].primary_ch, 9204 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 9205 le32_encode_bits(slot_arg->roles[i].bw, 9206 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 9207 le32_encode_bits(slot_arg->roles[i].band, 9208 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 9209 le32_encode_bits(slot_arg->roles[i].null_early, 9210 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 9211 le32_encode_bits(false, 9212 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 9213 le32_encode_bits(true, 9214 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 9215 slot_h2c->roles[i].macid_main_bitmap = 9216 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 9217 slot_h2c->roles[i].macid_paired_bitmap = 9218 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 9219 } 9220 9221 calc_len: 9222 return struct_size(slot_h2c, roles, 
slot_arg->role_num); 9223 } 9224 9225 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 9226 const struct rtw89_fw_mrc_add_arg *arg) 9227 { 9228 struct rtw89_h2c_mrc_add *h2c_head; 9229 struct sk_buff *skb; 9230 unsigned int i; 9231 void *tmp; 9232 u32 len; 9233 int ret; 9234 9235 len = sizeof(*h2c_head); 9236 for (i = 0; i < arg->slot_num; i++) 9237 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 9238 9239 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9240 if (!skb) { 9241 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 9242 return -ENOMEM; 9243 } 9244 9245 skb_put(skb, len); 9246 tmp = skb->data; 9247 9248 h2c_head = tmp; 9249 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 9250 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 9251 le32_encode_bits(arg->sch_type, 9252 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 9253 le32_encode_bits(arg->slot_num, 9254 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 9255 le32_encode_bits(arg->btc_in_sch, 9256 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 9257 9258 tmp += sizeof(*h2c_head); 9259 for (i = 0; i < arg->slot_num; i++) 9260 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 9261 9262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9263 H2C_CAT_MAC, 9264 H2C_CL_MRC, 9265 H2C_FUNC_ADD_MRC, 0, 0, 9266 len); 9267 9268 ret = rtw89_h2c_tx(rtwdev, skb, false); 9269 if (ret) { 9270 rtw89_err(rtwdev, "failed to send h2c\n"); 9271 dev_kfree_skb_any(skb); 9272 return -EBUSY; 9273 } 9274 9275 return 0; 9276 } 9277 9278 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 9279 const struct rtw89_fw_mrc_start_arg *arg) 9280 { 9281 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9282 struct rtw89_h2c_mrc_start *h2c; 9283 u32 len = sizeof(*h2c); 9284 struct sk_buff *skb; 9285 unsigned int cond; 9286 9287 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9288 if (!skb) { 9289 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 9290 return -ENOMEM; 9291 } 9292 9293 skb_put(skb, len); 9294 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 9295 9296 h2c->w0 = le32_encode_bits(arg->sch_idx, 9297 RTW89_H2C_MRC_START_W0_SCH_IDX) | 9298 le32_encode_bits(arg->old_sch_idx, 9299 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 9300 le32_encode_bits(arg->action, 9301 RTW89_H2C_MRC_START_W0_ACTION); 9302 9303 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 9304 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 9305 9306 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9307 H2C_CAT_MAC, 9308 H2C_CL_MRC, 9309 H2C_FUNC_START_MRC, 0, 0, 9310 len); 9311 9312 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 9313 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9314 } 9315 9316 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 9317 { 9318 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9319 struct rtw89_h2c_mrc_del *h2c; 9320 u32 len = sizeof(*h2c); 9321 struct sk_buff *skb; 9322 unsigned int cond; 9323 9324 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9325 if (!skb) { 9326 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 9327 return -ENOMEM; 9328 } 9329 9330 skb_put(skb, len); 9331 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 9332 9333 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 9334 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 9335 9336 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9337 H2C_CAT_MAC, 9338 H2C_CL_MRC, 9339 H2C_FUNC_DEL_MRC, 0, 0, 9340 len); 9341 9342 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 9343 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, 
int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mrc_req_tsf_arg *arg,
			     struct rtw89_mac_mrc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_req_tsf *h2c;
	struct rtw89_mac_mrc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, infos, arg->num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;

	h2c->req_tsf_num = arg->num;
	for (i = 0; i < arg->num; i++)
		h2c->infos[i] =
			u8_encode_bits(arg->infos[i].band,
				       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
			u8_encode_bits(arg->infos[i].port,
				       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_REQ_TSF, 0, 0,
			      len);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
	if (ret)
		return ret;

	tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
				const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
{
	struct rtw89_h2c_mrc_upd_bitmap *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
		  le32_encode_bits(arg->action,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
		  le32_encode_bits(arg->macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
	h2c->w1 = le32_encode_bits(arg->client_macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
			  const struct rtw89_fw_mrc_sync_arg *arg)
{
	struct rtw89_h2c_mrc_sync *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_sync *)skb->data;

	h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
		  le32_encode_bits(arg->src.port,
				   RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
		  le32_encode_bits(arg->src.band,
				   RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
		  le32_encode_bits(arg->dest.port,
				   RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
		  le32_encode_bits(arg->dest.band,
				   RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
	h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_SYNC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

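/* Update the duration of one or more existing MRC slots in a schedule,
 * anchored at the given start TSF. Sent without waiting for a firmware
 * completion event.
 */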
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
{
	struct rtw89_h2c_mrc_upd_duration *h2c;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, slots, arg->slot_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
		  le32_encode_bits(arg->slot_num,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
		  le32_encode_bits(false,
				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);

	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	for (i = 0; i < arg->slot_num; i++) {
		h2c->slots[i] =
			le32_encode_bits(arg->slots[i].slot_idx,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
			le32_encode_bits(arg->slots[i].duration,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
{
	struct rtw89_h2c_ap_info *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ap_info *)skb->data;

	h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_AP,
			      H2C_FUNC_AP_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
{
	int ret;

	if (en) {
		if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
			return 0;
	} else {
		if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
			return 0;
	}

	ret = rtw89_fw_h2c_ap_info(rtwdev, en);
	if (ret) {
		if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
			return ret;

		/* During recovery, neither the driver nor the stack has full
		 * error handling, so only warn and return 0 with the refcount
		 * updated as usual. This avoids a refcount underflow when this
		 * function is later called with @en == false.
		 */
		rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
	}

	if (en)
		refcount_set(&rtwdev->refcount_ap_info, 1);

	return 0;
}

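/* Enable or disable an MLO link for the given link's mac_id and wait for
 * the firmware acknowledgment via the MLO wait condition.
 */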
int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
	struct rtw89_h2c_mlo_link_cfg *h2c;
	u8 mac_id = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MLO,
			      H2C_FUNC_MLO_LINK_CFG, 0, 0,
			      len);

	cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
			  str_enable_disable(enable), rtwvif_link->link_id, ret);
		return ret;
	}

	return 0;
}

static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
	static const u8 zeros[U8_MAX] = {};

	return memcmp(ext_ptr, zeros, ext_len) == 0;
}

#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
({							\
	u8 __var_sz = sizeof(*(e));			\
	bool __accept;					\
	if (__var_sz >= (ent_sz))			\
		__accept = true;			\
	else						\
		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
						     (ent_sz) - __var_sz);\
	__accept;					\
})

static bool
fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
		return false;

	switch (e->rs) {
	case RTW89_RS_CCK:
		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
			return false;
		break;
	case RTW89_RS_OFDM:
		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
			return false;
		break;
	case RTW89_RS_MCS:
		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
		    e->nss >= RTW89_NSS_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_HEDCM:
		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
		    e->nss >= RTW89_NSS_HEDCM_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_OFFSET:
		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

static
void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_conf *conf = tbl->data;
	struct rtw89_fw_txpwr_byrate_entry entry = {};
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	const void *cursor;
	u32 data;
	s8 *byr;
	int i;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
			continue;

		byr_head = &rtwdev->byr[entry.band][entry.bw];
		data = le32_to_cpu(entry.data);
		desc.ofdma = entry.ofdma;
		desc.nss = entry.nss;
		desc.rs = entry.rs;

		for (i = 0; i < entry.len; i++, data >>= 8) {
			desc.idx = entry.shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}

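/* The loaders below walk the firmware-provided TX power tables entry by
 * entry. Each *_entry_valid() helper bounds-checks the indices of one entry
 * (and tolerates a larger firmware entry size as long as the extra bytes are
 * zero); invalid entries are silently skipped rather than failing the load.
 */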
static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_2G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_5G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_6G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

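/* RU (per resource unit) TX power limit loaders: the same validate-then-copy
 * pattern as above, indexed by RU, NTX, regulatory domain and channel
 * (plus the 6 GHz power category on the 6 GHz band).
 */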
static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

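/* TX shaping limit tables: selected per band, rate section and regulatory
 * domain (and per band/regd only for the RU variant), using the same
 * validation scheme as the power limit tables.
 */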
static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
					const struct rtw89_rfe_parms *parms)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
	    !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
	    !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
	    !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
		return false;

	return true;
}

const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
		parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
		parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
		parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
		parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
		parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
		parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);

	return parms;
}