1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev); 19 20 struct rtw89_eapol_2_of_2 { 21 u8 gtkbody[14]; 22 u8 key_des_ver; 23 u8 rsvd[92]; 24 } __packed; 25 26 struct rtw89_sa_query { 27 u8 category; 28 u8 action; 29 } __packed; 30 31 struct rtw89_arp_rsp { 32 u8 llc_hdr[sizeof(rfc1042_header)]; 33 __be16 llc_type; 34 struct arphdr arp_hdr; 35 u8 sender_hw[ETH_ALEN]; 36 __be32 sender_ip; 37 u8 target_hw[ETH_ALEN]; 38 __be32 target_ip; 39 } __packed; 40 41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 42 43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { 44 .ver = 0x00, 45 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 47 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 48 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 49 }, 50 }; 51 EXPORT_SYMBOL(rtw89_fw_blacklist_default); 52 53 union rtw89_fw_element_arg { 54 size_t offset; 55 enum rtw89_rf_path rf_path; 56 enum rtw89_fw_type fw_type; 57 }; 58 59 struct rtw89_fw_element_handler { 60 int (*fn)(struct rtw89_dev *rtwdev, 61 const struct rtw89_fw_element_hdr *elm, 62 const union rtw89_fw_element_arg arg); 63 const union rtw89_fw_element_arg arg; 64 const char *name; 65 }; 66 67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 68 struct sk_buff *skb); 69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 70 struct rtw89_wait_info *wait, unsigned int cond); 71 static int __parse_security_section(struct rtw89_dev *rtwdev, 72 struct rtw89_fw_bin_info *info, 73 struct rtw89_fw_hdr_section_info *section_info, 74 const void *content, 75 u32 *mssc_len); 76 77 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 78 bool header) 79 { 80 struct sk_buff *skb; 81 u32 header_len = 0; 82 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 83 84 if (header) 85 header_len = H2C_HEADER_LEN; 86 87 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 88 if (!skb) 89 return NULL; 90 skb_reserve(skb, header_len + h2c_desc_size); 91 memset(skb->data, 0, len); 92 93 return skb; 94 } 95 96 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 97 { 98 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 99 } 100 101 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 102 { 103 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 104 } 105 106 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 107 { 108 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 109 u8 val; 110 int ret; 111 112 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 113 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 114 1, FWDL_WAIT_CNT, false, rtwdev, type); 115 if (ret) { 116 switch (val) { 117 case RTW89_FWDL_CHECKSUM_FAIL: 118 rtw89_err(rtwdev, "fw checksum fail\n"); 119 return -EINVAL; 120 121 case RTW89_FWDL_SECURITY_FAIL: 122 rtw89_err(rtwdev, "fw security fail\n"); 123 return -EINVAL; 124 125 case RTW89_FWDL_CV_NOT_MATCH: 126 rtw89_err(rtwdev, "fw cv not match\n"); 127 return -EINVAL; 128 129 default: 130 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 131 return 
-EBUSY; 132 } 133 } 134 135 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 136 137 return 0; 138 } 139 140 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 141 struct rtw89_fw_bin_info *info) 142 { 143 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 144 const struct rtw89_chip_info *chip = rtwdev->chip; 145 struct rtw89_fw_hdr_section_info *section_info; 146 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 147 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 148 const struct rtw89_fw_hdr_section *section; 149 const u8 *fw_end = fw + len; 150 const u8 *bin; 151 u32 base_hdr_len; 152 u32 mssc_len; 153 int ret; 154 u32 i; 155 156 if (!info) 157 return -EINVAL; 158 159 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 160 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 161 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 162 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 163 164 if (chip->chip_gen == RTW89_CHIP_AX) 165 info->part_size = FWDL_SECTION_PER_PKT_LEN; 166 else 167 info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_W7_PART_SIZE); 168 169 if (info->dynamic_hdr_en) { 170 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 171 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 172 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 173 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 174 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 175 return -EINVAL; 176 } 177 } else { 178 info->hdr_len = base_hdr_len; 179 info->dynamic_hdr_len = 0; 180 } 181 182 bin = fw + info->hdr_len; 183 184 /* jump to section header */ 185 section_info = info->section_info; 186 for (i = 0; i < info->section_num; i++) { 187 section = &fw_hdr->sections[i]; 188 section_info->type = 189 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 190 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 191 192 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 193 section_info->len += FWDL_SECTION_CHKSUM_LEN; 194 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 195 section_info->dladdr = 196 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 197 section_info->addr = bin; 198 199 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 200 section_info->mssc = 201 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 202 203 ret = __parse_security_section(rtwdev, info, section_info, 204 bin, &mssc_len); 205 if (ret) 206 return ret; 207 208 if (sec->secure_boot && chip->chip_id == RTL8852B) 209 section_info->len_override = 960; 210 } else { 211 section_info->mssc = 0; 212 mssc_len = 0; 213 } 214 215 rtw89_debug(rtwdev, RTW89_DBG_FW, 216 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 217 i, section_info->type, section_info->len, 218 section_info->mssc, mssc_len, bin - fw); 219 rtw89_debug(rtwdev, RTW89_DBG_FW, 220 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 221 section_info->ignore, section_info->key_addr, 222 section_info->key_addr ? 
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
					       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content +
section_info->len + 343 le32_to_cpu(mss_hdr->key_raw_offset) + 344 key_sign_len * real_key_idx; 345 section_info->key_len = key_sign_len; 346 section_info->key_idx = real_key_idx; 347 348 out: 349 if (info->secure_section_exist) { 350 section_info->ignore = true; 351 return 0; 352 } 353 354 info->secure_section_exist = true; 355 356 return 0; 357 358 ignore: 359 section_info->ignore = true; 360 361 return 0; 362 } 363 364 static int __check_secure_blacklist(struct rtw89_dev *rtwdev, 365 struct rtw89_fw_bin_info *info, 366 struct rtw89_fw_hdr_section_info *section_info, 367 const void *content) 368 { 369 const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist; 370 const union rtw89_fw_section_mssc_content *section_content = content; 371 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 372 u8 byte_idx; 373 u8 bit_mask; 374 375 if (!sec->secure_boot) 376 return 0; 377 378 if (!info->secure_section_exist || section_info->ignore) 379 return 0; 380 381 if (!chip_blacklist) { 382 rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n"); 383 return -ENOENT; 384 } 385 386 byte_idx = section_content->blacklist.bit_in_chip_list >> 3; 387 bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7); 388 389 if (section_content->blacklist.ver > chip_blacklist->ver) { 390 rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n", 391 section_content->blacklist.ver, chip_blacklist->ver); 392 return -EINVAL; 393 } 394 395 if (chip_blacklist->list[byte_idx] & bit_mask) { 396 rtw89_warn(rtwdev, "firmware %u in chip blacklist\n", 397 section_content->blacklist.ver); 398 return -EPERM; 399 } 400 401 return 0; 402 } 403 404 static int __parse_security_section(struct rtw89_dev *rtwdev, 405 struct rtw89_fw_bin_info *info, 406 struct rtw89_fw_hdr_section_info *section_info, 407 const void *content, 408 u32 *mssc_len) 409 { 410 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 411 int ret; 412 413 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 414 ret = __parse_formatted_mssc(rtwdev, info, section_info, 415 content, mssc_len); 416 if (ret) 417 return -EINVAL; 418 } else { 419 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 420 if (info->dsp_checksum) 421 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 422 423 if (sec->secure_boot) { 424 if (sec->mss_idx >= section_info->mssc) { 425 rtw89_err(rtwdev, "unexpected MSS %d >= %d\n", 426 sec->mss_idx, section_info->mssc); 427 return -EFAULT; 428 } 429 section_info->key_addr = content + section_info->len + 430 sec->mss_idx * FWDL_SECURITY_SIGLEN; 431 section_info->key_len = FWDL_SECURITY_SIGLEN; 432 } 433 434 info->secure_section_exist = true; 435 } 436 437 ret = __check_secure_blacklist(rtwdev, info, section_info, content); 438 WARN_ONCE(ret, "Current firmware in blacklist. 
Please update firmware.\n"); 439 440 return 0; 441 } 442 443 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 444 struct rtw89_fw_bin_info *info) 445 { 446 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 447 const struct rtw89_chip_info *chip = rtwdev->chip; 448 struct rtw89_fw_hdr_section_info *section_info; 449 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 450 const struct rtw89_fw_hdr_section_v1 *section; 451 const u8 *fw_end = fw + len; 452 const u8 *bin; 453 u32 base_hdr_len; 454 u32 mssc_len; 455 int ret; 456 u32 i; 457 458 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 459 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 460 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 461 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 462 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 463 464 if (chip->chip_gen == RTW89_CHIP_AX) 465 info->part_size = FWDL_SECTION_PER_PKT_LEN; 466 else 467 info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_PART_SIZE); 468 469 if (info->dynamic_hdr_en) { 470 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 471 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 472 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 473 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 474 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 475 return -EINVAL; 476 } 477 } else { 478 info->hdr_len = base_hdr_len; 479 info->dynamic_hdr_len = 0; 480 } 481 482 bin = fw + info->hdr_len; 483 484 /* jump to section header */ 485 section_info = info->section_info; 486 for (i = 0; i < info->section_num; i++) { 487 section = &fw_hdr->sections[i]; 488 489 section_info->type = 490 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 491 section_info->len = 492 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 493 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 494 section_info->len += FWDL_SECTION_CHKSUM_LEN; 495 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 496 section_info->dladdr = 497 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 498 section_info->addr = bin; 499 500 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 501 section_info->mssc = 502 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 503 504 ret = __parse_security_section(rtwdev, info, section_info, 505 bin, &mssc_len); 506 if (ret) 507 return ret; 508 } else { 509 section_info->mssc = 0; 510 mssc_len = 0; 511 } 512 513 rtw89_debug(rtwdev, RTW89_DBG_FW, 514 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 515 i, section_info->type, section_info->len, 516 section_info->mssc, mssc_len, bin - fw); 517 rtw89_debug(rtwdev, RTW89_DBG_FW, 518 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 519 section_info->ignore, section_info->key_addr, 520 section_info->key_addr ? 
521 section_info->key_addr - section_info->addr : 0, 522 section_info->key_len, section_info->key_idx); 523 524 bin += section_info->len + mssc_len; 525 section_info++; 526 } 527 528 if (fw_end != bin) { 529 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 530 return -EINVAL; 531 } 532 533 if (!info->secure_section_exist) 534 rtw89_warn(rtwdev, "no firmware secure section\n"); 535 536 return 0; 537 } 538 539 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 540 const struct rtw89_fw_suit *fw_suit, 541 struct rtw89_fw_bin_info *info) 542 { 543 const u8 *fw = fw_suit->data; 544 u32 len = fw_suit->size; 545 546 if (!fw || !len) { 547 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 548 return -ENOENT; 549 } 550 551 switch (fw_suit->hdr_ver) { 552 case 0: 553 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 554 case 1: 555 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 556 default: 557 return -ENOENT; 558 } 559 } 560 561 static 562 const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev, 563 const struct firmware *firmware) 564 { 565 const struct rtw89_mfw_hdr *mfw_hdr; 566 567 if (sizeof(*mfw_hdr) > firmware->size) 568 return NULL; 569 570 mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0]; 571 572 if (mfw_hdr->sig != RTW89_MFW_SIG) 573 return NULL; 574 575 return mfw_hdr; 576 } 577 578 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, 579 const struct firmware *firmware, 580 const struct rtw89_mfw_hdr *mfw_hdr) 581 { 582 const void *mfw = firmware->data; 583 u32 mfw_len = firmware->size; 584 u8 fw_nr = mfw_hdr->fw_nr; 585 const void *ptr; 586 587 if (fw_nr == 0) { 588 rtw89_err(rtwdev, "mfw header has no fw entry\n"); 589 return -ENOENT; 590 } 591 592 ptr = &mfw_hdr->info[fw_nr]; 593 594 if (ptr > mfw + mfw_len) { 595 rtw89_err(rtwdev, "mfw header out of address\n"); 596 return -EFAULT; 597 } 598 599 return 0; 600 } 601 602 static 603 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 604 struct rtw89_fw_suit *fw_suit, bool nowarn) 605 { 606 struct rtw89_fw_info *fw_info = &rtwdev->fw; 607 const struct firmware *firmware = fw_info->req.firmware; 608 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 609 const struct rtw89_mfw_hdr *mfw_hdr; 610 const u8 *mfw = firmware->data; 611 u32 mfw_len = firmware->size; 612 int ret; 613 int i; 614 615 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 616 if (!mfw_hdr) { 617 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 618 /* legacy firmware support normal type only */ 619 if (type != RTW89_FW_NORMAL) 620 return -EINVAL; 621 fw_suit->data = mfw; 622 fw_suit->size = mfw_len; 623 return 0; 624 } 625 626 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 627 if (ret) 628 return ret; 629 630 for (i = 0; i < mfw_hdr->fw_nr; i++) { 631 tmp = &mfw_hdr->info[i]; 632 if (tmp->type != type) 633 continue; 634 635 if (type == RTW89_FW_LOGFMT) { 636 mfw_info = tmp; 637 goto found; 638 } 639 640 /* Version order of WiFi firmware in firmware file are not in order, 641 * pass all firmware to find the equal or less but closest version. 
642 */ 643 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 644 if (!mfw_info || mfw_info->cv < tmp->cv) 645 mfw_info = tmp; 646 } 647 } 648 649 if (mfw_info) 650 goto found; 651 652 if (!nowarn) 653 rtw89_err(rtwdev, "no suitable firmware found\n"); 654 return -ENOENT; 655 656 found: 657 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 658 fw_suit->size = le32_to_cpu(mfw_info->size); 659 660 if (fw_suit->data + fw_suit->size > mfw + mfw_len) { 661 rtw89_err(rtwdev, "fw_suit %d out of address\n", type); 662 return -EFAULT; 663 } 664 665 return 0; 666 } 667 668 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 669 { 670 struct rtw89_fw_info *fw_info = &rtwdev->fw; 671 const struct firmware *firmware = fw_info->req.firmware; 672 const struct rtw89_mfw_info *mfw_info; 673 const struct rtw89_mfw_hdr *mfw_hdr; 674 u32 size; 675 int ret; 676 677 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 678 if (!mfw_hdr) { 679 rtw89_warn(rtwdev, "not mfw format\n"); 680 return 0; 681 } 682 683 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 684 if (ret) 685 return ret; 686 687 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 688 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 689 690 return size; 691 } 692 693 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 694 struct rtw89_fw_suit *fw_suit, 695 const struct rtw89_fw_hdr *hdr) 696 { 697 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 698 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 699 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 700 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 701 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 702 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 703 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 704 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 705 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 706 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 707 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 708 } 709 710 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 711 struct rtw89_fw_suit *fw_suit, 712 const struct rtw89_fw_hdr_v1 *hdr) 713 { 714 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 715 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 716 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 717 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 718 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 719 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 720 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 721 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 722 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 723 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 724 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 725 } 726 727 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 728 enum rtw89_fw_type type, 729 struct rtw89_fw_suit *fw_suit) 730 { 731 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 732 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 733 734 if (type == RTW89_FW_LOGFMT) 735 return 0; 736 737 fw_suit->type = type; 738 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 739 740 switch 
(fw_suit->hdr_ver) { 741 case 0: 742 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 743 break; 744 case 1: 745 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 746 break; 747 default: 748 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 749 fw_suit->hdr_ver); 750 return -ENOENT; 751 } 752 753 rtw89_info(rtwdev, 754 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 755 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 756 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 757 758 return 0; 759 } 760 761 static 762 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 763 bool nowarn) 764 { 765 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 766 int ret; 767 768 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 769 if (ret) 770 return ret; 771 772 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 773 } 774 775 static 776 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 777 const struct rtw89_fw_element_hdr *elm, 778 const union rtw89_fw_element_arg arg) 779 { 780 enum rtw89_fw_type type = arg.fw_type; 781 struct rtw89_hal *hal = &rtwdev->hal; 782 struct rtw89_fw_suit *fw_suit; 783 784 /* Version of BB MCU is in decreasing order in firmware file, so take 785 * first equal or less version, which is equal or less but closest version. 786 */ 787 if (hal->cv < elm->u.bbmcu.cv) 788 return 1; /* ignore this element */ 789 790 fw_suit = rtw89_fw_suit_get(rtwdev, type); 791 if (fw_suit->data) 792 return 1; /* ignore this element (a firmware is taken already) */ 793 794 fw_suit->data = elm->u.bbmcu.contents; 795 fw_suit->size = le32_to_cpu(elm->size); 796 797 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 798 } 799 800 #define __DEF_FW_FEAT_COND(__cond, __op) \ 801 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 802 { \ 803 return suit_ver_code __op comp_ver_code; \ 804 } 805 806 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 807 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 808 __DEF_FW_FEAT_COND(lt, <); /* less than */ 809 810 struct __fw_feat_cfg { 811 enum rtw89_core_chip_id chip_id; 812 enum rtw89_fw_feature feature; 813 u32 ver_code; 814 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 815 }; 816 817 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 818 { \ 819 .chip_id = _chip, \ 820 .feature = RTW89_FW_FEATURE_ ## _feat, \ 821 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 822 .cond = __fw_feat_cond_ ## _cond, \ 823 } 824 825 static const struct __fw_feat_cfg fw_feat_tbl[] = { 826 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 827 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 828 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0), 829 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 830 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 831 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 832 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0), 833 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 834 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 835 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 836 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 837 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0), 838 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 839 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 840 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 841 __CFG_FW_FEAT(RTL8852B, ge, 
0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 842 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1), 843 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP), 844 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING), 845 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 846 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 847 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0), 848 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 849 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 850 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING), 851 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP), 852 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 853 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1), 854 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 855 __CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0), 856 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 857 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 858 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0), 859 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 860 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 861 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1), 862 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG), 863 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1), 864 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING), 865 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0), 866 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 867 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 868 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 869 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 870 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 871 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0), 872 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 873 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 874 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 875 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 876 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 877 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 878 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 879 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE), 880 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1), 881 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG), 882 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1), 883 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING), 884 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 84, 0, ADDR_CAM_V0), 885 }; 886 887 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 888 const struct rtw89_chip_info *chip, 889 u32 ver_code) 890 { 891 int i; 892 893 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 894 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 895 896 if (chip->chip_id != ent->chip_id) 897 continue; 898 899 if (ent->cond(ver_code, ent->ver_code)) 900 RTW89_SET_FW_FEATURE(ent->feature, fw); 901 } 902 } 903 904 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 905 { 906 const struct rtw89_chip_info *chip = rtwdev->chip; 907 const struct rtw89_fw_suit *fw_suit; 908 u32 suit_ver_code; 909 910 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 911 suit_ver_code = 
RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_variant *variant = rtwdev->variant;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	if (!variant)
		return 0;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	if (variant->fw_min_ver_code > suit_ver_code) {
		rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n",
			  variant->fw_min_ver_code);
		return -ENOENT;
	}

	return 0;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	ret = rtw89_fw_validate_ver_required(rtwdev);
	if (ret)
		return ret;

	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file doesn't exist.
*/ 1001 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 1002 1003 rtw89_fw_recognize_features(rtwdev); 1004 1005 rtw89_coex_recognize_ver(rtwdev); 1006 1007 return 0; 1008 } 1009 1010 static 1011 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 1012 const struct rtw89_fw_element_hdr *elm, 1013 const union rtw89_fw_element_arg arg) 1014 { 1015 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1016 struct rtw89_phy_table *tbl; 1017 struct rtw89_reg2_def *regs; 1018 enum rtw89_rf_path rf_path; 1019 u32 n_regs, i; 1020 u8 idx; 1021 1022 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 1023 if (!tbl) 1024 return -ENOMEM; 1025 1026 switch (le32_to_cpu(elm->id)) { 1027 case RTW89_FW_ELEMENT_ID_BB_REG: 1028 elm_info->bb_tbl = tbl; 1029 break; 1030 case RTW89_FW_ELEMENT_ID_BB_GAIN: 1031 elm_info->bb_gain = tbl; 1032 break; 1033 case RTW89_FW_ELEMENT_ID_RADIO_A: 1034 case RTW89_FW_ELEMENT_ID_RADIO_B: 1035 case RTW89_FW_ELEMENT_ID_RADIO_C: 1036 case RTW89_FW_ELEMENT_ID_RADIO_D: 1037 rf_path = arg.rf_path; 1038 idx = elm->u.reg2.idx; 1039 1040 elm_info->rf_radio[idx] = tbl; 1041 tbl->rf_path = rf_path; 1042 tbl->config = rtw89_phy_config_rf_reg_v1; 1043 break; 1044 case RTW89_FW_ELEMENT_ID_RF_NCTL: 1045 elm_info->rf_nctl = tbl; 1046 break; 1047 default: 1048 kfree(tbl); 1049 return -ENOENT; 1050 } 1051 1052 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 1053 regs = kcalloc(n_regs, sizeof(*regs), GFP_KERNEL); 1054 if (!regs) 1055 goto out; 1056 1057 for (i = 0; i < n_regs; i++) { 1058 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 1059 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 1060 } 1061 1062 tbl->n_regs = n_regs; 1063 tbl->regs = regs; 1064 1065 return 0; 1066 1067 out: 1068 kfree(tbl); 1069 return -ENOMEM; 1070 } 1071 1072 static 1073 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 1074 const struct rtw89_fw_element_hdr *elm, 1075 const union rtw89_fw_element_arg arg) 1076 { 1077 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 1078 const unsigned long offset = arg.offset; 1079 struct rtw89_efuse *efuse = &rtwdev->efuse; 1080 struct rtw89_txpwr_conf *conf; 1081 1082 if (!rtwdev->rfe_data) { 1083 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 1084 if (!rtwdev->rfe_data) 1085 return -ENOMEM; 1086 } 1087 1088 conf = (void *)rtwdev->rfe_data + offset; 1089 1090 /* if multiple matched, take the last eventually */ 1091 if (txpwr_elm->rfe_type == efuse->rfe_type) 1092 goto setup; 1093 1094 /* without one is matched, accept default */ 1095 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 1096 (!rtw89_txpwr_conf_valid(conf) || 1097 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 1098 goto setup; 1099 1100 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 1101 elm->id, txpwr_elm->rfe_type); 1102 return 0; 1103 1104 setup: 1105 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 1106 elm->id, txpwr_elm->rfe_type); 1107 1108 conf->rfe_type = txpwr_elm->rfe_type; 1109 conf->ent_sz = txpwr_elm->ent_sz; 1110 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 1111 conf->data = txpwr_elm->content; 1112 return 0; 1113 } 1114 1115 static 1116 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 1117 const struct rtw89_fw_element_hdr *elm, 1118 const union rtw89_fw_element_arg arg) 1119 { 1120 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1121 const struct rtw89_chip_info *chip = rtwdev->chip; 1122 u32 needed_bitmap = 0; 1123 u32 offset = 0; 
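	/* 'offset' counts the per-sub-band delta tables consumed so far from
	 * elm->u.txpwr_trk.contents[]; each matched type below advances it by
	 * its sub-band count (4 for 6 GHz, 3 for 5 GHz, 1 for 2 GHz), and the
	 * running total is bounds-checked against elm->size in units of
	 * DELTA_SWINGIDX_SIZE.
	 */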
1124 int subband; 1125 u32 bitmap; 1126 int type; 1127 1128 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 1129 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 1130 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 1131 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 1132 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 1133 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 1134 1135 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 1136 1137 if ((bitmap & needed_bitmap) != needed_bitmap) { 1138 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", 1139 needed_bitmap, bitmap); 1140 return -ENOENT; 1141 } 1142 1143 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 1144 if (!elm_info->txpwr_trk) 1145 return -ENOMEM; 1146 1147 for (type = 0; bitmap; type++, bitmap >>= 1) { 1148 if (!(bitmap & BIT(0))) 1149 continue; 1150 1151 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 1152 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 1153 subband = 4; 1154 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 1155 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 1156 subband = 3; 1157 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 1158 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 1159 subband = 1; 1160 else 1161 break; 1162 1163 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 1164 1165 offset += subband; 1166 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 1167 goto err; 1168 } 1169 1170 return 0; 1171 1172 err: 1173 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1174 offset, le32_to_cpu(elm->size)); 1175 kfree(elm_info->txpwr_trk); 1176 elm_info->txpwr_trk = NULL; 1177 1178 return -EFAULT; 1179 } 1180 1181 static 1182 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1183 const struct rtw89_fw_element_hdr *elm, 1184 const union rtw89_fw_element_arg arg) 1185 { 1186 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1187 u8 rfk_id; 1188 1189 if (elm_info->rfk_log_fmt) 1190 goto allocated; 1191 1192 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1193 if (!elm_info->rfk_log_fmt) 1194 return 1; /* this is an optional element, so just ignore this */ 1195 1196 allocated: 1197 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1198 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1199 return 1; 1200 1201 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1202 1203 return 0; 1204 } 1205 1206 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor, 1207 u8 cursor_size) 1208 { 1209 /* fill default values if needed for backward compatibility */ 1210 struct rtw89_fw_regd_entry entry = { 1211 .rule_2ghz = RTW89_NA, 1212 .rule_5ghz = RTW89_NA, 1213 .rule_6ghz = RTW89_NA, 1214 .fmap = cpu_to_le32(0x0), 1215 }; 1216 u8 valid_size = min_t(u8, sizeof(entry), cursor_size); 1217 unsigned int i; 1218 u32 fmap; 1219 1220 memcpy(&entry, cursor, valid_size); 1221 memset(regd, 0, sizeof(*regd)); 1222 1223 regd->alpha2[0] = entry.alpha2_0; 1224 regd->alpha2[1] = entry.alpha2_1; 1225 regd->alpha2[2] = '\0'; 1226 1227 /* also need to consider forward compatibility */ 1228 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ? 1229 entry.rule_2ghz : RTW89_NA; 1230 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ? 1231 entry.rule_5ghz : RTW89_NA; 1232 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ? 
					  entry.rule_6ghz : RTW89_NA;

	BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
	BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);

	fmap = le32_to_cpu(entry.fmap);
	for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
		if (fmap & BIT(i))
			set_bit(i, regd->func_bitmap);
	}

	return true;
}

#define rtw89_for_each_in_regd_element(regd, element) \
	for (const void *cursor = (element)->content, \
		  *end = (element)->content + \
			 le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
	     cursor < end; cursor += (element)->ent_sz) \
		if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))

static
int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u32 num_ents = le32_to_cpu(regd_elm->num_ents);
	struct rtw89_regd_data *p;
	struct rtw89_regd regd;
	u32 i = 0;

	if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
		rtw89_warn(rtwdev,
			   "regd element ents (%d) are over max num (%d)\n",
			   num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
		rtw89_warn(rtwdev,
			   "regd element ignore and take another/common\n");
		return 1;
	}

	if (elm_info->regd) {
		rtw89_debug(rtwdev, RTW89_DBG_REGD,
			    "regd element take the latter\n");
		devm_kfree(rtwdev->dev, elm_info->regd);
		elm_info->regd = NULL;
	}

	p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->nr = num_ents;
	rtw89_for_each_in_regd_element(&regd, regd_elm)
		p->map[i++] = regd;

	if (i != num_ents) {
		rtw89_err(rtwdev, "regd element has %d invalid ents\n",
			  num_ents - i);
		devm_kfree(rtwdev->dev, p);
		return -EINVAL;
	}

	elm_info->regd = p;
	return 0;
}

static
int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;

	elm_info->afe = elm;

	return 0;
}

static
int rtw89_recognize_diag_mac_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;

	elm_info->diag_mac = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
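	/* The TXPWR_* entries below all share rtw89_fw_recognize_txpwr_from_elm();
	 * the .offset argument selects which rtw89_rfe_data member the element
	 * populates. rtw89_fw_recognize_elements() indexes this table by element
	 * ID and treats a handler return value of 1 as "skip this element".
	 */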
[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1341 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1342 rtw89_fw_recognize_txpwr_from_elm, 1343 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1344 }, 1345 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1346 rtw89_fw_recognize_txpwr_from_elm, 1347 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1348 }, 1349 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1350 rtw89_fw_recognize_txpwr_from_elm, 1351 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1352 }, 1353 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1354 rtw89_fw_recognize_txpwr_from_elm, 1355 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1356 }, 1357 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = { 1358 rtw89_fw_recognize_txpwr_from_elm, 1359 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL, 1360 }, 1361 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = { 1362 rtw89_fw_recognize_txpwr_from_elm, 1363 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL, 1364 }, 1365 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = { 1366 rtw89_fw_recognize_txpwr_from_elm, 1367 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL, 1368 }, 1369 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1370 rtw89_fw_recognize_txpwr_from_elm, 1371 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1372 }, 1373 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1374 rtw89_fw_recognize_txpwr_from_elm, 1375 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1376 }, 1377 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1378 rtw89_fw_recognize_txpwr_from_elm, 1379 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1380 }, 1381 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = { 1382 rtw89_fw_recognize_txpwr_from_elm, 1383 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL, 1384 }, 1385 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = { 1386 rtw89_fw_recognize_txpwr_from_elm, 1387 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL, 1388 }, 1389 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = { 1390 rtw89_fw_recognize_txpwr_from_elm, 1391 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL, 1392 }, 1393 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1394 rtw89_fw_recognize_txpwr_from_elm, 1395 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1396 }, 1397 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1398 rtw89_fw_recognize_txpwr_from_elm, 1399 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1400 }, 1401 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1402 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1403 }, 1404 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1405 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1406 }, 1407 [RTW89_FW_ELEMENT_ID_REGD] = { 1408 rtw89_recognize_regd_from_elm, {}, "REGD", 1409 }, 1410 [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = { 1411 rtw89_build_afe_pwr_seq_from_elm, {}, "AFE", 1412 }, 1413 [RTW89_FW_ELEMENT_ID_DIAG_MAC] = { 1414 rtw89_recognize_diag_mac_from_elm, {}, NULL, 1415 }, 1416 }; 1417 1418 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1419 { 1420 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1421 const struct firmware *firmware = fw_info->req.firmware; 1422 const struct rtw89_chip_info *chip = rtwdev->chip; 1423 u32 unrecognized_elements = chip->needed_fw_elms; 1424 const struct rtw89_fw_element_handler *handler; 1425 const struct rtw89_fw_element_hdr 
*hdr; 1426 u32 elm_size; 1427 u32 elem_id; 1428 u32 offset; 1429 int ret; 1430 1431 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1432 1433 offset = rtw89_mfw_get_size(rtwdev); 1434 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1435 if (offset == 0) 1436 return -EINVAL; 1437 1438 while (offset + sizeof(*hdr) < firmware->size) { 1439 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1440 1441 elm_size = le32_to_cpu(hdr->size); 1442 if (offset + elm_size >= firmware->size) { 1443 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1444 break; 1445 } 1446 1447 elem_id = le32_to_cpu(hdr->id); 1448 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1449 goto next; 1450 1451 handler = &__fw_element_handlers[elem_id]; 1452 if (!handler->fn) 1453 goto next; 1454 1455 ret = handler->fn(rtwdev, hdr, handler->arg); 1456 if (ret == 1) /* ignore this element */ 1457 goto next; 1458 if (ret) 1459 return ret; 1460 1461 if (handler->name) 1462 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1463 handler->name, hdr->ver); 1464 1465 unrecognized_elements &= ~BIT(elem_id); 1466 next: 1467 offset += sizeof(*hdr) + elm_size; 1468 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1469 } 1470 1471 if (unrecognized_elements) { 1472 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1473 unrecognized_elements); 1474 return -ENOENT; 1475 } 1476 1477 return 0; 1478 } 1479 1480 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1481 u8 type, u8 cat, u8 class, u8 func, 1482 bool rack, bool dack, u32 len) 1483 { 1484 struct fwcmd_hdr *hdr; 1485 1486 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1487 1488 if (!(rtwdev->fw.h2c_seq % 4)) 1489 rack = true; 1490 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1491 FIELD_PREP(H2C_HDR_CAT, cat) | 1492 FIELD_PREP(H2C_HDR_CLASS, class) | 1493 FIELD_PREP(H2C_HDR_FUNC, func) | 1494 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1495 1496 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1497 len + H2C_HEADER_LEN) | 1498 (rack ? H2C_HDR_REC_ACK : 0) | 1499 (dack ? 
				 H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section *section;
	int i;

	le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_W7_PART_SIZE);

	for (i = 0; i < info->section_num; i++) {
		section_info = &info->section_info[i];

		if (!section_info->len_override)
			continue;

		section = &fw_hdr->sections[i];
		le32p_replace_bits(&section->w1, section_info->len_override,
				   FWSECTION_HDR_W1_SEC_SIZE);
	}

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return (info->section_num - dst_sec_idx) * sizeof(*section);
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
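	/* Only the error paths that jump to 'fail' free the skb here; once
	 * rtw89_h2c_tx() succeeds, the skb has been handed off to the TX path.
	 */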
1626 fail: 1627 dev_kfree_skb_any(skb); 1628 1629 return ret; 1630 } 1631 1632 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1633 const struct rtw89_fw_suit *fw_suit, 1634 struct rtw89_fw_bin_info *info) 1635 { 1636 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1637 int ret; 1638 1639 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1640 if (ret) { 1641 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1642 return ret; 1643 } 1644 1645 ret = mac->fwdl_check_path_ready(rtwdev, false); 1646 if (ret) { 1647 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1648 return ret; 1649 } 1650 1651 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1652 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1653 1654 return 0; 1655 } 1656 1657 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1658 struct rtw89_fw_hdr_section_info *info, 1659 u32 part_size) 1660 { 1661 struct sk_buff *skb; 1662 const u8 *section = info->addr; 1663 u32 residue_len = info->len; 1664 bool copy_key = false; 1665 u32 pkt_len; 1666 int ret; 1667 1668 if (info->ignore) 1669 return 0; 1670 1671 if (info->len_override) { 1672 if (info->len_override > info->len) 1673 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1674 info->len_override, info->len); 1675 else 1676 residue_len = info->len_override; 1677 } 1678 1679 if (info->key_addr && info->key_len) { 1680 if (residue_len > part_size || info->len < info->key_len) 1681 rtw89_warn(rtwdev, 1682 "ignore to copy key data because of len %d, %d, %d, %d\n", 1683 info->len, part_size, 1684 info->key_len, residue_len); 1685 else 1686 copy_key = true; 1687 } 1688 1689 while (residue_len) { 1690 pkt_len = min(residue_len, part_size); 1691 1692 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1693 if (!skb) { 1694 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1695 return -ENOMEM; 1696 } 1697 skb_put_data(skb, section, pkt_len); 1698 1699 if (copy_key) 1700 memcpy(skb->data + pkt_len - info->key_len, 1701 info->key_addr, info->key_len); 1702 1703 ret = rtw89_h2c_tx(rtwdev, skb, true); 1704 if (ret) { 1705 rtw89_err(rtwdev, "failed to send h2c\n"); 1706 goto fail; 1707 } 1708 1709 section += pkt_len; 1710 residue_len -= pkt_len; 1711 } 1712 1713 return 0; 1714 fail: 1715 dev_kfree_skb_any(skb); 1716 1717 return ret; 1718 } 1719 1720 static enum rtw89_fwdl_check_type 1721 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1722 const struct rtw89_fw_suit *fw_suit) 1723 { 1724 switch (fw_suit->type) { 1725 case RTW89_FW_BBMCU0: 1726 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1727 case RTW89_FW_BBMCU1: 1728 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1729 default: 1730 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1731 } 1732 } 1733 1734 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1735 const struct rtw89_fw_suit *fw_suit, 1736 struct rtw89_fw_bin_info *info) 1737 { 1738 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1739 const struct rtw89_chip_info *chip = rtwdev->chip; 1740 enum rtw89_fwdl_check_type chk_type; 1741 u8 section_num = info->section_num; 1742 int ret; 1743 1744 while (section_num--) { 1745 ret = __rtw89_fw_download_main(rtwdev, section_info, info->part_size); 1746 if (ret) 1747 return ret; 1748 section_info++; 1749 } 1750 1751 if (chip->chip_gen == RTW89_CHIP_AX) 1752 return 0; 1753 1754 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1755 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1756 if (ret) { 1757 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1758 
fw_suit->type); 1759 return ret; 1760 } 1761 1762 return 0; 1763 } 1764 1765 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1766 { 1767 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1768 u32 addr = R_AX_DBG_PORT_SEL; 1769 u32 val32; 1770 u16 index; 1771 1772 if (chip_gen == RTW89_CHIP_BE) { 1773 addr = R_BE_WLCPU_PORT_PC; 1774 goto dump; 1775 } 1776 1777 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1778 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1779 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1780 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1781 1782 dump: 1783 for (index = 0; index < 15; index++) { 1784 val32 = rtw89_read32(rtwdev, addr); 1785 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1786 fsleep(10); 1787 } 1788 } 1789 1790 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1791 { 1792 u32 val32; 1793 1794 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1795 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1796 1797 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1798 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1799 1800 rtw89_fw_prog_cnt_dump(rtwdev); 1801 } 1802 1803 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1804 struct rtw89_fw_suit *fw_suit) 1805 { 1806 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1807 struct rtw89_fw_bin_info info = {}; 1808 int ret; 1809 1810 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1811 if (ret) { 1812 rtw89_err(rtwdev, "parse fw header fail\n"); 1813 return ret; 1814 } 1815 1816 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1817 1818 if (rtwdev->chip->chip_id == RTL8922A && 1819 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1820 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1821 1822 ret = mac->fwdl_check_path_ready(rtwdev, true); 1823 if (ret) { 1824 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1825 return ret; 1826 } 1827 1828 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1829 if (ret) 1830 return ret; 1831 1832 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1833 if (ret) 1834 return ret; 1835 1836 return 0; 1837 } 1838 1839 static 1840 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1841 bool include_bb) 1842 { 1843 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1844 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1845 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1846 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1847 int ret; 1848 int i; 1849 1850 mac->disable_cpu(rtwdev); 1851 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1852 if (ret) 1853 return ret; 1854 1855 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1856 if (ret) 1857 goto fwdl_err; 1858 1859 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1860 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1861 1862 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1863 if (ret) 1864 goto fwdl_err; 1865 } 1866 1867 fw_info->h2c_seq = 0; 1868 fw_info->rec_seq = 0; 1869 fw_info->h2c_counter = 0; 1870 fw_info->c2h_counter = 0; 1871 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1872 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1873 1874 mdelay(5); 1875 1876 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1877 if (ret) { 1878 rtw89_warn(rtwdev, "download firmware fail\n"); 1879 goto fwdl_err; 1880 } 1881 1882 return ret; 1883 1884 fwdl_err: 1885 rtw89_fw_dl_fail_dump(rtwdev); 1886 return ret; 1887 } 1888 1889 int 
rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1890 bool include_bb) 1891 { 1892 int retry; 1893 int ret; 1894 1895 for (retry = 0; retry < 5; retry++) { 1896 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1897 if (!ret) 1898 return 0; 1899 } 1900 1901 return ret; 1902 } 1903 1904 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1905 { 1906 struct rtw89_fw_info *fw = &rtwdev->fw; 1907 1908 wait_for_completion(&fw->req.completion); 1909 if (!fw->req.firmware) 1910 return -EINVAL; 1911 1912 return 0; 1913 } 1914 1915 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1916 struct rtw89_fw_req_info *req, 1917 const char *fw_name, bool nowarn) 1918 { 1919 int ret; 1920 1921 if (req->firmware) { 1922 rtw89_debug(rtwdev, RTW89_DBG_FW, 1923 "full firmware has been early requested\n"); 1924 complete_all(&req->completion); 1925 return 0; 1926 } 1927 1928 if (nowarn) 1929 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1930 else 1931 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1932 1933 complete_all(&req->completion); 1934 1935 return ret; 1936 } 1937 1938 void rtw89_load_firmware_work(struct work_struct *work) 1939 { 1940 struct rtw89_dev *rtwdev = 1941 container_of(work, struct rtw89_dev, load_firmware_work); 1942 const struct rtw89_chip_info *chip = rtwdev->chip; 1943 char fw_name[64]; 1944 1945 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1946 chip->fw_basename, rtwdev->fw.fw_format); 1947 1948 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1949 } 1950 1951 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1952 { 1953 if (!tbl) 1954 return; 1955 1956 kfree(tbl->regs); 1957 kfree(tbl); 1958 } 1959 1960 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1961 { 1962 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1963 int i; 1964 1965 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1966 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1967 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1968 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1969 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1970 1971 kfree(elm_info->txpwr_trk); 1972 kfree(elm_info->rfk_log_fmt); 1973 } 1974 1975 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1976 { 1977 struct rtw89_fw_info *fw = &rtwdev->fw; 1978 1979 cancel_work_sync(&rtwdev->load_firmware_work); 1980 1981 if (fw->req.firmware) { 1982 release_firmware(fw->req.firmware); 1983 1984 /* assign NULL back in case rtw89_free_ieee80211_hw() 1985 * try to release the same one again. 
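		 * release_firmware() itself ignores a NULL pointer, so clearing
		 * the field here keeps a second release harmless.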
1986 */ 1987 fw->req.firmware = NULL; 1988 } 1989 1990 kfree(fw->log.fmts); 1991 rtw89_unload_firmware_elements(rtwdev); 1992 } 1993 1994 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1995 { 1996 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1997 u32 i; 1998 1999 if (fmt_id > fw_log->last_fmt_id) 2000 return 0; 2001 2002 for (i = 0; i < fw_log->fmt_count; i++) { 2003 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 2004 return i; 2005 } 2006 return 0; 2007 } 2008 2009 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 2010 { 2011 struct rtw89_fw_log *log = &rtwdev->fw.log; 2012 const struct rtw89_fw_logsuit_hdr *suit_hdr; 2013 struct rtw89_fw_suit *suit = &log->suit; 2014 const void *fmts_ptr, *fmts_end_ptr; 2015 u32 fmt_count; 2016 int i; 2017 2018 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 2019 fmt_count = le32_to_cpu(suit_hdr->count); 2020 log->fmt_ids = suit_hdr->ids; 2021 fmts_ptr = &suit_hdr->ids[fmt_count]; 2022 fmts_end_ptr = suit->data + suit->size; 2023 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 2024 if (!log->fmts) 2025 return -ENOMEM; 2026 2027 for (i = 0; i < fmt_count; i++) { 2028 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 2029 if (!fmts_ptr) 2030 break; 2031 2032 (*log->fmts)[i] = fmts_ptr; 2033 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 2034 log->fmt_count++; 2035 fmts_ptr += strlen(fmts_ptr); 2036 } 2037 2038 return 0; 2039 } 2040 2041 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 2042 { 2043 struct rtw89_fw_log *log = &rtwdev->fw.log; 2044 struct rtw89_fw_suit *suit = &log->suit; 2045 2046 if (!suit || !suit->data) { 2047 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 2048 return -EINVAL; 2049 } 2050 if (log->fmts) 2051 return 0; 2052 2053 return rtw89_fw_log_create_fmts_dict(rtwdev); 2054 } 2055 2056 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 2057 const struct rtw89_fw_c2h_log_fmt *log_fmt, 2058 u32 fmt_idx, u8 para_int, bool raw_data) 2059 { 2060 const char *(*fmts)[] = rtwdev->fw.log.fmts; 2061 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 2062 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 2063 int i; 2064 2065 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 2066 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 2067 log_fmt->argc); 2068 return; 2069 } 2070 2071 if (para_int) 2072 for (i = 0 ; i < log_fmt->argc; i++) 2073 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 2074 2075 if (raw_data) { 2076 if (para_int) 2077 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2078 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 2079 para_int, log_fmt->argc, (int)sizeof(args), args); 2080 else 2081 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2082 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 2083 para_int, log_fmt->argc, log_fmt->u.raw); 2084 } else { 2085 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 2086 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 2087 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 2088 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 2089 args[0xf]); 2090 } 2091 2092 rtw89_info(rtwdev, "C2H log: %s", str_buf); 2093 } 2094 2095 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 2096 { 2097 const struct rtw89_fw_c2h_log_fmt *log_fmt; 2098 u8 para_int; 2099 u32 fmt_idx; 2100 2101 if (len < RTW89_C2H_HEADER_LEN) { 2102 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 2103 return; 2104 } 2105 2106 buf += RTW89_C2H_HEADER_LEN; 2107 len -= 
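	/* strip the generic C2H header before looking for the formatted-log
	 * signature below
	 */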
RTW89_C2H_HEADER_LEN; 2108 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 2109 2110 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 2111 goto plain_log; 2112 2113 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 2114 goto plain_log; 2115 2116 if (!rtwdev->fw.log.fmts) 2117 return; 2118 2119 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 2120 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 2121 2122 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 2123 rtw89_info(rtwdev, "C2H log: %s%s", 2124 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 2125 else if (fmt_idx != 0 && para_int) 2126 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 2127 else 2128 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 2129 return; 2130 2131 plain_log: 2132 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 2133 2134 } 2135 2136 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 2137 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr, 2138 enum rtw89_upd_mode upd_mode) 2139 { 2140 const struct rtw89_chip_info *chip = rtwdev->chip; 2141 struct rtw89_h2c_addr_cam_v0 *h2c_v0; 2142 struct rtw89_h2c_addr_cam *h2c; 2143 u32 len = sizeof(*h2c); 2144 struct sk_buff *skb; 2145 u8 ver = U8_MAX; 2146 int ret; 2147 2148 if (RTW89_CHK_FW_FEATURE(ADDR_CAM_V0, &rtwdev->fw) || 2149 chip->chip_gen == RTW89_CHIP_AX) { 2150 len = sizeof(*h2c_v0); 2151 ver = 0; 2152 } 2153 2154 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2155 if (!skb) { 2156 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2157 return -ENOMEM; 2158 } 2159 skb_put(skb, len); 2160 h2c_v0 = (struct rtw89_h2c_addr_cam_v0 *)skb->data; 2161 2162 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, 2163 scan_mac_addr, h2c_v0); 2164 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, h2c_v0); 2165 2166 if (ver == 0) 2167 goto hdr; 2168 2169 h2c = (struct rtw89_h2c_addr_cam *)skb->data; 2170 h2c->w15 = le32_encode_bits(upd_mode, ADDR_CAM_W15_UPD_MODE); 2171 2172 hdr: 2173 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2174 H2C_CAT_MAC, 2175 H2C_CL_MAC_ADDR_CAM_UPDATE, 2176 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 2177 len); 2178 2179 ret = rtw89_h2c_tx(rtwdev, skb, false); 2180 if (ret) { 2181 rtw89_err(rtwdev, "failed to send h2c\n"); 2182 goto fail; 2183 } 2184 2185 return 0; 2186 fail: 2187 dev_kfree_skb_any(skb); 2188 2189 return ret; 2190 } 2191 2192 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 2193 struct rtw89_vif_link *rtwvif_link, 2194 struct rtw89_sta_link *rtwsta_link) 2195 { 2196 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 2197 u32 len = sizeof(*h2c); 2198 struct sk_buff *skb; 2199 int ret; 2200 2201 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2202 if (!skb) { 2203 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2204 return -ENOMEM; 2205 } 2206 skb_put(skb, len); 2207 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 2208 2209 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 2210 2211 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2212 H2C_CAT_MAC, 2213 H2C_CL_MAC_FR_EXCHG, 2214 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 2215 len); 2216 2217 ret = rtw89_h2c_tx(rtwdev, skb, false); 2218 if (ret) { 2219 rtw89_err(rtwdev, "failed to send h2c\n"); 2220 goto fail; 2221 } 2222 2223 return 0; 2224 fail: 2225 dev_kfree_skb_any(skb); 2226 2227 return ret; 2228 } 2229 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 2230 2231 int 
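/* Same flow as the v1 variant above, but using the v2 D-MAC control info
 * layout of newer chip generations.  Chips are expected to reach one of the
 * two variants through their chip ops; the hook name below is an assumption,
 * shown purely for illustration:
 *
 *	ret = chip->ops->h2c_dctl_sec_cam(rtwdev, rtwvif_link, rtwsta_link);
 */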
rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 2232 struct rtw89_vif_link *rtwvif_link, 2233 struct rtw89_sta_link *rtwsta_link) 2234 { 2235 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2236 u32 len = sizeof(*h2c); 2237 struct sk_buff *skb; 2238 int ret; 2239 2240 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2241 if (!skb) { 2242 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2243 return -ENOMEM; 2244 } 2245 skb_put(skb, len); 2246 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2247 2248 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 2249 2250 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2251 H2C_CAT_MAC, 2252 H2C_CL_MAC_FR_EXCHG, 2253 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2254 len); 2255 2256 ret = rtw89_h2c_tx(rtwdev, skb, false); 2257 if (ret) { 2258 rtw89_err(rtwdev, "failed to send h2c\n"); 2259 goto fail; 2260 } 2261 2262 return 0; 2263 fail: 2264 dev_kfree_skb_any(skb); 2265 2266 return ret; 2267 } 2268 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 2269 2270 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2271 struct rtw89_vif_link *rtwvif_link, 2272 struct rtw89_sta_link *rtwsta_link) 2273 { 2274 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2275 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2276 u32 len = sizeof(*h2c); 2277 struct sk_buff *skb; 2278 int ret; 2279 2280 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2281 if (!skb) { 2282 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2283 return -ENOMEM; 2284 } 2285 skb_put(skb, len); 2286 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2287 2288 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2289 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2290 2291 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2292 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2293 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2294 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2295 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2296 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2297 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2298 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2299 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2300 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2301 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2302 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2303 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2304 2305 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2306 H2C_CAT_MAC, 2307 H2C_CL_MAC_FR_EXCHG, 2308 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2309 len); 2310 2311 ret = rtw89_h2c_tx(rtwdev, skb, false); 2312 if (ret) { 2313 rtw89_err(rtwdev, "failed to send h2c\n"); 2314 goto fail; 2315 } 2316 2317 return 0; 2318 fail: 2319 dev_kfree_skb_any(skb); 2320 2321 return ret; 2322 } 2323 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2324 2325 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2326 struct rtw89_vif_link *rtwvif_link, 2327 struct rtw89_sta_link *rtwsta_link, 2328 bool valid, struct ieee80211_ampdu_params *params) 2329 { 2330 const struct rtw89_chip_info *chip = rtwdev->chip; 2331 struct rtw89_h2c_ba_cam *h2c; 2332 u8 macid = rtwsta_link->mac_id; 2333 u32 len = sizeof(*h2c); 2334 struct sk_buff *skb; 2335 u8 entry_idx; 2336 int ret; 2337 2338 ret = valid ? 
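	/* on ADDBA (valid) reserve a static BA CAM entry for this TID, on
	 * DELBA release it again; failure is tolerated because the hardware
	 * can still fall back to a dynamic BA CAM entry.
	 */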
2339 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2340 &entry_idx) : 2341 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2342 &entry_idx); 2343 if (ret) { 2344 /* it still works even if we don't have static BA CAM, because 2345 * hardware can create dynamic BA CAM automatically. 2346 */ 2347 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2348 "failed to %s entry tid=%d for h2c ba cam\n", 2349 valid ? "alloc" : "free", params->tid); 2350 return 0; 2351 } 2352 2353 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2354 if (!skb) { 2355 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2356 return -ENOMEM; 2357 } 2358 skb_put(skb, len); 2359 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2360 2361 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2362 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2363 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2364 else 2365 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2366 if (!valid) 2367 goto end; 2368 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2369 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2370 if (params->buf_size > 64) 2371 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2372 else 2373 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2374 /* If init req is set, hw will set the ssn */ 2375 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2376 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2377 2378 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2379 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2380 le32_encode_bits(rtwvif_link->mac_idx, 2381 RTW89_H2C_BA_CAM_W1_BAND); 2382 } 2383 2384 end: 2385 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2386 H2C_CAT_MAC, 2387 H2C_CL_BA_CAM, 2388 H2C_FUNC_MAC_BA_CAM, 0, 1, 2389 len); 2390 2391 ret = rtw89_h2c_tx(rtwdev, skb, false); 2392 if (ret) { 2393 rtw89_err(rtwdev, "failed to send h2c\n"); 2394 goto fail; 2395 } 2396 2397 return 0; 2398 fail: 2399 dev_kfree_skb_any(skb); 2400 2401 return ret; 2402 } 2403 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2404 2405 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2406 u8 entry_idx, u8 uid) 2407 { 2408 struct rtw89_h2c_ba_cam *h2c; 2409 u32 len = sizeof(*h2c); 2410 struct sk_buff *skb; 2411 int ret; 2412 2413 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2414 if (!skb) { 2415 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2416 return -ENOMEM; 2417 } 2418 skb_put(skb, len); 2419 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2420 2421 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2422 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2423 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2424 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2425 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2426 2427 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2428 H2C_CAT_MAC, 2429 H2C_CL_BA_CAM, 2430 H2C_FUNC_MAC_BA_CAM, 0, 1, 2431 len); 2432 2433 ret = rtw89_h2c_tx(rtwdev, skb, false); 2434 if (ret) { 2435 rtw89_err(rtwdev, "failed to send h2c\n"); 2436 goto fail; 2437 } 2438 2439 return 0; 2440 fail: 2441 dev_kfree_skb_any(skb); 2442 2443 return ret; 2444 } 2445 2446 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2447 { 2448 const struct rtw89_chip_info *chip = rtwdev->chip; 2449 u8 entry_idx = chip->bacam_num; 2450 u8 uid = 0; 2451 int i; 2452 2453 for (i = 0; i < 
chip->bacam_dynamic_num; i++) { 2454 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2455 entry_idx++; 2456 uid++; 2457 } 2458 } 2459 2460 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2461 struct rtw89_vif_link *rtwvif_link, 2462 struct rtw89_sta_link *rtwsta_link, 2463 bool valid, struct ieee80211_ampdu_params *params) 2464 { 2465 const struct rtw89_chip_info *chip = rtwdev->chip; 2466 struct rtw89_h2c_ba_cam_v1 *h2c; 2467 u8 macid = rtwsta_link->mac_id; 2468 u32 len = sizeof(*h2c); 2469 struct sk_buff *skb; 2470 u8 entry_idx; 2471 u8 bmap_size; 2472 int ret; 2473 2474 ret = valid ? 2475 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2476 &entry_idx) : 2477 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2478 &entry_idx); 2479 if (ret) { 2480 /* it still works even if we don't have static BA CAM, because 2481 * hardware can create dynamic BA CAM automatically. 2482 */ 2483 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2484 "failed to %s entry tid=%d for h2c ba cam\n", 2485 valid ? "alloc" : "free", params->tid); 2486 return 0; 2487 } 2488 2489 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2490 if (!skb) { 2491 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2492 return -ENOMEM; 2493 } 2494 skb_put(skb, len); 2495 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2496 2497 if (params->buf_size > 512) 2498 bmap_size = 10; 2499 else if (params->buf_size > 256) 2500 bmap_size = 8; 2501 else if (params->buf_size > 64) 2502 bmap_size = 4; 2503 else 2504 bmap_size = 0; 2505 2506 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2507 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2508 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2509 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2510 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2511 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2512 2513 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2514 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2515 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2516 le32_encode_bits(!!rtwvif_link->mac_idx, 2517 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2518 2519 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2520 H2C_CAT_MAC, 2521 H2C_CL_BA_CAM, 2522 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2523 len); 2524 2525 ret = rtw89_h2c_tx(rtwdev, skb, false); 2526 if (ret) { 2527 rtw89_err(rtwdev, "failed to send h2c\n"); 2528 goto fail; 2529 } 2530 2531 return 0; 2532 fail: 2533 dev_kfree_skb_any(skb); 2534 2535 return ret; 2536 } 2537 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2538 2539 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2540 u8 offset, u8 mac_idx) 2541 { 2542 struct rtw89_h2c_ba_cam_init *h2c; 2543 u32 len = sizeof(*h2c); 2544 struct sk_buff *skb; 2545 int ret; 2546 2547 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2548 if (!skb) { 2549 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2550 return -ENOMEM; 2551 } 2552 skb_put(skb, len); 2553 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2554 2555 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2556 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2557 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2558 2559 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2560 H2C_CAT_MAC, 2561 H2C_CL_BA_CAM, 2562 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2563 len); 2564 2565 ret = 
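	/* hand firmware the BA CAM partitioning: how many users this band
	 * gets and at which entry offset its share starts
	 */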
rtw89_h2c_tx(rtwdev, skb, false); 2566 if (ret) { 2567 rtw89_err(rtwdev, "failed to send h2c\n"); 2568 goto fail; 2569 } 2570 2571 return 0; 2572 fail: 2573 dev_kfree_skb_any(skb); 2574 2575 return ret; 2576 } 2577 2578 #define H2C_LOG_CFG_LEN 12 2579 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2580 { 2581 struct sk_buff *skb; 2582 u32 comp = 0; 2583 int ret; 2584 2585 if (enable) 2586 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2587 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2588 BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN); 2589 2590 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2591 if (!skb) { 2592 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2593 return -ENOMEM; 2594 } 2595 2596 skb_put(skb, H2C_LOG_CFG_LEN); 2597 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2598 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2599 SET_LOG_CFG_COMP(skb->data, comp); 2600 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2601 2602 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2603 H2C_CAT_MAC, 2604 H2C_CL_FW_INFO, 2605 H2C_FUNC_LOG_CFG, 0, 0, 2606 H2C_LOG_CFG_LEN); 2607 2608 ret = rtw89_h2c_tx(rtwdev, skb, false); 2609 if (ret) { 2610 rtw89_err(rtwdev, "failed to send h2c\n"); 2611 goto fail; 2612 } 2613 2614 return 0; 2615 fail: 2616 dev_kfree_skb_any(skb); 2617 2618 return ret; 2619 } 2620 2621 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2622 struct rtw89_vif_link *rtwvif_link) 2623 { 2624 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2625 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2626 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2627 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2628 struct rtw89_eapol_2_of_2 *eapol_pkt; 2629 struct ieee80211_bss_conf *bss_conf; 2630 struct ieee80211_hdr_3addr *hdr; 2631 struct sk_buff *skb; 2632 u8 key_des_ver; 2633 2634 if (rtw_wow->ptk_alg == 3) 2635 key_des_ver = 1; 2636 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2637 key_des_ver = 2; 2638 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2639 key_des_ver = 3; 2640 else 2641 key_des_ver = 0; 2642 2643 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2644 if (!skb) 2645 return NULL; 2646 2647 hdr = skb_put_zero(skb, sizeof(*hdr)); 2648 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2649 IEEE80211_FCTL_TODS | 2650 IEEE80211_FCTL_PROTECTED); 2651 2652 rcu_read_lock(); 2653 2654 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2655 2656 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2657 ether_addr_copy(hdr->addr2, bss_conf->addr); 2658 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2659 2660 rcu_read_unlock(); 2661 2662 skb_put_zero(skb, sec_hdr_len); 2663 2664 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2665 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2666 eapol_pkt->key_des_ver = key_des_ver; 2667 2668 return skb; 2669 } 2670 2671 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2672 struct rtw89_vif_link *rtwvif_link) 2673 { 2674 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2675 struct ieee80211_bss_conf *bss_conf; 2676 struct ieee80211_hdr_3addr *hdr; 2677 struct rtw89_sa_query *sa_query; 2678 struct sk_buff *skb; 2679 2680 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2681 if (!skb) 2682 return NULL; 2683 2684 hdr = skb_put_zero(skb, sizeof(*hdr)); 2685 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2686 IEEE80211_STYPE_ACTION 
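					 /* SA Query responses are robust management
					  * frames, so they are sent protected
					  */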
| 2687 IEEE80211_FCTL_PROTECTED); 2688 2689 rcu_read_lock(); 2690 2691 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2692 2693 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2694 ether_addr_copy(hdr->addr2, bss_conf->addr); 2695 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2696 2697 rcu_read_unlock(); 2698 2699 skb_put_zero(skb, sec_hdr_len); 2700 2701 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2702 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2703 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2704 2705 return skb; 2706 } 2707 2708 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2709 struct rtw89_vif_link *rtwvif_link) 2710 { 2711 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2712 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2713 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2714 struct ieee80211_hdr_3addr *hdr; 2715 struct rtw89_arp_rsp *arp_skb; 2716 struct arphdr *arp_hdr; 2717 struct sk_buff *skb; 2718 __le16 fc; 2719 2720 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2721 if (!skb) 2722 return NULL; 2723 2724 hdr = skb_put_zero(skb, sizeof(*hdr)); 2725 2726 if (rtw_wow->ptk_alg) 2727 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2728 IEEE80211_FCTL_PROTECTED); 2729 else 2730 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2731 2732 hdr->frame_control = fc; 2733 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2734 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2735 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2736 2737 skb_put_zero(skb, sec_hdr_len); 2738 2739 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2740 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2741 arp_skb->llc_type = htons(ETH_P_ARP); 2742 2743 arp_hdr = &arp_skb->arp_hdr; 2744 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2745 arp_hdr->ar_pro = htons(ETH_P_IP); 2746 arp_hdr->ar_hln = ETH_ALEN; 2747 arp_hdr->ar_pln = 4; 2748 arp_hdr->ar_op = htons(ARPOP_REPLY); 2749 2750 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2751 arp_skb->sender_ip = rtwvif->ip_addr; 2752 2753 return skb; 2754 } 2755 2756 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2757 struct rtw89_vif_link *rtwvif_link, 2758 enum rtw89_fw_pkt_ofld_type type, 2759 u8 *id) 2760 { 2761 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2762 int link_id = ieee80211_vif_is_mld(vif) ? 
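			/* a negative link_id makes mac80211 fall back to the
			 * interface's default link data for non-MLD vifs
			 */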
rtwvif_link->link_id : -1; 2763 struct rtw89_pktofld_info *info; 2764 struct sk_buff *skb; 2765 int ret; 2766 2767 info = kzalloc(sizeof(*info), GFP_KERNEL); 2768 if (!info) 2769 return -ENOMEM; 2770 2771 switch (type) { 2772 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2773 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2774 break; 2775 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2776 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2777 break; 2778 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2779 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2780 break; 2781 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2782 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2783 break; 2784 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2785 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2786 break; 2787 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2788 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2789 break; 2790 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2791 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2792 break; 2793 default: 2794 goto err; 2795 } 2796 2797 if (!skb) 2798 goto err; 2799 2800 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2801 kfree_skb(skb); 2802 2803 if (ret) 2804 goto err; 2805 2806 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2807 *id = info->id; 2808 return 0; 2809 2810 err: 2811 kfree(info); 2812 return -ENOMEM; 2813 } 2814 2815 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2816 struct rtw89_vif_link *rtwvif_link, 2817 bool notify_fw) 2818 { 2819 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2820 struct rtw89_pktofld_info *info, *tmp; 2821 2822 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2823 if (notify_fw) 2824 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2825 else 2826 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2827 list_del(&info->list); 2828 kfree(info); 2829 } 2830 } 2831 2832 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2833 { 2834 struct rtw89_vif_link *rtwvif_link; 2835 struct rtw89_vif *rtwvif; 2836 unsigned int link_id; 2837 2838 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2839 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2840 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2841 notify_fw); 2842 } 2843 2844 #define H2C_GENERAL_PKT_LEN 6 2845 #define H2C_GENERAL_PKT_ID_UND 0xff 2846 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2847 struct rtw89_vif_link *rtwvif_link, u8 macid) 2848 { 2849 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2850 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2851 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2852 struct sk_buff *skb; 2853 int ret; 2854 2855 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2856 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2857 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2858 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2859 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2860 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2861 2862 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2863 if (!skb) { 2864 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2865 return -ENOMEM; 2866 } 2867 skb_put(skb, H2C_GENERAL_PKT_LEN); 2868 SET_GENERAL_PKT_MACID(skb->data, macid); 2869 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2870 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2871 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2872 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2873 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
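	/* probe response and CTS-to-self templates are not offloaded by this
	 * helper, so their packet ids stay undefined
	 */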
H2C_GENERAL_PKT_ID_UND); 2874 2875 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2876 H2C_CAT_MAC, 2877 H2C_CL_FW_INFO, 2878 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2879 H2C_GENERAL_PKT_LEN); 2880 2881 ret = rtw89_h2c_tx(rtwdev, skb, false); 2882 if (ret) { 2883 rtw89_err(rtwdev, "failed to send h2c\n"); 2884 goto fail; 2885 } 2886 2887 return 0; 2888 fail: 2889 dev_kfree_skb_any(skb); 2890 2891 return ret; 2892 } 2893 2894 #define H2C_LPS_PARM_LEN 8 2895 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2896 struct rtw89_lps_parm *lps_param) 2897 { 2898 struct sk_buff *skb; 2899 bool done_ack; 2900 int ret; 2901 2902 if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw)) 2903 done_ack = false; 2904 else 2905 done_ack = !lps_param->psmode; 2906 2907 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2908 if (!skb) { 2909 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2910 return -ENOMEM; 2911 } 2912 skb_put(skb, H2C_LPS_PARM_LEN); 2913 2914 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2915 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2916 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2917 SET_LPS_PARM_RLBM(skb->data, 1); 2918 SET_LPS_PARM_SMARTPS(skb->data, 1); 2919 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2920 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2921 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2922 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2923 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2924 2925 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2926 H2C_CAT_MAC, 2927 H2C_CL_MAC_PS, 2928 H2C_FUNC_MAC_LPS_PARM, 0, done_ack, 2929 H2C_LPS_PARM_LEN); 2930 2931 ret = rtw89_h2c_tx(rtwdev, skb, false); 2932 if (ret) { 2933 rtw89_err(rtwdev, "failed to send h2c\n"); 2934 goto fail; 2935 } 2936 2937 return 0; 2938 fail: 2939 dev_kfree_skb_any(skb); 2940 2941 return ret; 2942 } 2943 2944 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2945 { 2946 const struct rtw89_chip_info *chip = rtwdev->chip; 2947 const struct rtw89_chan *chan; 2948 struct rtw89_vif_link *rtwvif_link; 2949 struct rtw89_h2c_lps_ch_info *h2c; 2950 u32 len = sizeof(*h2c); 2951 unsigned int link_id; 2952 struct sk_buff *skb; 2953 bool no_chan = true; 2954 u8 phy_idx; 2955 u32 done; 2956 int ret; 2957 2958 if (chip->chip_gen != RTW89_CHIP_BE) 2959 return 0; 2960 2961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2962 if (!skb) { 2963 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2964 return -ENOMEM; 2965 } 2966 skb_put(skb, len); 2967 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2968 2969 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2970 phy_idx = rtwvif_link->phy_idx; 2971 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2972 continue; 2973 2974 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2975 no_chan = false; 2976 2977 h2c->info[phy_idx].central_ch = chan->channel; 2978 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2979 h2c->info[phy_idx].band = chan->band_type; 2980 h2c->info[phy_idx].bw = chan->band_width; 2981 } 2982 2983 if (no_chan) { 2984 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2985 ret = -ENOENT; 2986 goto fail; 2987 } 2988 2989 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2990 2991 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2992 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2993 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2994 2995 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2996 ret = rtw89_h2c_tx(rtwdev, skb, false); 2997 if (ret) { 2998 rtw89_err(rtwdev, "failed to send h2c\n"); 
2999 goto fail; 3000 } 3001 3002 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 3003 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 3004 if (ret) 3005 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 3006 3007 return 0; 3008 fail: 3009 dev_kfree_skb_any(skb); 3010 3011 return ret; 3012 } 3013 3014 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 3015 struct rtw89_vif *rtwvif) 3016 { 3017 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 3018 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 3019 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12}; 3020 const struct rtw89_chip_info *chip = rtwdev->chip; 3021 struct rtw89_efuse *efuse = &rtwdev->efuse; 3022 struct rtw89_h2c_lps_ml_cmn_info *h2c; 3023 struct rtw89_vif_link *rtwvif_link; 3024 const struct rtw89_chan *chan; 3025 u8 bw_idx = RTW89_BB_BW_20_40; 3026 u32 len = sizeof(*h2c); 3027 unsigned int link_id; 3028 struct sk_buff *skb; 3029 u8 beacon_bw_ofst; 3030 u8 gain_band; 3031 u32 done; 3032 u8 path; 3033 int ret; 3034 int i; 3035 3036 if (chip->chip_gen != RTW89_CHIP_BE) 3037 return 0; 3038 3039 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3040 if (!skb) { 3041 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 3042 return -ENOMEM; 3043 } 3044 skb_put(skb, len); 3045 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 3046 3047 h2c->fmt_id = 0x3; 3048 3049 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 3050 h2c->rfe_type = efuse->rfe_type; 3051 3052 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 3053 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 3054 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3055 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 3056 3057 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 3058 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 3059 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 3060 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 3061 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 3062 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 3063 else 3064 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 3065 3066 /* Fill BW20 RX gain table for beacon mode */ 3067 for (i = 0; i < TIA_GAIN_NUM; i++) { 3068 h2c->tia_gain[rtwvif_link->phy_idx][i] = 3069 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 3070 } 3071 3072 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) { 3073 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx]; 3074 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst; 3075 } 3076 3077 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 3078 gain->lna_gain[gain_band][bw_idx][path], 3079 LNA_GAIN_NUM); 3080 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx], 3081 gain->tia_lna_op1db[gain_band][bw_idx][path], 3082 LNA_GAIN_NUM + 1); 3083 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx], 3084 gain->lna_op1db[gain_band][bw_idx][path], 3085 LNA_GAIN_NUM); 3086 } 3087 3088 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3089 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 3090 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 3091 3092 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 3093 ret = rtw89_h2c_tx(rtwdev, skb, false); 3094 if (ret) { 3095 rtw89_err(rtwdev, "failed to send h2c\n"); 3096 goto fail; 3097 } 3098 3099 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 3100 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 3101 if (ret) 3102 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done 
polling timeout\n"); 3103 3104 return 0; 3105 fail: 3106 dev_kfree_skb_any(skb); 3107 3108 return ret; 3109 } 3110 3111 #define H2C_P2P_ACT_LEN 20 3112 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 3113 struct rtw89_vif_link *rtwvif_link, 3114 struct ieee80211_p2p_noa_desc *desc, 3115 u8 act, u8 noa_id, u8 ctwindow_oppps) 3116 { 3117 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 3118 struct sk_buff *skb; 3119 u8 *cmd; 3120 int ret; 3121 3122 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 3123 if (!skb) { 3124 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3125 return -ENOMEM; 3126 } 3127 skb_put(skb, H2C_P2P_ACT_LEN); 3128 cmd = skb->data; 3129 3130 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 3131 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 3132 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 3133 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 3134 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 3135 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 3136 if (desc) { 3137 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 3138 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 3139 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 3140 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 3141 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 3142 } 3143 3144 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3145 H2C_CAT_MAC, H2C_CL_MAC_PS, 3146 H2C_FUNC_P2P_ACT, 0, 0, 3147 H2C_P2P_ACT_LEN); 3148 3149 ret = rtw89_h2c_tx(rtwdev, skb, false); 3150 if (ret) { 3151 rtw89_err(rtwdev, "failed to send h2c\n"); 3152 goto fail; 3153 } 3154 3155 return 0; 3156 fail: 3157 dev_kfree_skb_any(skb); 3158 3159 return ret; 3160 } 3161 3162 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 3163 struct sk_buff *skb) 3164 { 3165 const struct rtw89_chip_info *chip = rtwdev->chip; 3166 struct rtw89_hal *hal = &rtwdev->hal; 3167 u8 ntx_path; 3168 u8 map_b; 3169 3170 if (chip->rf_path_num == 1) { 3171 ntx_path = RF_A; 3172 map_b = 0; 3173 } else { 3174 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; 3175 map_b = ntx_path == RF_AB ? 1 : 0; 3176 } 3177 3178 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 3179 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 3180 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 3181 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 3182 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 3183 } 3184 3185 #define H2C_CMC_TBL_LEN 68 3186 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 3187 struct rtw89_vif_link *rtwvif_link, 3188 struct rtw89_sta_link *rtwsta_link) 3189 { 3190 const struct rtw89_chip_info *chip = rtwdev->chip; 3191 u8 macid = rtwsta_link ? 
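	/* per-station entries are keyed by the station mac_id; vif-only
	 * entries fall back to the vif's own mac_id
	 */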
rtwsta_link->mac_id : rtwvif_link->mac_id; 3192 struct sk_buff *skb; 3193 int ret; 3194 3195 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3196 if (!skb) { 3197 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3198 return -ENOMEM; 3199 } 3200 skb_put(skb, H2C_CMC_TBL_LEN); 3201 SET_CTRL_INFO_MACID(skb->data, macid); 3202 SET_CTRL_INFO_OPERATION(skb->data, 1); 3203 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3204 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 3205 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3206 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 3207 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 3208 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 3209 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 3210 } 3211 SET_CMC_TBL_MGQ_RPT_EN(skb->data, rtwdev->hci.tx_rpt_enabled); 3212 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 3213 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 3214 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3215 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3216 3217 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3218 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3219 chip->h2c_cctl_func_id, 0, 1, 3220 H2C_CMC_TBL_LEN); 3221 3222 ret = rtw89_h2c_tx(rtwdev, skb, false); 3223 if (ret) { 3224 rtw89_err(rtwdev, "failed to send h2c\n"); 3225 goto fail; 3226 } 3227 3228 return 0; 3229 fail: 3230 dev_kfree_skb_any(skb); 3231 3232 return ret; 3233 } 3234 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 3235 3236 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3237 struct rtw89_vif_link *rtwvif_link, 3238 struct rtw89_sta_link *rtwsta_link) 3239 { 3240 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3241 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3242 u32 len = sizeof(*h2c); 3243 struct sk_buff *skb; 3244 int ret; 3245 3246 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3247 if (!skb) { 3248 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3249 return -ENOMEM; 3250 } 3251 skb_put(skb, len); 3252 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3253 3254 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3255 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3256 3257 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE) | 3258 le32_encode_bits(rtwdev->hci.tx_rpt_enabled, CCTLINFO_G7_W0_MGQ_RPT_EN); 3259 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 3260 3261 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 3262 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 3263 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3264 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 3265 3266 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 3267 3268 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 3269 3270 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3271 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 3272 3273 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3274 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3275 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3276 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3277 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3278 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 3279 3280 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 3281 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 3282 3283 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 3284 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 3285 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 3286 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 3287 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 3288 
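	/* each wN value is paired with an mN mask so firmware only applies
	 * the fields that are explicitly written here
	 */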
h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 3289 3290 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 3291 3292 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 3293 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3294 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3295 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3296 3297 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3298 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3299 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3300 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3301 3302 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3303 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3304 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3305 len); 3306 3307 ret = rtw89_h2c_tx(rtwdev, skb, false); 3308 if (ret) { 3309 rtw89_err(rtwdev, "failed to send h2c\n"); 3310 goto fail; 3311 } 3312 3313 return 0; 3314 fail: 3315 dev_kfree_skb_any(skb); 3316 3317 return ret; 3318 } 3319 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3320 3321 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3322 struct ieee80211_link_sta *link_sta, 3323 u8 *pads) 3324 { 3325 bool ppe_th; 3326 u8 ppe16, ppe8; 3327 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3328 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3329 u8 ru_bitmap; 3330 u8 n, idx, sh; 3331 u16 ppe; 3332 int i; 3333 3334 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3335 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3336 if (!ppe_th) { 3337 u8 pad; 3338 3339 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3340 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3341 3342 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3343 pads[i] = pad; 3344 3345 return; 3346 } 3347 3348 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3349 n = hweight8(ru_bitmap); 3350 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3351 3352 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3353 if (!(ru_bitmap & BIT(i))) { 3354 pads[i] = 1; 3355 continue; 3356 } 3357 3358 idx = n >> 3; 3359 sh = n & 7; 3360 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3361 3362 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3363 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3364 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3365 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3366 3367 if (ppe16 != 7 && ppe8 == 7) 3368 pads[i] = RTW89_PE_DURATION_16; 3369 else if (ppe8 != 7) 3370 pads[i] = RTW89_PE_DURATION_8; 3371 else 3372 pads[i] = RTW89_PE_DURATION_0; 3373 } 3374 } 3375 3376 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3377 struct rtw89_vif_link *rtwvif_link, 3378 struct rtw89_sta_link *rtwsta_link) 3379 { 3380 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3381 const struct rtw89_chip_info *chip = rtwdev->chip; 3382 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3383 rtwvif_link->chanctx_idx); 3384 struct ieee80211_link_sta *link_sta; 3385 struct sk_buff *skb; 3386 u8 pads[RTW89_PPE_BW_NUM]; 3387 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3388 u16 lowest_rate; 3389 int ret; 3390 3391 memset(pads, 0, sizeof(pads)); 3392 3393 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3394 if (!skb) { 3395 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3396 return -ENOMEM; 3397 } 3398 3399 rcu_read_lock(); 3400 3401 if (rtwsta_link) 3402 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3403 3404 if (rtwsta_link && link_sta->he_cap.has_he) 3405 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3406 3407 if (vif->p2p) 3408 lowest_rate = RTW89_HW_RATE_OFDM6; 3409 else if (chan->band_type == RTW89_BAND_2G) 3410 lowest_rate = RTW89_HW_RATE_CCK1; 3411 else 3412 lowest_rate = RTW89_HW_RATE_OFDM6; 3413 3414 skb_put(skb, H2C_CMC_TBL_LEN); 3415 SET_CTRL_INFO_MACID(skb->data, mac_id); 3416 SET_CTRL_INFO_OPERATION(skb->data, 1); 3417 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3418 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3419 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3420 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3421 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3422 if (vif->type == NL80211_IFTYPE_STATION) 3423 SET_CMC_TBL_ULDL(skb->data, 1); 3424 else 3425 SET_CMC_TBL_ULDL(skb->data, 0); 3426 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3427 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3428 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3429 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3430 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3431 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3432 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3433 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3434 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3435 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3436 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3437 } 3438 if (rtwsta_link) 3439 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3440 link_sta->he_cap.has_he); 3441 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3442 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3443 3444 rcu_read_unlock(); 3445 3446 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3447 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3448 chip->h2c_cctl_func_id, 0, 1, 3449 H2C_CMC_TBL_LEN); 3450 3451 ret = rtw89_h2c_tx(rtwdev, skb, false); 3452 if (ret) { 3453 rtw89_err(rtwdev, "failed to send h2c\n"); 3454 goto fail; 3455 } 3456 3457 return 0; 3458 fail: 3459 dev_kfree_skb_any(skb); 3460 3461 return ret; 3462 } 3463 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3464 3465 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3466 struct ieee80211_link_sta *link_sta, 3467 u8 *pads) 3468 { 3469 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3470 u16 ppe_thres_hdr; 3471 u8 ppe16, ppe8; 3472 u8 n, idx, sh; 3473 u8 ru_bitmap; 3474 bool ppe_th; 3475 u16 ppe; 3476 int i; 3477 3478 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3479 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3480 if (!ppe_th) { 3481 u8 pad; 3482 3483 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3484 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3485 3486 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3487 pads[i] = pad; 3488 3489 return; 3490 } 3491 3492 ppe_thres_hdr = 
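	/* the EHT PPE Thresholds field starts with a 9-bit header (NSS count
	 * plus RU index bitmap), hence the unaligned 16-bit load
	 */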
get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3493 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3494 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3495 n = hweight8(ru_bitmap); 3496 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3497 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3498 3499 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3500 if (!(ru_bitmap & BIT(i))) { 3501 pads[i] = 1; 3502 continue; 3503 } 3504 3505 idx = n >> 3; 3506 sh = n & 7; 3507 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3508 3509 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3510 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3511 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3512 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3513 3514 if (ppe16 != 7 && ppe8 == 7) 3515 pads[i] = RTW89_PE_DURATION_16_20; 3516 else if (ppe8 != 7) 3517 pads[i] = RTW89_PE_DURATION_8; 3518 else 3519 pads[i] = RTW89_PE_DURATION_0; 3520 } 3521 } 3522 3523 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3524 struct rtw89_vif_link *rtwvif_link, 3525 struct rtw89_sta_link *rtwsta_link) 3526 { 3527 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3528 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3529 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3530 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3531 struct ieee80211_bss_conf *bss_conf; 3532 struct ieee80211_link_sta *link_sta; 3533 u8 pads[RTW89_PPE_BW_NUM]; 3534 u32 len = sizeof(*h2c); 3535 struct sk_buff *skb; 3536 u16 lowest_rate; 3537 int ret; 3538 3539 memset(pads, 0, sizeof(pads)); 3540 3541 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3542 if (!skb) { 3543 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3544 return -ENOMEM; 3545 } 3546 3547 rcu_read_lock(); 3548 3549 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3550 3551 if (rtwsta_link) { 3552 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3553 3554 if (link_sta->eht_cap.has_eht) 3555 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3556 else if (link_sta->he_cap.has_he) 3557 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3558 } 3559 3560 if (vif->p2p) 3561 lowest_rate = RTW89_HW_RATE_OFDM6; 3562 else if (chan->band_type == RTW89_BAND_2G) 3563 lowest_rate = RTW89_HW_RATE_CCK1; 3564 else 3565 lowest_rate = RTW89_HW_RATE_OFDM6; 3566 3567 skb_put(skb, len); 3568 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3569 3570 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3571 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3572 3573 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3574 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3575 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3576 CCTLINFO_G7_W0_DISDATAFB); 3577 3578 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3579 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3580 3581 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3582 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3583 3584 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3585 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3586 3587 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3588 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3589 3590 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3591 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3592 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3593 
} 3594 3595 if (bss_conf->eht_support) { 3596 u16 punct = bss_conf->chanreq.oper.punctured; 3597 3598 h2c->w4 |= le32_encode_bits(~punct, 3599 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3600 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3601 } 3602 3603 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3604 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3605 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3606 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3607 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3608 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3609 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3610 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3611 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3612 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3613 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3614 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3615 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3616 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3617 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3618 3619 h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) | 3620 le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, 3621 CCTLINFO_G7_W6_ULDL); 3622 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL); 3623 3624 if (rtwsta_link) { 3625 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3626 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3627 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3628 } 3629 3630 rcu_read_unlock(); 3631 3632 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3633 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3634 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3635 len); 3636 3637 ret = rtw89_h2c_tx(rtwdev, skb, false); 3638 if (ret) { 3639 rtw89_err(rtwdev, "failed to send h2c\n"); 3640 goto fail; 3641 } 3642 3643 return 0; 3644 fail: 3645 dev_kfree_skb_any(skb); 3646 3647 return ret; 3648 } 3649 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3650 3651 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3652 struct rtw89_vif_link *rtwvif_link, 3653 struct rtw89_sta_link *rtwsta_link) 3654 { 3655 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3656 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3657 u32 len = sizeof(*h2c); 3658 struct sk_buff *skb; 3659 u16 agg_num = 0; 3660 u8 ba_bmap = 0; 3661 int ret; 3662 u8 tid; 3663 3664 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3665 if (!skb) { 3666 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3667 return -ENOMEM; 3668 } 3669 skb_put(skb, len); 3670 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3671 3672 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3673 if (agg_num == 0) 3674 agg_num = rtwsta->ampdu_params[tid].agg_num; 3675 else 3676 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3677 } 3678 3679 if (agg_num <= 0x20) 3680 ba_bmap = 3; 3681 else if (agg_num > 0x20 && agg_num <= 0x40) 3682 ba_bmap = 0; 3683 else if (agg_num > 0x40 && agg_num <= 0x80) 3684 ba_bmap = 1; 3685 else if (agg_num > 0x80 && agg_num <= 0x100) 3686 ba_bmap = 2; 3687 else if (agg_num > 0x100 && agg_num <= 0x200) 3688 ba_bmap = 4; 3689 else if (agg_num > 0x200 && agg_num <= 0x400) 3690 ba_bmap = 5; 3691 3692 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3693 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3694 3695 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3696 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3697 3698 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3699 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3700 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3701 len); 3702 3703 ret = 
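	/* ba_bmap above maps the smallest negotiated agg_num onto the BA
	 * window size class reported to firmware (up to 1024 frames)
	 */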
rtw89_h2c_tx(rtwdev, skb, false); 3704 if (ret) { 3705 rtw89_err(rtwdev, "failed to send h2c\n"); 3706 goto fail; 3707 } 3708 3709 return 0; 3710 fail: 3711 dev_kfree_skb_any(skb); 3712 3713 return ret; 3714 } 3715 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3716 3717 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3718 struct rtw89_sta_link *rtwsta_link) 3719 { 3720 const struct rtw89_chip_info *chip = rtwdev->chip; 3721 struct sk_buff *skb; 3722 int ret; 3723 3724 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3725 if (!skb) { 3726 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3727 return -ENOMEM; 3728 } 3729 skb_put(skb, H2C_CMC_TBL_LEN); 3730 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3731 SET_CTRL_INFO_OPERATION(skb->data, 1); 3732 if (rtwsta_link->cctl_tx_time) { 3733 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3734 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3735 } 3736 if (rtwsta_link->cctl_tx_retry_limit) { 3737 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3738 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3739 } 3740 3741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3742 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3743 chip->h2c_cctl_func_id, 0, 1, 3744 H2C_CMC_TBL_LEN); 3745 3746 ret = rtw89_h2c_tx(rtwdev, skb, false); 3747 if (ret) { 3748 rtw89_err(rtwdev, "failed to send h2c\n"); 3749 goto fail; 3750 } 3751 3752 return 0; 3753 fail: 3754 dev_kfree_skb_any(skb); 3755 3756 return ret; 3757 } 3758 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl); 3759 3760 int rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3761 struct rtw89_sta_link *rtwsta_link) 3762 { 3763 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3764 u32 len = sizeof(*h2c); 3765 struct sk_buff *skb; 3766 int ret; 3767 3768 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3769 if (!skb) { 3770 rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n"); 3771 return -ENOMEM; 3772 } 3773 skb_put(skb, len); 3774 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3775 3776 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3777 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3778 3779 if (rtwsta_link->cctl_tx_time) { 3780 h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3781 h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL); 3782 3783 h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time, 3784 CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3785 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME); 3786 } 3787 if (rtwsta_link->cctl_tx_retry_limit) { 3788 h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) | 3789 le32_encode_bits(rtwsta_link->data_tx_cnt_lmt, 3790 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3791 h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL | 3792 CCTLINFO_G7_W2_DATA_TX_CNT_LMT); 3793 } 3794 3795 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3796 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3797 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3798 len); 3799 3800 ret = rtw89_h2c_tx(rtwdev, skb, false); 3801 if (ret) { 3802 rtw89_err(rtwdev, "failed to send h2c\n"); 3803 goto fail; 3804 } 3805 3806 return 0; 3807 fail: 3808 dev_kfree_skb_any(skb); 3809 3810 return ret; 3811 } 3812 EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7); 3813 3814 int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3815 struct rtw89_vif_link *rtwvif_link, 3816 u16 punctured) 3817 { 3818 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3819 u32 len = sizeof(*h2c); 3820 struct sk_buff *skb; 3821 int ret; 3822 3823 skb = 
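	/* ACT_SUBCH_CBW carries the bitmap of still-active subchannels, i.e.
	 * the inverse of the EHT puncturing map passed in
	 */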
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3824 if (!skb) { 3825 rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n"); 3826 return -ENOMEM; 3827 } 3828 3829 skb_put(skb, len); 3830 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3831 3832 h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) | 3833 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3834 3835 h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3836 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3837 3838 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3839 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3840 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3841 len); 3842 3843 ret = rtw89_h2c_tx(rtwdev, skb, false); 3844 if (ret) { 3845 rtw89_err(rtwdev, "failed to send h2c\n"); 3846 goto fail; 3847 } 3848 3849 return 0; 3850 fail: 3851 dev_kfree_skb_any(skb); 3852 3853 return ret; 3854 } 3855 EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7); 3856 3857 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3858 struct rtw89_sta_link *rtwsta_link) 3859 { 3860 const struct rtw89_chip_info *chip = rtwdev->chip; 3861 struct sk_buff *skb; 3862 int ret; 3863 3864 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3865 return 0; 3866 3867 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3868 if (!skb) { 3869 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3870 return -ENOMEM; 3871 } 3872 skb_put(skb, H2C_CMC_TBL_LEN); 3873 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3874 SET_CTRL_INFO_OPERATION(skb->data, 1); 3875 3876 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3877 3878 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3879 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3880 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3881 H2C_CMC_TBL_LEN); 3882 3883 ret = rtw89_h2c_tx(rtwdev, skb, false); 3884 if (ret) { 3885 rtw89_err(rtwdev, "failed to send h2c\n"); 3886 goto fail; 3887 } 3888 3889 return 0; 3890 fail: 3891 dev_kfree_skb_any(skb); 3892 3893 return ret; 3894 } 3895 3896 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3897 struct rtw89_vif_link *rtwvif_link) 3898 { 3899 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3900 rtwvif_link->chanctx_idx); 3901 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3902 struct rtw89_h2c_bcn_upd *h2c; 3903 struct sk_buff *skb_beacon; 3904 struct ieee80211_hdr *hdr; 3905 u32 len = sizeof(*h2c); 3906 struct sk_buff *skb; 3907 int bcn_total_len; 3908 u16 beacon_rate; 3909 u16 tim_offset; 3910 void *noa_data; 3911 u8 noa_len; 3912 int ret; 3913 3914 if (vif->p2p) 3915 beacon_rate = RTW89_HW_RATE_OFDM6; 3916 else if (chan->band_type == RTW89_BAND_2G) 3917 beacon_rate = RTW89_HW_RATE_CCK1; 3918 else 3919 beacon_rate = RTW89_HW_RATE_OFDM6; 3920 3921 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3922 NULL, 0); 3923 if (!skb_beacon) { 3924 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3925 return -ENOMEM; 3926 } 3927 3928 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3929 if (noa_len && 3930 (noa_len <= skb_tailroom(skb_beacon) || 3931 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3932 skb_put_data(skb_beacon, noa_data, noa_len); 3933 } 3934 3935 hdr = (struct ieee80211_hdr *)skb_beacon; 3936 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3937 3938 bcn_total_len = len + skb_beacon->len; 3939 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3940 if (!skb) { 3941 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3942 dev_kfree_skb_any(skb_beacon); 3943 return -ENOMEM; 
3944 } 3945 skb_put(skb, len); 3946 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3947 3948 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3949 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3950 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3951 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3952 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3953 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3954 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3955 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3956 3957 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3958 dev_kfree_skb_any(skb_beacon); 3959 3960 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3961 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3962 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3963 bcn_total_len); 3964 3965 ret = rtw89_h2c_tx(rtwdev, skb, false); 3966 if (ret) { 3967 rtw89_err(rtwdev, "failed to send h2c\n"); 3968 dev_kfree_skb_any(skb); 3969 return ret; 3970 } 3971 3972 return 0; 3973 } 3974 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3975 3976 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3977 struct rtw89_vif_link *rtwvif_link) 3978 { 3979 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3980 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3981 struct rtw89_h2c_bcn_upd_be *h2c; 3982 struct sk_buff *skb_beacon; 3983 struct ieee80211_hdr *hdr; 3984 u32 len = sizeof(*h2c); 3985 struct sk_buff *skb; 3986 int bcn_total_len; 3987 u16 beacon_rate; 3988 u16 tim_offset; 3989 void *noa_data; 3990 u8 noa_len; 3991 int ret; 3992 3993 if (vif->p2p) 3994 beacon_rate = RTW89_HW_RATE_OFDM6; 3995 else if (chan->band_type == RTW89_BAND_2G) 3996 beacon_rate = RTW89_HW_RATE_CCK1; 3997 else 3998 beacon_rate = RTW89_HW_RATE_OFDM6; 3999 4000 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 4001 NULL, 0); 4002 if (!skb_beacon) { 4003 rtw89_err(rtwdev, "failed to get beacon skb\n"); 4004 return -ENOMEM; 4005 } 4006 4007 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 4008 if (noa_len && 4009 (noa_len <= skb_tailroom(skb_beacon) || 4010 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 4011 skb_put_data(skb_beacon, noa_data, noa_len); 4012 } 4013 4014 hdr = (struct ieee80211_hdr *)skb_beacon; 4015 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 4016 4017 bcn_total_len = len + skb_beacon->len; 4018 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 4019 if (!skb) { 4020 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 4021 dev_kfree_skb_any(skb_beacon); 4022 return -ENOMEM; 4023 } 4024 skb_put(skb, len); 4025 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 4026 4027 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 4028 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 4029 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 4030 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 4031 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 4032 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 4033 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 4034 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 4035 4036 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 4037 dev_kfree_skb_any(skb_beacon); 4038 4039 
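	/* The beacon frame was appended right after the fixed bcn_upd_be
	 * descriptor above, so the H2C header below must be built with
	 * bcn_total_len (descriptor plus beacon) rather than sizeof(*h2c);
	 * H2C_FUNC_MAC_BCN_UPD_BE selects the BE-generation variant of the
	 * beacon-update command.
	 */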
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4040 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 4041 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 4042 bcn_total_len); 4043 4044 ret = rtw89_h2c_tx(rtwdev, skb, false); 4045 if (ret) { 4046 rtw89_err(rtwdev, "failed to send h2c\n"); 4047 goto fail; 4048 } 4049 4050 return 0; 4051 4052 fail: 4053 dev_kfree_skb_any(skb); 4054 4055 return ret; 4056 } 4057 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 4058 4059 int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev, 4060 struct rtw89_vif_link *rtwvif_link, u32 offset) 4061 { 4062 struct rtw89_h2c_tbtt_tuning *h2c; 4063 u32 len = sizeof(*h2c); 4064 struct sk_buff *skb; 4065 int ret; 4066 4067 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4068 if (!skb) { 4069 rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n"); 4070 return -ENOMEM; 4071 } 4072 skb_put(skb, len); 4073 h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data; 4074 4075 h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) | 4076 le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT); 4077 h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT); 4078 4079 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4080 H2C_CAT_MAC, H2C_CL_MAC_PS, 4081 H2C_FUNC_TBTT_TUNING, 0, 0, 4082 len); 4083 4084 ret = rtw89_h2c_tx(rtwdev, skb, false); 4085 if (ret) { 4086 rtw89_err(rtwdev, "failed to send h2c\n"); 4087 goto fail; 4088 } 4089 4090 return 0; 4091 fail: 4092 dev_kfree_skb_any(skb); 4093 4094 return ret; 4095 } 4096 4097 int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4098 { 4099 #define RTW89_BCN_TO_VAL_MIN 4 4100 #define RTW89_BCN_TO_VAL_MAX 64 4101 #define RTW89_DTIM_TO_VAL_MIN 7 4102 #define RTW89_DTIM_TO_VAL_MAX 15 4103 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 4104 struct rtw89_h2c_pwr_lvl *h2c; 4105 u32 len = sizeof(*h2c); 4106 struct sk_buff *skb; 4107 u8 bcn_to_val; 4108 int ret; 4109 4110 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4111 if (!skb) { 4112 rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n"); 4113 return -ENOMEM; 4114 } 4115 skb_put(skb, len); 4116 h2c = (struct rtw89_h2c_pwr_lvl *)skb->data; 4117 4118 bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout, 4119 RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX); 4120 4121 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) | 4122 le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) | 4123 le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) | 4124 le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) | 4125 le32_encode_bits(RTW89_DTIM_TO_VAL_MIN, 4126 RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL); 4127 4128 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4129 H2C_CAT_MAC, H2C_CL_MAC_PS, 4130 H2C_FUNC_PS_POWER_LEVEL, 0, 0, 4131 len); 4132 4133 ret = rtw89_h2c_tx(rtwdev, skb, false); 4134 if (ret) { 4135 rtw89_err(rtwdev, "failed to send h2c\n"); 4136 goto fail; 4137 } 4138 4139 return 0; 4140 fail: 4141 dev_kfree_skb_any(skb); 4142 4143 return ret; 4144 } 4145 4146 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 4147 struct rtw89_vif_link *rtwvif_link, 4148 struct rtw89_sta_link *rtwsta_link, 4149 enum rtw89_upd_mode upd_mode) 4150 { 4151 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 4152 struct rtw89_h2c_role_maintain *h2c; 4153 u32 len = sizeof(*h2c); 4154 struct sk_buff *skb; 4155 u8 self_role; 4156 int ret; 4157 4158 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 4159 if (rtwsta_link) 4160 self_role = RTW89_SELF_ROLE_AP_CLIENT; 4161 else 4162 self_role = rtwvif_link->self_role; 4163 } else { 4164 self_role = rtwvif_link->self_role; 4165 } 4166 4167 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4168 if (!skb) { 4169 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4170 return -ENOMEM; 4171 } 4172 skb_put(skb, len); 4173 h2c = (struct rtw89_h2c_role_maintain *)skb->data; 4174 4175 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) | 4176 le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) | 4177 le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) | 4178 le32_encode_bits(rtwvif_link->wifi_role, 4179 RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) | 4180 le32_encode_bits(rtwvif_link->mac_idx, 4181 RTW89_H2C_ROLE_MAINTAIN_W0_BAND) | 4182 le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT); 4183 4184 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4185 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4186 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 4187 len); 4188 4189 ret = rtw89_h2c_tx(rtwdev, skb, false); 4190 if (ret) { 4191 rtw89_err(rtwdev, "failed to send h2c\n"); 4192 goto fail; 4193 } 4194 4195 return 0; 4196 fail: 4197 dev_kfree_skb_any(skb); 4198 4199 return ret; 4200 } 4201 4202 static enum rtw89_fw_sta_type 4203 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4204 struct rtw89_sta_link *rtwsta_link) 4205 { 4206 struct ieee80211_bss_conf *bss_conf; 4207 struct ieee80211_link_sta *link_sta; 4208 enum rtw89_fw_sta_type type; 4209 4210 rcu_read_lock(); 4211 4212 if (!rtwsta_link) 4213 goto by_vif; 4214 4215 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 4216 4217 if (link_sta->eht_cap.has_eht) 4218 type = RTW89_FW_BE_STA; 4219 else if (link_sta->he_cap.has_he) 4220 type = RTW89_FW_AX_STA; 4221 else 4222 type = RTW89_FW_N_AC_STA; 4223 4224 goto out; 4225 4226 by_vif: 4227 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 4228 4229 if (bss_conf->eht_support) 4230 type = RTW89_FW_BE_STA; 4231 else if (bss_conf->he_support) 4232 type = RTW89_FW_AX_STA; 4233 else 4234 type = RTW89_FW_N_AC_STA; 4235 4236 out: 4237 rcu_read_unlock(); 4238 4239 return type; 4240 } 4241 4242 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4243 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 4244 { 4245 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 4246 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4247 bool is_mld = ieee80211_vif_is_mld(vif); 4248 u8 self_role = rtwvif_link->self_role; 4249 enum rtw89_fw_sta_type sta_type; 4250 u8 net_type = rtwvif_link->net_type; 4251 struct rtw89_h2c_join_v1 *h2c_v1; 4252 struct rtw89_h2c_join *h2c; 4253 u32 len = sizeof(*h2c); 4254 bool format_v1 = false; 4255 struct sk_buff *skb; 4256 u8 main_mac_id; 4257 bool init_ps; 4258 int ret; 4259 4260 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 4261 len = sizeof(*h2c_v1); 4262 format_v1 = true; 4263 } 4264 4265 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 4266 self_role = RTW89_SELF_ROLE_AP_CLIENT; 4267 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 4268 } 4269 4270 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4271 if (!skb) { 4272 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4273 return -ENOMEM; 4274 } 4275 skb_put(skb, len); 4276 h2c = (struct rtw89_h2c_join *)skb->data; 4277 4278 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 4279 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 4280 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 4281 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 4282 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 4283 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 4284 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 4285 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 4286 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 4287 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 4288 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 4289 le32_encode_bits(rtwvif_link->wifi_role, 4290 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 4291 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 4292 4293 if (!format_v1) 4294 goto done; 4295 4296 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 4297 4298 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 4299 init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif); 4300 4301 if (rtwsta_link) 4302 main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta); 4303 else 4304 main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif); 4305 4306 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) | 4307 le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) | 4308 le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) | 4309 le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR, 4310 RTW89_H2C_JOININFO_W1_MLO_MODE) | 4311 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) | 4312 le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) | 4313 le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) | 4314 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US, 4315 RTW89_H2C_JOININFO_W1_EMLSR_PADDING) | 4316 le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US, 4317 RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) | 4318 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) | 4319 le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT); 4320 4321 h2c_v1->w2 = 0; 4322 4323 done: 4324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4325 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4326 H2C_FUNC_MAC_JOININFO, 0, 1, 4327 len); 4328 4329 ret = rtw89_h2c_tx(rtwdev, skb, false); 4330 if (ret) { 4331 rtw89_err(rtwdev, "failed to send h2c\n"); 4332 goto fail; 4333 } 4334 4335 return 0; 4336 fail: 4337 dev_kfree_skb_any(skb); 4338 4339 return ret; 4340 } 4341 4342 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 4343 { 4344 struct rtw89_h2c_notify_dbcc *h2c; 4345 u32 len = sizeof(*h2c); 4346 struct sk_buff *skb; 4347 int ret; 4348 4349 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4350 if (!skb) { 4351 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 4352 return -ENOMEM; 4353 } 4354 skb_put(skb, len); 4355 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 4356 4357 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 4358 4359 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4360 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 4361 H2C_FUNC_NOTIFY_DBCC, 0, 1, 4362 len); 4363 4364 ret = rtw89_h2c_tx(rtwdev, skb, false); 
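	/* Same error-handling convention as the other H2C senders in this
	 * file: the skb is freed here (via the fail label) only when
	 * rtw89_h2c_tx() reports a failure; on success it is assumed to have
	 * been handed off to the TX path.
	 */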
4365 if (ret) { 4366 rtw89_err(rtwdev, "failed to send h2c\n"); 4367 goto fail; 4368 } 4369 4370 return 0; 4371 fail: 4372 dev_kfree_skb_any(skb); 4373 4374 return ret; 4375 } 4376 4377 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 4378 bool pause) 4379 { 4380 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 4381 struct rtw89_fw_macid_pause_grp *h2c; 4382 __le32 set = cpu_to_le32(BIT(sh)); 4383 u8 h2c_macid_pause_id; 4384 struct sk_buff *skb; 4385 u32 len; 4386 int ret; 4387 4388 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 4389 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 4390 len = sizeof(*h2c_new); 4391 } else { 4392 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 4393 len = sizeof(*h2c); 4394 } 4395 4396 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4397 if (!skb) { 4398 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 4399 return -ENOMEM; 4400 } 4401 skb_put(skb, len); 4402 4403 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 4404 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 4405 4406 h2c_new->n[0].pause_mask_grp[grp] = set; 4407 h2c_new->n[0].sleep_mask_grp[grp] = set; 4408 if (pause) { 4409 h2c_new->n[0].pause_grp[grp] = set; 4410 h2c_new->n[0].sleep_grp[grp] = set; 4411 } 4412 } else { 4413 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 4414 4415 h2c->mask_grp[grp] = set; 4416 if (pause) 4417 h2c->pause_grp[grp] = set; 4418 } 4419 4420 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4421 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4422 h2c_macid_pause_id, 1, 0, 4423 len); 4424 4425 ret = rtw89_h2c_tx(rtwdev, skb, false); 4426 if (ret) { 4427 rtw89_err(rtwdev, "failed to send h2c\n"); 4428 goto fail; 4429 } 4430 4431 return 0; 4432 fail: 4433 dev_kfree_skb_any(skb); 4434 4435 return ret; 4436 } 4437 4438 #define H2C_EDCA_LEN 12 4439 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4440 u8 ac, u32 val) 4441 { 4442 struct sk_buff *skb; 4443 int ret; 4444 4445 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 4446 if (!skb) { 4447 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 4448 return -ENOMEM; 4449 } 4450 skb_put(skb, H2C_EDCA_LEN); 4451 RTW89_SET_EDCA_SEL(skb->data, 0); 4452 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 4453 RTW89_SET_EDCA_WMM(skb->data, 0); 4454 RTW89_SET_EDCA_AC(skb->data, ac); 4455 RTW89_SET_EDCA_PARAM(skb->data, val); 4456 4457 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4458 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4459 H2C_FUNC_USR_EDCA, 0, 1, 4460 H2C_EDCA_LEN); 4461 4462 ret = rtw89_h2c_tx(rtwdev, skb, false); 4463 if (ret) { 4464 rtw89_err(rtwdev, "failed to send h2c\n"); 4465 goto fail; 4466 } 4467 4468 return 0; 4469 fail: 4470 dev_kfree_skb_any(skb); 4471 4472 return ret; 4473 } 4474 4475 #define H2C_TSF32_TOGL_LEN 4 4476 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 4477 struct rtw89_vif_link *rtwvif_link, 4478 bool en) 4479 { 4480 struct sk_buff *skb; 4481 u16 early_us = en ? 
2000 : 0; 4482 u8 *cmd; 4483 int ret; 4484 4485 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 4486 if (!skb) { 4487 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 4488 return -ENOMEM; 4489 } 4490 skb_put(skb, H2C_TSF32_TOGL_LEN); 4491 cmd = skb->data; 4492 4493 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 4494 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 4495 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 4496 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 4497 4498 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4499 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4500 H2C_FUNC_TSF32_TOGL, 0, 0, 4501 H2C_TSF32_TOGL_LEN); 4502 4503 ret = rtw89_h2c_tx(rtwdev, skb, false); 4504 if (ret) { 4505 rtw89_err(rtwdev, "failed to send h2c\n"); 4506 goto fail; 4507 } 4508 4509 return 0; 4510 fail: 4511 dev_kfree_skb_any(skb); 4512 4513 return ret; 4514 } 4515 4516 #define H2C_OFLD_CFG_LEN 8 4517 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4518 { 4519 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4520 struct sk_buff *skb; 4521 int ret; 4522 4523 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4524 if (!skb) { 4525 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4526 return -ENOMEM; 4527 } 4528 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4529 4530 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4531 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4532 H2C_FUNC_OFLD_CFG, 0, 1, 4533 H2C_OFLD_CFG_LEN); 4534 4535 ret = rtw89_h2c_tx(rtwdev, skb, false); 4536 if (ret) { 4537 rtw89_err(rtwdev, "failed to send h2c\n"); 4538 goto fail; 4539 } 4540 4541 return 0; 4542 fail: 4543 dev_kfree_skb_any(skb); 4544 4545 return ret; 4546 } 4547 4548 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4549 { 4550 struct rtw89_h2c_tx_duty *h2c; 4551 u32 len = sizeof(*h2c); 4552 struct sk_buff *skb; 4553 u16 pause, active; 4554 int ret; 4555 4556 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4557 if (!skb) { 4558 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4559 return -ENOMEM; 4560 } 4561 4562 skb_put(skb, len); 4563 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4564 4565 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4566 4567 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4568 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4569 } else { 4570 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4571 pause = 100 - active; 4572 4573 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4574 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4575 } 4576 4577 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4578 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4579 H2C_FUNC_TX_DUTY, 0, 0, len); 4580 4581 ret = rtw89_h2c_tx(rtwdev, skb, false); 4582 if (ret) { 4583 rtw89_err(rtwdev, "failed to send h2c\n"); 4584 goto fail; 4585 } 4586 4587 return 0; 4588 fail: 4589 dev_kfree_skb_any(skb); 4590 4591 return ret; 4592 } 4593 4594 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4595 struct rtw89_vif_link *rtwvif_link, 4596 bool connect) 4597 { 4598 struct ieee80211_bss_conf *bss_conf; 4599 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4600 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4601 struct rtw89_h2c_bcnfltr *h2c; 4602 u32 len = sizeof(*h2c); 4603 struct sk_buff *skb; 4604 u8 max_cnt, cnt; 4605 int ret; 4606 4607 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4608 return -EINVAL; 4609 4610 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 
4611 return -EINVAL; 4612 4613 rcu_read_lock(); 4614 4615 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4616 4617 if (bss_conf->cqm_rssi_hyst) 4618 hyst = bss_conf->cqm_rssi_hyst; 4619 if (bss_conf->cqm_rssi_thold) 4620 thold = bss_conf->cqm_rssi_thold; 4621 4622 rcu_read_unlock(); 4623 4624 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4625 if (!skb) { 4626 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4627 return -ENOMEM; 4628 } 4629 4630 skb_put(skb, len); 4631 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4632 4633 if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw)) 4634 max_cnt = BIT(7) - 1; 4635 else 4636 max_cnt = BIT(4) - 1; 4637 4638 cnt = min(RTW89_BCN_LOSS_CNT, max_cnt); 4639 4640 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4641 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4642 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4643 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4644 RTW89_H2C_BCNFLTR_W0_MODE) | 4645 le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) | 4646 le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) | 4647 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4648 le32_encode_bits(thold + MAX_RSSI, 4649 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4650 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4651 4652 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4653 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4654 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4655 4656 ret = rtw89_h2c_tx(rtwdev, skb, false); 4657 if (ret) { 4658 rtw89_err(rtwdev, "failed to send h2c\n"); 4659 goto fail; 4660 } 4661 4662 return 0; 4663 fail: 4664 dev_kfree_skb_any(skb); 4665 4666 return ret; 4667 } 4668 4669 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4670 struct rtw89_rx_phy_ppdu *phy_ppdu) 4671 { 4672 struct rtw89_h2c_ofld_rssi *h2c; 4673 u32 len = sizeof(*h2c); 4674 struct sk_buff *skb; 4675 s8 rssi; 4676 int ret; 4677 4678 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4679 return -EINVAL; 4680 4681 if (!phy_ppdu) 4682 return -EINVAL; 4683 4684 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4685 if (!skb) { 4686 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4687 return -ENOMEM; 4688 } 4689 4690 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4691 skb_put(skb, len); 4692 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4693 4694 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4695 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4696 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4697 4698 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4699 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4700 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4701 4702 ret = rtw89_h2c_tx(rtwdev, skb, false); 4703 if (ret) { 4704 rtw89_err(rtwdev, "failed to send h2c\n"); 4705 goto fail; 4706 } 4707 4708 return 0; 4709 fail: 4710 dev_kfree_skb_any(skb); 4711 4712 return ret; 4713 } 4714 4715 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4716 { 4717 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4718 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4719 struct rtw89_h2c_ofld *h2c; 4720 u32 len = sizeof(*h2c); 4721 struct sk_buff *skb; 4722 int ret; 4723 4724 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4725 return -EINVAL; 4726 4727 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4728 if (!skb) { 4729 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4730 return 
-ENOMEM; 4731 } 4732 4733 skb_put(skb, len); 4734 h2c = (struct rtw89_h2c_ofld *)skb->data; 4735 4736 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4737 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4738 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4739 4740 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4741 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4742 H2C_FUNC_OFLD_TP, 0, 1, len); 4743 4744 ret = rtw89_h2c_tx(rtwdev, skb, false); 4745 if (ret) { 4746 rtw89_err(rtwdev, "failed to send h2c\n"); 4747 goto fail; 4748 } 4749 4750 return 0; 4751 fail: 4752 dev_kfree_skb_any(skb); 4753 4754 return ret; 4755 } 4756 4757 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4758 { 4759 const struct rtw89_chip_info *chip = rtwdev->chip; 4760 struct rtw89_h2c_ra_v1 *h2c_v1; 4761 struct rtw89_h2c_ra *h2c; 4762 u32 len = sizeof(*h2c); 4763 struct sk_buff *skb; 4764 u8 ver = U8_MAX; 4765 int ret; 4766 4767 if (chip->chip_gen == RTW89_CHIP_AX) { 4768 len = sizeof(*h2c); 4769 ver = 0; 4770 } else { 4771 len = sizeof(*h2c_v1); 4772 ver = 1; 4773 } 4774 4775 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4776 if (!skb) { 4777 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4778 return -ENOMEM; 4779 } 4780 skb_put(skb, len); 4781 h2c = (struct rtw89_h2c_ra *)skb->data; 4782 rtw89_debug(rtwdev, RTW89_DBG_RA, 4783 "ra cmd msk: %llx ", ra->ra_mask); 4784 4785 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4786 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4787 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4788 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4789 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4790 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4791 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4792 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4793 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4794 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4795 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4796 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4797 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4798 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4799 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4800 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4801 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4802 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4803 4804 if (!csi || ver >= 1) 4805 goto next_v1; 4806 4807 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4808 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4809 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4810 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4811 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4812 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4813 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4814 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4815 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4816 4817 next_v1: 4818 if (ver < 1) 4819 goto done; 4820 4821 h2c->w3 |= le32_encode_bits(ra->partial_bw_er, 4822 RTW89_H2C_RA_V1_W3_PARTIAL_BW_SU_ER) | 4823 le32_encode_bits(ra->band, 
RTW89_H2C_RA_V1_W3_BAND); 4824 4825 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4826 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4827 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4828 4829 done: 4830 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4831 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4832 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4833 len); 4834 4835 ret = rtw89_h2c_tx(rtwdev, skb, false); 4836 if (ret) { 4837 rtw89_err(rtwdev, "failed to send h2c\n"); 4838 goto fail; 4839 } 4840 4841 return 0; 4842 fail: 4843 dev_kfree_skb_any(skb); 4844 4845 return ret; 4846 } 4847 4848 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4849 { 4850 struct rtw89_btc *btc = &rtwdev->btc; 4851 struct rtw89_btc_dm *dm = &btc->dm; 4852 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4853 struct rtw89_btc_module *module = &init_info->module; 4854 struct rtw89_btc_ant_info *ant = &module->ant; 4855 struct rtw89_h2c_cxinit *h2c; 4856 u32 len = sizeof(*h2c); 4857 struct sk_buff *skb; 4858 int ret; 4859 4860 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4861 if (!skb) { 4862 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4863 return -ENOMEM; 4864 } 4865 skb_put(skb, len); 4866 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4867 4868 h2c->hdr.type = type; 4869 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4870 4871 h2c->ant_type = ant->type; 4872 h2c->ant_num = ant->num; 4873 h2c->ant_iso = ant->isolation; 4874 h2c->ant_info = 4875 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4876 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4877 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4878 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4879 4880 h2c->mod_rfe = module->rfe_type; 4881 h2c->mod_cv = module->cv; 4882 h2c->mod_info = 4883 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4884 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4885 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4886 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4887 h2c->mod_adie_kt = module->kt_ver_adie; 4888 h2c->wl_gch = init_info->wl_guard_ch; 4889 4890 h2c->info = 4891 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4892 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4893 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4894 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4895 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4896 4897 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4898 H2C_CAT_OUTSRC, BTFC_SET, 4899 SET_DRV_INFO, 0, 0, 4900 len); 4901 4902 ret = rtw89_h2c_tx(rtwdev, skb, false); 4903 if (ret) { 4904 rtw89_err(rtwdev, "failed to send h2c\n"); 4905 goto fail; 4906 } 4907 4908 return 0; 4909 fail: 4910 dev_kfree_skb_any(skb); 4911 4912 return ret; 4913 } 4914 4915 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4916 { 4917 struct rtw89_btc *btc = &rtwdev->btc; 4918 struct rtw89_btc_dm *dm = &btc->dm; 4919 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4920 struct rtw89_h2c_cxinit_v7 *h2c; 4921 u32 len = sizeof(*h2c); 4922 struct sk_buff *skb; 4923 int ret; 4924 4925 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4926 if (!skb) { 4927 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4928 return -ENOMEM; 4929 } 4930 
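	/* Unlike rtw89_fw_h2c_cxdrv_init() above, which packs each antenna
	 * and module field into the command one by one, the v7 format copies
	 * the versioned init_info structure wholesale (h2c->init below) and
	 * only adds the CX driver-info header, including the fcxinit version
	 * byte, in front of it.
	 */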
skb_put(skb, len); 4931 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4932 4933 h2c->hdr.type = type; 4934 h2c->hdr.ver = btc->ver->fcxinit; 4935 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4936 h2c->init = *init_info; 4937 4938 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4939 H2C_CAT_OUTSRC, BTFC_SET, 4940 SET_DRV_INFO, 0, 0, 4941 len); 4942 4943 ret = rtw89_h2c_tx(rtwdev, skb, false); 4944 if (ret) { 4945 rtw89_err(rtwdev, "failed to send h2c\n"); 4946 goto fail; 4947 } 4948 4949 return 0; 4950 fail: 4951 dev_kfree_skb_any(skb); 4952 4953 return ret; 4954 } 4955 4956 #define PORT_DATA_OFFSET 4 4957 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4958 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4959 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4960 4961 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4962 { 4963 struct rtw89_btc *btc = &rtwdev->btc; 4964 const struct rtw89_btc_ver *ver = btc->ver; 4965 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4966 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4967 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4968 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4969 struct sk_buff *skb; 4970 u32 len; 4971 u8 offset = 0; 4972 u8 *cmd; 4973 int ret; 4974 int i; 4975 4976 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4977 4978 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4979 if (!skb) { 4980 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4981 return -ENOMEM; 4982 } 4983 skb_put(skb, len); 4984 cmd = skb->data; 4985 4986 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4987 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4988 4989 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4990 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4991 4992 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4993 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4994 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4995 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4996 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4997 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4998 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4999 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5000 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5001 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5002 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5003 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5004 5005 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5006 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 5007 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 5008 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 5009 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 5010 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 5011 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 5012 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 5013 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 5014 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 5015 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 5016 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 5017 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 5018 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 5019 } 5020 5021 
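	/* All cxdrv_* H2Cs in this file are sent with the same
	 * OUTSRC/BTFC_SET/SET_DRV_INFO header; the firmware presumably
	 * distinguishes the payloads by the type value written through
	 * RTW89_SET_FWCMD_CXHDR_TYPE() above.
	 */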
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5022 H2C_CAT_OUTSRC, BTFC_SET, 5023 SET_DRV_INFO, 0, 0, 5024 len); 5025 5026 ret = rtw89_h2c_tx(rtwdev, skb, false); 5027 if (ret) { 5028 rtw89_err(rtwdev, "failed to send h2c\n"); 5029 goto fail; 5030 } 5031 5032 return 0; 5033 fail: 5034 dev_kfree_skb_any(skb); 5035 5036 return ret; 5037 } 5038 5039 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 5040 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 5041 5042 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 5043 { 5044 struct rtw89_btc *btc = &rtwdev->btc; 5045 const struct rtw89_btc_ver *ver = btc->ver; 5046 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5047 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 5048 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5049 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 5050 struct sk_buff *skb; 5051 u32 len; 5052 u8 *cmd, offset; 5053 int ret; 5054 int i; 5055 5056 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 5057 5058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5059 if (!skb) { 5060 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5061 return -ENOMEM; 5062 } 5063 skb_put(skb, len); 5064 cmd = skb->data; 5065 5066 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5067 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5068 5069 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5070 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5071 5072 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5073 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5074 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5075 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5076 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5077 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5078 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5079 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5080 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5081 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5082 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5083 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5084 5085 offset = PORT_DATA_OFFSET; 5086 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5087 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 5088 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 5089 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 5090 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 5091 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 5092 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 5093 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 5094 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 5095 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 5096 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 5097 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 5098 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 5099 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 5100 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 5101 } 5102 5103 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5104 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 5105 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, 
role_info->mrole_noa_duration, offset); 5106 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5107 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5108 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5109 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5110 5111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5112 H2C_CAT_OUTSRC, BTFC_SET, 5113 SET_DRV_INFO, 0, 0, 5114 len); 5115 5116 ret = rtw89_h2c_tx(rtwdev, skb, false); 5117 if (ret) { 5118 rtw89_err(rtwdev, "failed to send h2c\n"); 5119 goto fail; 5120 } 5121 5122 return 0; 5123 fail: 5124 dev_kfree_skb_any(skb); 5125 5126 return ret; 5127 } 5128 5129 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 5130 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 5131 5132 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 5133 { 5134 struct rtw89_btc *btc = &rtwdev->btc; 5135 const struct rtw89_btc_ver *ver = btc->ver; 5136 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5137 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 5138 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5139 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 5140 struct sk_buff *skb; 5141 u32 len; 5142 u8 *cmd, offset; 5143 int ret; 5144 int i; 5145 5146 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 5147 5148 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5149 if (!skb) { 5150 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5151 return -ENOMEM; 5152 } 5153 skb_put(skb, len); 5154 cmd = skb->data; 5155 5156 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5157 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5158 5159 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5160 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5161 5162 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5163 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5164 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5165 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5166 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5167 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5168 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5169 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5170 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5171 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5172 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5173 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5174 5175 offset = PORT_DATA_OFFSET; 5176 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5177 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 5178 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 5179 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 5180 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 5181 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 5182 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 5183 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 5184 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 5185 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 5186 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 5187 } 5188 5189 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5190 
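	/* The multi-role/DBCC block occupies the last
	 * H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN (12) bytes of the command, after
	 * the per-port active-role records, so the setters below use this
	 * tail offset rather than the PORT_DATA_OFFSET used in the loop
	 * above.
	 */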
RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 5191 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 5192 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5193 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5194 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5195 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5196 5197 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5198 H2C_CAT_OUTSRC, BTFC_SET, 5199 SET_DRV_INFO, 0, 0, 5200 len); 5201 5202 ret = rtw89_h2c_tx(rtwdev, skb, false); 5203 if (ret) { 5204 rtw89_err(rtwdev, "failed to send h2c\n"); 5205 goto fail; 5206 } 5207 5208 return 0; 5209 fail: 5210 dev_kfree_skb_any(skb); 5211 5212 return ret; 5213 } 5214 5215 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 5216 { 5217 struct rtw89_btc *btc = &rtwdev->btc; 5218 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 5219 struct rtw89_h2c_cxrole_v7 *h2c; 5220 u32 len = sizeof(*h2c); 5221 struct sk_buff *skb; 5222 int ret; 5223 5224 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5225 if (!skb) { 5226 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5227 return -ENOMEM; 5228 } 5229 skb_put(skb, len); 5230 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 5231 5232 h2c->hdr.type = type; 5233 h2c->hdr.ver = btc->ver->fwlrole; 5234 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5235 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5236 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5237 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5238 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5239 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 5240 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 5241 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 5242 5243 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5244 H2C_CAT_OUTSRC, BTFC_SET, 5245 SET_DRV_INFO, 0, 0, 5246 len); 5247 5248 ret = rtw89_h2c_tx(rtwdev, skb, false); 5249 if (ret) { 5250 rtw89_err(rtwdev, "failed to send h2c\n"); 5251 goto fail; 5252 } 5253 5254 return 0; 5255 fail: 5256 dev_kfree_skb_any(skb); 5257 5258 return ret; 5259 } 5260 5261 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 5262 { 5263 struct rtw89_btc *btc = &rtwdev->btc; 5264 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 5265 struct rtw89_h2c_cxrole_v8 *h2c; 5266 u32 len = sizeof(*h2c); 5267 struct sk_buff *skb; 5268 int ret; 5269 5270 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5271 if (!skb) { 5272 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5273 return -ENOMEM; 5274 } 5275 skb_put(skb, len); 5276 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 5277 5278 h2c->hdr.type = type; 5279 h2c->hdr.ver = btc->ver->fwlrole; 5280 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5281 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5282 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5283 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5284 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5285 5286 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5287 H2C_CAT_OUTSRC, BTFC_SET, 5288 SET_DRV_INFO, 0, 0, 5289 len); 5290 5291 ret = rtw89_h2c_tx(rtwdev, skb, false); 5292 if (ret) { 5293 rtw89_err(rtwdev, "failed to send h2c\n"); 5294 goto fail; 5295 } 5296 5297 return 0; 5298 fail: 5299 dev_kfree_skb_any(skb); 5300 5301 return ret; 5302 } 5303 5304 int 
rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type) 5305 { 5306 struct rtw89_btc *btc = &rtwdev->btc; 5307 struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info; 5308 struct rtw89_h2c_cxosi *h2c; 5309 u32 len = sizeof(*h2c); 5310 struct sk_buff *skb; 5311 int ret; 5312 5313 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5314 if (!skb) { 5315 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n"); 5316 return -ENOMEM; 5317 } 5318 skb_put(skb, len); 5319 h2c = (struct rtw89_h2c_cxosi *)skb->data; 5320 5321 h2c->hdr.type = type; 5322 h2c->hdr.ver = btc->ver->fcxosi; 5323 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5324 h2c->osi = *osi; 5325 5326 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5327 H2C_CAT_OUTSRC, BTFC_SET, 5328 SET_DRV_INFO, 0, 0, 5329 len); 5330 5331 ret = rtw89_h2c_tx(rtwdev, skb, false); 5332 if (ret) { 5333 rtw89_err(rtwdev, "failed to send h2c\n"); 5334 goto fail; 5335 } 5336 5337 return 0; 5338 fail: 5339 dev_kfree_skb_any(skb); 5340 5341 return ret; 5342 } 5343 5344 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 5345 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 5346 { 5347 struct rtw89_btc *btc = &rtwdev->btc; 5348 const struct rtw89_btc_ver *ver = btc->ver; 5349 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 5350 struct sk_buff *skb; 5351 u8 *cmd; 5352 int ret; 5353 5354 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 5355 if (!skb) { 5356 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5357 return -ENOMEM; 5358 } 5359 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 5360 cmd = skb->data; 5361 5362 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5363 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 5364 5365 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 5366 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 5367 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 5368 if (ver->fcxctrl == 0) 5369 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 5370 5371 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5372 H2C_CAT_OUTSRC, BTFC_SET, 5373 SET_DRV_INFO, 0, 0, 5374 H2C_LEN_CXDRVINFO_CTRL); 5375 5376 ret = rtw89_h2c_tx(rtwdev, skb, false); 5377 if (ret) { 5378 rtw89_err(rtwdev, "failed to send h2c\n"); 5379 goto fail; 5380 } 5381 5382 return 0; 5383 fail: 5384 dev_kfree_skb_any(skb); 5385 5386 return ret; 5387 } 5388 5389 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 5390 { 5391 struct rtw89_btc *btc = &rtwdev->btc; 5392 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 5393 struct rtw89_h2c_cxctrl_v7 *h2c; 5394 u32 len = sizeof(*h2c); 5395 struct sk_buff *skb; 5396 int ret; 5397 5398 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5399 if (!skb) { 5400 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 5401 return -ENOMEM; 5402 } 5403 skb_put(skb, len); 5404 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 5405 5406 h2c->hdr.type = type; 5407 h2c->hdr.ver = btc->ver->fcxctrl; 5408 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 5409 h2c->ctrl = *ctrl; 5410 5411 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5412 H2C_CAT_OUTSRC, BTFC_SET, 5413 SET_DRV_INFO, 0, 0, len); 5414 5415 ret = rtw89_h2c_tx(rtwdev, skb, false); 5416 if (ret) { 5417 rtw89_err(rtwdev, "failed to send h2c\n"); 5418 goto fail; 5419 } 5420 5421 return 0; 5422 fail: 5423 dev_kfree_skb_any(skb); 5424 5425 return ret; 5426 } 5427 5428 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 5429 int rtw89_fw_h2c_cxdrv_trx(struct 
rtw89_dev *rtwdev, u8 type) 5430 { 5431 struct rtw89_btc *btc = &rtwdev->btc; 5432 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 5433 struct sk_buff *skb; 5434 u8 *cmd; 5435 int ret; 5436 5437 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 5438 if (!skb) { 5439 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 5440 return -ENOMEM; 5441 } 5442 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 5443 cmd = skb->data; 5444 5445 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5446 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 5447 5448 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 5449 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 5450 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 5451 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 5452 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 5453 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 5454 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 5455 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 5456 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 5457 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 5458 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 5459 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 5460 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 5461 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 5462 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 5463 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 5464 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 5465 5466 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5467 H2C_CAT_OUTSRC, BTFC_SET, 5468 SET_DRV_INFO, 0, 0, 5469 H2C_LEN_CXDRVINFO_TRX); 5470 5471 ret = rtw89_h2c_tx(rtwdev, skb, false); 5472 if (ret) { 5473 rtw89_err(rtwdev, "failed to send h2c\n"); 5474 goto fail; 5475 } 5476 5477 return 0; 5478 fail: 5479 dev_kfree_skb_any(skb); 5480 5481 return ret; 5482 } 5483 5484 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 5485 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 5486 { 5487 struct rtw89_btc *btc = &rtwdev->btc; 5488 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5489 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 5490 struct sk_buff *skb; 5491 u8 *cmd; 5492 int ret; 5493 5494 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 5495 if (!skb) { 5496 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5497 return -ENOMEM; 5498 } 5499 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 5500 cmd = skb->data; 5501 5502 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5503 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 5504 5505 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 5506 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 5507 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 5508 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 5509 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 5510 5511 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5512 H2C_CAT_OUTSRC, BTFC_SET, 5513 SET_DRV_INFO, 0, 0, 5514 H2C_LEN_CXDRVINFO_RFK); 5515 5516 ret = rtw89_h2c_tx(rtwdev, skb, false); 5517 if (ret) { 5518 rtw89_err(rtwdev, "failed to send h2c\n"); 5519 goto fail; 5520 } 5521 5522 return 0; 5523 fail: 5524 dev_kfree_skb_any(skb); 5525 5526 return ret; 5527 } 5528 5529 #define H2C_LEN_PKT_OFLD 4 5530 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 5531 { 5532 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5533 struct sk_buff *skb; 5534 unsigned int cond; 5535 u8 *cmd; 5536 int ret; 5537 5538 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 5539 if (!skb) { 5540 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5541 return -ENOMEM; 5542 } 5543 skb_put(skb, H2C_LEN_PKT_OFLD); 5544 cmd = skb->data; 5545 5546 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 5547 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 5548 5549 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5550 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5551 H2C_FUNC_PACKET_OFLD, 1, 1, 5552 H2C_LEN_PKT_OFLD); 5553 5554 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 5555 5556 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5557 if (ret < 0) { 5558 rtw89_debug(rtwdev, RTW89_DBG_FW, 5559 "failed to del pkt ofld: id %d, ret %d\n", 5560 id, ret); 5561 return ret; 5562 } 5563 5564 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 5565 return 0; 5566 } 5567 5568 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5569 struct sk_buff *skb_ofld) 5570 { 5571 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5572 struct sk_buff *skb; 5573 unsigned int cond; 5574 u8 *cmd; 5575 u8 alloc_id; 5576 int ret; 5577 5578 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5579 RTW89_MAX_PKT_OFLD_NUM); 5580 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5581 return -ENOSPC; 5582 5583 *id = alloc_id; 5584 5585 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5586 if (!skb) { 5587 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5588 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5589 return -ENOMEM; 5590 } 5591 skb_put(skb, H2C_LEN_PKT_OFLD); 5592 cmd = skb->data; 5593 5594 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5595 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5596 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5597 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5598 5599 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5600 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5601 H2C_FUNC_PACKET_OFLD, 1, 1, 5602 H2C_LEN_PKT_OFLD + skb_ofld->len); 5603 5604 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5605 5606 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5607 if (ret < 0) { 5608 rtw89_debug(rtwdev, RTW89_DBG_FW, 5609 "failed to add pkt ofld: id %d, ret %d\n", 5610 alloc_id, ret); 5611 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5612 return ret; 5613 } 5614 5615 return 0; 5616 } 5617 5618 static 5619 int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num, 5620 struct list_head *chan_list) 5621 { 5622 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5623 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5624 struct rtw89_h2c_chinfo_elem *elem; 5625 struct rtw89_mac_chinfo_ax *ch_info; 5626 struct rtw89_h2c_chinfo *h2c; 5627 struct sk_buff *skb; 5628 unsigned int cond; 5629 int skb_len; 5630 int ret; 5631 5632 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5633 5634 skb_len = struct_size(h2c, elem, ch_num); 5635 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5636 if (!skb) { 5637 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5638 return -ENOMEM; 5639 } 5640 skb_put(skb, sizeof(*h2c)); 5641 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5642 5643 h2c->ch_num = ch_num; 5644 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5645 5646 list_for_each_entry(ch_info, chan_list, list) { 5647 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, 
sizeof(*elem)); 5648 5649 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5650 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5651 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5652 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5653 5654 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5655 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5656 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5657 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5658 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5659 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5660 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5661 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5662 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5663 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5664 5665 if (scan_info->extra_op.set) 5666 elem->w1 |= le32_encode_bits(ch_info->macid_tx, 5667 RTW89_H2C_CHINFO_W1_MACID_TX); 5668 5669 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5670 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5671 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5672 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5673 5674 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5675 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5676 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5677 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5678 } 5679 5680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5681 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5682 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5683 5684 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5685 5686 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5687 if (ret) { 5688 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5689 return ret; 5690 } 5691 5692 return 0; 5693 } 5694 5695 static 5696 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5697 struct list_head *chan_list, 5698 struct rtw89_vif_link *rtwvif_link) 5699 { 5700 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5701 struct rtw89_h2c_chinfo_elem_be *elem; 5702 struct rtw89_mac_chinfo_be *ch_info; 5703 struct rtw89_h2c_chinfo_be *h2c; 5704 struct sk_buff *skb; 5705 unsigned int cond; 5706 u8 ver = U8_MAX; 5707 int skb_len; 5708 int ret; 5709 5710 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5711 5712 skb_len = struct_size(h2c, elem, ch_num); 5713 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5714 if (!skb) { 5715 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5716 return -ENOMEM; 5717 } 5718 5719 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5720 ver = 0; 5721 5722 skb_put(skb, sizeof(*h2c)); 5723 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5724 5725 h2c->ch_num = ch_num; 5726 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5727 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5728 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5729 5730 list_for_each_entry(ch_info, chan_list, list) { 5731 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5732 5733 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5734 le32_encode_bits(ch_info->central_ch, 5735 
RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5736 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5737 5738 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5739 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5740 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5741 le32_encode_bits(ch_info->pause_data, 5742 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5743 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5744 le32_encode_bits(ch_info->rand_seq_num, 5745 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5746 le32_encode_bits(ch_info->notify_action, 5747 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5748 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 5749 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5750 le32_encode_bits(ch_info->leave_crit, 5751 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5752 le32_encode_bits(ch_info->chkpt_timer, 5753 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5754 5755 elem->w2 = le32_encode_bits(ch_info->leave_time, 5756 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5757 le32_encode_bits(ch_info->leave_th, 5758 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5759 le32_encode_bits(ch_info->tx_pkt_ctrl, 5760 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5761 5762 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5763 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5764 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5765 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5766 5767 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5768 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5769 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5770 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5771 5772 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5773 le32_encode_bits(ch_info->fw_probe0_ssids, 5774 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5775 5776 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5777 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5778 le32_encode_bits(ch_info->fw_probe0_bssids, 5779 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5780 if (ver == 0) 5781 elem->w0 |= 5782 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5783 else 5784 elem->w7 = le32_encode_bits(ch_info->period, 5785 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5786 } 5787 5788 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5789 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5790 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5791 5792 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5793 5794 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5795 if (ret) { 5796 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5797 return ret; 5798 } 5799 5800 return 0; 5801 } 5802 5803 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5804 struct rtw89_scan_option *option, 5805 struct rtw89_vif_link *rtwvif_link, 5806 bool wowlan) 5807 { 5808 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5809 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5810 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5811 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5812 struct rtw89_h2c_scanofld *h2c; 5813 u32 len = sizeof(*h2c); 5814 struct sk_buff *skb; 5815 unsigned int cond; 5816 u64 tsf = 0; 5817 int ret; 5818 5819 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5820 if (!skb) { 5821 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5822 
return -ENOMEM; 5823 } 5824 skb_put(skb, len); 5825 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5826 5827 if (option->delay) { 5828 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5829 if (ret) { 5830 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5831 scan_mode = RTW89_SCAN_IMMEDIATE; 5832 } else { 5833 scan_mode = RTW89_SCAN_DELAY; 5834 tsf += (u64)option->delay * 1000; 5835 } 5836 } 5837 5838 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5839 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5840 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5841 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 5842 5843 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5844 le32_encode_bits(option->target_ch_mode, 5845 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5846 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5847 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5848 5849 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5850 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5851 5852 if (option->target_ch_mode) { 5853 h2c->w1 |= le32_encode_bits(op->band_width, 5854 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5855 le32_encode_bits(op->primary_channel, 5856 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5857 le32_encode_bits(op->channel, 5858 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5859 h2c->w0 |= le32_encode_bits(op->band_type, 5860 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5861 } 5862 5863 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5864 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5865 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5866 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5867 5868 if (scan_info->extra_op.set) 5869 h2c->w6 = le32_encode_bits(scan_info->extra_op.macid, 5870 RTW89_H2C_SCANOFLD_W6_SECOND_MACID); 5871 5872 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5873 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5874 H2C_FUNC_SCANOFLD, 1, 1, 5875 len); 5876 5877 if (option->enable) 5878 cond = RTW89_SCANOFLD_WAIT_COND_START; 5879 else 5880 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5881 5882 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5883 if (ret) { 5884 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5885 return ret; 5886 } 5887 5888 return 0; 5889 } 5890 5891 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5892 struct rtw89_scan_option *option) 5893 { 5894 struct ieee80211_supported_band *sband; 5895 struct ieee80211_channel *chan; 5896 u8 i, idx; 5897 5898 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5899 if (!sband) { 5900 option->prohib_chan = U64_MAX; 5901 return; 5902 } 5903 5904 for (i = 0; i < sband->n_channels; i++) { 5905 chan = &sband->channels[i]; 5906 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5907 idx = (chan->hw_value - 1) / 4; 5908 option->prohib_chan |= BIT(idx); 5909 } 5910 } 5911 } 5912 5913 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5914 struct rtw89_scan_option *option, 5915 struct rtw89_vif_link *rtwvif_link, 5916 bool wowlan) 5917 { 5918 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5919 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5920 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 5921 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5922 struct cfg80211_scan_request *req = rtwvif->scan_req; 5923 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5924 struct 
rtw89_hw_scan_extra_op scan_op[2] = {}; 5925 struct rtw89_chan *op = &scan_info->op_chan; 5926 struct rtw89_h2c_scanofld_be_opch *opch; 5927 struct rtw89_pktofld_info *pkt_info; 5928 struct rtw89_h2c_scanofld_be *h2c; 5929 struct ieee80211_vif *vif; 5930 struct sk_buff *skb; 5931 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5932 u8 opch_size = sizeof(*opch) * option->num_opch; 5933 enum rtw89_scan_be_opmode opmode; 5934 u8 probe_id[NUM_NL80211_BANDS]; 5935 u8 scan_offload_ver = U8_MAX; 5936 u8 cfg_len = sizeof(*h2c); 5937 unsigned int cond; 5938 u8 ap_idx = U8_MAX; 5939 u8 ver = U8_MAX; 5940 u8 policy_val; 5941 void *ptr; 5942 u8 txbcn; 5943 int ret; 5944 u32 len; 5945 u8 i; 5946 5947 scan_op[0].macid = rtwvif_link->mac_id; 5948 scan_op[0].port = rtwvif_link->port; 5949 scan_op[0].chan = *op; 5950 vif = rtwvif_to_vif(rtwvif_link->rtwvif); 5951 if (vif->type == NL80211_IFTYPE_AP) 5952 ap_idx = 0; 5953 5954 if (ext->set) { 5955 scan_op[1] = *ext; 5956 vif = rtwvif_to_vif(ext->rtwvif_link->rtwvif); 5957 if (vif->type == NL80211_IFTYPE_AP) 5958 ap_idx = 1; 5959 } 5960 5961 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5962 5963 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5964 cfg_len = offsetofend(typeof(*h2c), w8); 5965 scan_offload_ver = 0; 5966 } 5967 5968 len = cfg_len + macc_role_size + opch_size; 5969 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5970 if (!skb) { 5971 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5972 return -ENOMEM; 5973 } 5974 5975 skb_put(skb, len); 5976 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5977 ptr = skb->data; 5978 5979 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5980 5981 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5982 ver = 0; 5983 5984 if (!wowlan) { 5985 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5986 if (pkt_info->wildcard_6ghz) { 5987 /* Provide wildcard as template */ 5988 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5989 break; 5990 } 5991 } 5992 } 5993 5994 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5995 le32_encode_bits(option->scan_mode, 5996 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5997 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5998 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5999 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 6000 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 6001 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 6002 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 6003 6004 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 6005 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 6006 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 6007 6008 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 6009 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 6010 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 6011 6012 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 6013 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 6014 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 6015 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 6016 6017 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 6018 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 6019 
le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 6020 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 6021 le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 6022 6023 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 6024 6025 h2c->w6 = le32_encode_bits(option->prohib_chan, 6026 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 6027 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 6028 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 6029 if (!wowlan && req->no_cck) { 6030 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 6031 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 6032 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 6033 le32_encode_bits(RTW89_HW_RATE_OFDM6, 6034 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 6035 le32_encode_bits(RTW89_HW_RATE_OFDM6, 6036 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 6037 } 6038 6039 if (scan_offload_ver == 0) 6040 goto flex_member; 6041 6042 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 6043 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 6044 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 6045 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 6046 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 6047 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 6048 6049 flex_member: 6050 ptr += cfg_len; 6051 6052 for (i = 0; i < option->num_macc_role; i++) { 6053 macc_role = ptr; 6054 macc_role->w0 = 6055 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 6056 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 6057 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 6058 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 6059 ptr += sizeof(*macc_role); 6060 } 6061 6062 for (i = 0; i < option->num_opch; i++) { 6063 bool is_ap_idx = i == ap_idx; 6064 6065 opmode = is_ap_idx ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV; 6066 policy_val = is_ap_idx ? 2 : RTW89_OFF_CHAN_TIME / 10; 6067 txbcn = is_ap_idx ? 1 : 0; 6068 6069 opch = ptr; 6070 opch->w0 = le32_encode_bits(scan_op[i].macid, 6071 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 6072 le32_encode_bits(option->band, 6073 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 6074 le32_encode_bits(scan_op[i].port, 6075 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 6076 le32_encode_bits(opmode, 6077 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 6078 le32_encode_bits(true, 6079 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 6080 le32_encode_bits(policy_val, 6081 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 6082 6083 opch->w1 = le32_encode_bits(scan_op[i].chan.band_type, 6084 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 6085 le32_encode_bits(scan_op[i].chan.band_width, 6086 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 6087 le32_encode_bits(0x3, 6088 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 6089 le32_encode_bits(scan_op[i].chan.primary_channel, 6090 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 6091 le32_encode_bits(scan_op[i].chan.channel, 6092 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 6093 6094 opch->w2 = le32_encode_bits(0, 6095 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 6096 le32_encode_bits(0, 6097 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 6098 le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 
1 : 2, 6099 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) | 6100 le32_encode_bits(txbcn, 6101 RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN); 6102 6103 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6104 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 6105 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6106 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 6107 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6108 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 6109 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 6110 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 6111 6112 if (ver == 0) 6113 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 6114 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 6115 else 6116 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 6117 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 6118 ptr += sizeof(*opch); 6119 } 6120 6121 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6122 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 6123 H2C_FUNC_SCANOFLD_BE, 1, 1, 6124 len); 6125 6126 if (option->enable) 6127 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 6128 else 6129 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 6130 6131 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6132 if (ret) { 6133 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 6134 return ret; 6135 } 6136 6137 return 0; 6138 } 6139 6140 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 6141 struct rtw89_fw_h2c_rf_reg_info *info, 6142 u16 len, u8 page) 6143 { 6144 struct sk_buff *skb; 6145 u8 class = info->rf_path == RF_PATH_A ? 6146 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 6147 int ret; 6148 6149 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6150 if (!skb) { 6151 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 6152 return -ENOMEM; 6153 } 6154 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 6155 6156 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6157 H2C_CAT_OUTSRC, class, page, 0, 0, 6158 len); 6159 6160 ret = rtw89_h2c_tx(rtwdev, skb, false); 6161 if (ret) { 6162 rtw89_err(rtwdev, "failed to send h2c\n"); 6163 goto fail; 6164 } 6165 6166 return 0; 6167 fail: 6168 dev_kfree_skb_any(skb); 6169 6170 return ret; 6171 } 6172 6173 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 6174 { 6175 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 6176 struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0; 6177 struct rtw89_fw_h2c_rf_get_mccch *mccch; 6178 u32 len = sizeof(*mccch); 6179 struct sk_buff *skb; 6180 u8 ver = U8_MAX; 6181 int ret; 6182 u8 idx; 6183 6184 if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) { 6185 len = sizeof(*mccch_v0); 6186 ver = 0; 6187 } 6188 6189 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6190 if (!skb) { 6191 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 6192 return -ENOMEM; 6193 } 6194 skb_put(skb, len); 6195 6196 idx = rfk_mcc->table_idx; 6197 if (ver == 0) { 6198 mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data; 6199 mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 6200 mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 6201 mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]); 6202 mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]); 6203 mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 6204 mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 6205 } else { 6206 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 6207 mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]); 6208 mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]); 6209 mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]); 6210 mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]); 6211 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 
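		/* Unlike the V0 layout above, no band words are filled for
		 * this format here; current_channel still follows the
		 * currently selected table_idx.
		 */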
6212 } 6213 6214 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6215 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 6216 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 6217 len); 6218 6219 ret = rtw89_h2c_tx(rtwdev, skb, false); 6220 if (ret) { 6221 rtw89_err(rtwdev, "failed to send h2c\n"); 6222 goto fail; 6223 } 6224 6225 return 0; 6226 fail: 6227 dev_kfree_skb_any(skb); 6228 6229 return ret; 6230 } 6231 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 6232 6233 int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev, 6234 enum rtw89_chanctx_idx chanctx_idx, 6235 u8 mcc_role_idx, u8 pd_val, bool en) 6236 { 6237 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 6238 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 6239 struct rtw89_h2c_mcc_dig *h2c; 6240 u32 len = sizeof(*h2c); 6241 struct sk_buff *skb; 6242 int ret; 6243 6244 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6245 if (!skb) { 6246 rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n"); 6247 return -ENOMEM; 6248 } 6249 skb_put(skb, len); 6250 h2c = (struct rtw89_h2c_mcc_dig *)skb->data; 6251 6252 h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) | 6253 le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) | 6254 le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) | 6255 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) | 6256 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) | 6257 le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) | 6258 le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE); 6259 h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg, 6260 RTW89_H2C_MCC_DIG_W1_ADDR_LSB) | 6261 le32_encode_bits(dig_regs->seg0_pd_reg >> 8, 6262 RTW89_H2C_MCC_DIG_W1_ADDR_MSB) | 6263 le32_encode_bits(dig_regs->pd_lower_bound_mask, 6264 RTW89_H2C_MCC_DIG_W1_BMASK_LSB) | 6265 le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8, 6266 RTW89_H2C_MCC_DIG_W1_BMASK_MSB); 6267 h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB); 6268 6269 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6270 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 6271 H2C_FUNC_FW_MCC_DIG, 0, 0, len); 6272 6273 ret = rtw89_h2c_tx(rtwdev, skb, false); 6274 if (ret) { 6275 rtw89_err(rtwdev, "failed to send h2c\n"); 6276 goto fail; 6277 } 6278 6279 return 0; 6280 fail: 6281 dev_kfree_skb_any(skb); 6282 6283 return ret; 6284 } 6285 6286 int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 6287 { 6288 const struct rtw89_chip_info *chip = rtwdev->chip; 6289 struct rtw89_vif_link *rtwvif_link; 6290 struct rtw89_h2c_rf_ps_info *h2c; 6291 const struct rtw89_chan *chan; 6292 u32 len = sizeof(*h2c); 6293 unsigned int link_id; 6294 struct sk_buff *skb; 6295 int ret; 6296 u8 path; 6297 u32 val; 6298 6299 if (chip->chip_gen != RTW89_CHIP_BE) 6300 return 0; 6301 6302 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6303 if (!skb) { 6304 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n"); 6305 return -ENOMEM; 6306 } 6307 skb_put(skb, len); 6308 h2c = (struct rtw89_h2c_rf_ps_info *)skb->data; 6309 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6310 6311 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 6312 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 6313 path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx); 6314 val = rtw89_chip_chan_to_rf18_val(rtwdev, chan); 6315 6316 if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) { 6317 rtw89_err(rtwdev, "unsupported rf path (%d)\n", path); 6318 ret = -ENOENT; 6319 goto fail; 6320 } 6321 6322 h2c->rf18[path] 
= cpu_to_le32(val); 6323 h2c->pri_ch[path] = chan->primary_channel; 6324 } 6325 6326 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6327 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 6328 H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0, 6329 sizeof(*h2c)); 6330 6331 ret = rtw89_h2c_tx(rtwdev, skb, false); 6332 if (ret) { 6333 rtw89_err(rtwdev, "failed to send h2c\n"); 6334 goto fail; 6335 } 6336 6337 return 0; 6338 fail: 6339 dev_kfree_skb_any(skb); 6340 6341 return ret; 6342 } 6343 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info); 6344 6345 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 6346 enum rtw89_phy_idx phy_idx) 6347 { 6348 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 6349 struct rtw89_fw_h2c_rfk_pre_info_common *common; 6350 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 6351 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 6352 struct rtw89_fw_h2c_rfk_pre_info *h2c; 6353 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 6354 u32 len = sizeof(*h2c); 6355 struct sk_buff *skb; 6356 u8 ver = U8_MAX; 6357 u8 tbl, path; 6358 u32 val32; 6359 int ret; 6360 6361 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 6362 len = sizeof(*h2c_v1); 6363 ver = 1; 6364 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 6365 len = sizeof(*h2c_v0); 6366 ver = 0; 6367 } 6368 6369 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6370 if (!skb) { 6371 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 6372 return -ENOMEM; 6373 } 6374 skb_put(skb, len); 6375 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 6376 common = &h2c->base_v1.common; 6377 6378 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 6379 6380 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 6381 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 6382 6383 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 6384 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6385 common->dbcc.ch[path][tbl] = 6386 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 6387 common->dbcc.band[path][tbl] = 6388 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 6389 } 6390 } 6391 6392 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 6393 tbl_sel[path] = rfk_mcc->data[path].table_idx; 6394 6395 common->tbl.cur_ch[path] = 6396 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 6397 common->tbl.cur_band[path] = 6398 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 6399 6400 if (ver <= 1) 6401 continue; 6402 6403 h2c->cur_bandwidth[path] = 6404 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 6405 } 6406 6407 common->phy_idx = cpu_to_le32(phy_idx); 6408 6409 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 6410 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 6411 6412 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 6413 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 6414 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 6415 6416 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 6417 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 6418 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 6419 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 6420 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 6421 h2c_v0->rfmod0 = cpu_to_le32(val32); 6422 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 6423 h2c_v0->rfmod1 = cpu_to_le32(val32); 6424 6425 if (rtw89_is_mlo_1_1(rtwdev)) 6426 h2c_v0->mlo_1_1 = cpu_to_le32(1); 6427 6428 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 6429 6430 goto done; 
6431 } 6432 6433 if (rtw89_is_mlo_1_1(rtwdev)) { 6434 h2c_v1 = &h2c->base_v1; 6435 h2c_v1->mlo_1_1 = cpu_to_le32(1); 6436 } 6437 done: 6438 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6439 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6440 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 6441 len); 6442 6443 ret = rtw89_h2c_tx(rtwdev, skb, false); 6444 if (ret) { 6445 rtw89_err(rtwdev, "failed to send h2c\n"); 6446 goto fail; 6447 } 6448 6449 return 0; 6450 fail: 6451 dev_kfree_skb_any(skb); 6452 6453 return ret; 6454 } 6455 6456 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6457 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 6458 { 6459 struct rtw89_efuse *efuse = &rtwdev->efuse; 6460 struct rtw89_hal *hal = &rtwdev->hal; 6461 struct rtw89_h2c_rf_tssi *h2c; 6462 u32 len = sizeof(*h2c); 6463 struct sk_buff *skb; 6464 int ret; 6465 6466 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6467 if (!skb) { 6468 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 6469 return -ENOMEM; 6470 } 6471 skb_put(skb, len); 6472 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 6473 6474 h2c->len = cpu_to_le16(len); 6475 h2c->phy = phy_idx; 6476 h2c->ch = chan->channel; 6477 h2c->bw = chan->band_width; 6478 h2c->band = chan->band_type; 6479 h2c->hwtx_en = true; 6480 h2c->cv = hal->cv; 6481 h2c->tssi_mode = tssi_mode; 6482 h2c->rfe_type = efuse->rfe_type; 6483 6484 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 6485 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 6486 6487 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6488 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6489 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 6490 6491 ret = rtw89_h2c_tx(rtwdev, skb, false); 6492 if (ret) { 6493 rtw89_err(rtwdev, "failed to send h2c\n"); 6494 goto fail; 6495 } 6496 6497 return 0; 6498 fail: 6499 dev_kfree_skb_any(skb); 6500 6501 return ret; 6502 } 6503 6504 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6505 const struct rtw89_chan *chan) 6506 { 6507 struct rtw89_hal *hal = &rtwdev->hal; 6508 struct rtw89_h2c_rf_iqk_v0 *h2c_v0; 6509 struct rtw89_h2c_rf_iqk *h2c; 6510 u32 len = sizeof(*h2c); 6511 struct sk_buff *skb; 6512 u8 ver = U8_MAX; 6513 int ret; 6514 6515 if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) { 6516 len = sizeof(*h2c_v0); 6517 ver = 0; 6518 } 6519 6520 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6521 if (!skb) { 6522 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 6523 return -ENOMEM; 6524 } 6525 skb_put(skb, len); 6526 6527 if (ver == 0) { 6528 h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data; 6529 6530 h2c_v0->phy_idx = cpu_to_le32(phy_idx); 6531 h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en); 6532 6533 goto done; 6534 } 6535 6536 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 6537 6538 h2c->len = sizeof(*h2c); 6539 h2c->ktype = 0; 6540 h2c->phy = phy_idx; 6541 h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx); 6542 h2c->band = chan->band_type; 6543 h2c->bw = chan->band_width; 6544 h2c->ch = chan->channel; 6545 h2c->cv = hal->cv; 6546 6547 done: 6548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6549 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6550 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 6551 6552 ret = rtw89_h2c_tx(rtwdev, skb, false); 6553 if (ret) { 6554 rtw89_err(rtwdev, "failed to send h2c\n"); 6555 goto fail; 6556 } 6557 6558 return 0; 6559 fail: 6560 dev_kfree_skb_any(skb); 6561 6562 return ret; 6563 } 6564 6565 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy_idx, 6566 const struct rtw89_chan *chan) 6567 { 6568 struct rtw89_h2c_rf_dpk *h2c; 6569 u32 len = sizeof(*h2c); 6570 struct sk_buff *skb; 6571 int ret; 6572 6573 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6574 if (!skb) { 6575 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 6576 return -ENOMEM; 6577 } 6578 skb_put(skb, len); 6579 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 6580 6581 h2c->len = len; 6582 h2c->phy = phy_idx; 6583 h2c->dpk_enable = true; 6584 h2c->kpath = RF_AB; 6585 h2c->cur_band = chan->band_type; 6586 h2c->cur_bw = chan->band_width; 6587 h2c->cur_ch = chan->channel; 6588 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6589 6590 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6591 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6592 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 6593 6594 ret = rtw89_h2c_tx(rtwdev, skb, false); 6595 if (ret) { 6596 rtw89_err(rtwdev, "failed to send h2c\n"); 6597 goto fail; 6598 } 6599 6600 return 0; 6601 fail: 6602 dev_kfree_skb_any(skb); 6603 6604 return ret; 6605 } 6606 6607 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6608 const struct rtw89_chan *chan) 6609 { 6610 struct rtw89_hal *hal = &rtwdev->hal; 6611 struct rtw89_h2c_rf_txgapk *h2c; 6612 u32 len = sizeof(*h2c); 6613 struct sk_buff *skb; 6614 int ret; 6615 6616 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6617 if (!skb) { 6618 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 6619 return -ENOMEM; 6620 } 6621 skb_put(skb, len); 6622 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 6623 6624 h2c->len = len; 6625 h2c->ktype = 2; 6626 h2c->phy = phy_idx; 6627 h2c->kpath = RF_AB; 6628 h2c->band = chan->band_type; 6629 h2c->bw = chan->band_width; 6630 h2c->ch = chan->channel; 6631 h2c->cv = hal->cv; 6632 6633 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6634 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6635 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 6636 6637 ret = rtw89_h2c_tx(rtwdev, skb, false); 6638 if (ret) { 6639 rtw89_err(rtwdev, "failed to send h2c\n"); 6640 goto fail; 6641 } 6642 6643 return 0; 6644 fail: 6645 dev_kfree_skb_any(skb); 6646 6647 return ret; 6648 } 6649 6650 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6651 const struct rtw89_chan *chan) 6652 { 6653 struct rtw89_h2c_rf_dack *h2c; 6654 u32 len = sizeof(*h2c); 6655 struct sk_buff *skb; 6656 int ret; 6657 6658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6659 if (!skb) { 6660 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 6661 return -ENOMEM; 6662 } 6663 skb_put(skb, len); 6664 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 6665 6666 h2c->len = cpu_to_le32(len); 6667 h2c->phy = cpu_to_le32(phy_idx); 6668 h2c->type = cpu_to_le32(0); 6669 6670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6671 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6672 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 6673 6674 ret = rtw89_h2c_tx(rtwdev, skb, false); 6675 if (ret) { 6676 rtw89_err(rtwdev, "failed to send h2c\n"); 6677 goto fail; 6678 } 6679 6680 return 0; 6681 fail: 6682 dev_kfree_skb_any(skb); 6683 6684 return ret; 6685 } 6686 6687 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6688 const struct rtw89_chan *chan, bool is_chl_k) 6689 { 6690 struct rtw89_h2c_rf_rxdck_v0 *v0; 6691 struct rtw89_h2c_rf_rxdck *h2c; 6692 u32 len = sizeof(*h2c); 6693 struct sk_buff *skb; 6694 int ver = -1; 6695 int ret; 6696 6697 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 
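		/* Firmware advertising RFK_RXDCK_V0 is sent the shorter V0
		 * layout; the is_chl_k field written further below is
		 * skipped for it.
		 */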
6698 len = sizeof(*v0); 6699 ver = 0; 6700 } 6701 6702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6703 if (!skb) { 6704 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 6705 return -ENOMEM; 6706 } 6707 skb_put(skb, len); 6708 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 6709 6710 v0->len = len; 6711 v0->phy = phy_idx; 6712 v0->is_afe = false; 6713 v0->kpath = RF_AB; 6714 v0->cur_band = chan->band_type; 6715 v0->cur_bw = chan->band_width; 6716 v0->cur_ch = chan->channel; 6717 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6718 6719 if (ver == 0) 6720 goto hdr; 6721 6722 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 6723 h2c->is_chl_k = is_chl_k; 6724 6725 hdr: 6726 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6727 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6728 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 6729 6730 ret = rtw89_h2c_tx(rtwdev, skb, false); 6731 if (ret) { 6732 rtw89_err(rtwdev, "failed to send h2c\n"); 6733 goto fail; 6734 } 6735 6736 return 0; 6737 fail: 6738 dev_kfree_skb_any(skb); 6739 6740 return ret; 6741 } 6742 6743 int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable) 6744 { 6745 struct rtw89_h2c_rf_tas *h2c; 6746 u32 len = sizeof(*h2c); 6747 struct sk_buff *skb; 6748 int ret; 6749 6750 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6751 if (!skb) { 6752 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n"); 6753 return -ENOMEM; 6754 } 6755 skb_put(skb, len); 6756 h2c = (struct rtw89_h2c_rf_tas *)skb->data; 6757 6758 h2c->enable = cpu_to_le32(enable); 6759 6760 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6761 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6762 H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len); 6763 6764 ret = rtw89_h2c_tx(rtwdev, skb, false); 6765 if (ret) { 6766 rtw89_err(rtwdev, "failed to send h2c\n"); 6767 goto fail; 6768 } 6769 6770 return 0; 6771 fail: 6772 dev_kfree_skb_any(skb); 6773 6774 return ret; 6775 } 6776 6777 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 6778 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 6779 bool rack, bool dack) 6780 { 6781 struct sk_buff *skb; 6782 int ret; 6783 6784 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6785 if (!skb) { 6786 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6787 return -ENOMEM; 6788 } 6789 skb_put_data(skb, buf, len); 6790 6791 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6792 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6793 len); 6794 6795 ret = rtw89_h2c_tx(rtwdev, skb, false); 6796 if (ret) { 6797 rtw89_err(rtwdev, "failed to send h2c\n"); 6798 goto fail; 6799 } 6800 6801 return 0; 6802 fail: 6803 dev_kfree_skb_any(skb); 6804 6805 return ret; 6806 } 6807 6808 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6809 { 6810 struct sk_buff *skb; 6811 int ret; 6812 6813 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6814 if (!skb) { 6815 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6816 return -ENOMEM; 6817 } 6818 skb_put_data(skb, buf, len); 6819 6820 ret = rtw89_h2c_tx(rtwdev, skb, false); 6821 if (ret) { 6822 rtw89_err(rtwdev, "failed to send h2c\n"); 6823 goto fail; 6824 } 6825 6826 return 0; 6827 fail: 6828 dev_kfree_skb_any(skb); 6829 6830 return ret; 6831 } 6832 6833 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6834 { 6835 struct rtw89_early_h2c *early_h2c; 6836 6837 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6838 6839 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6840 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 
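		/* Send failures are not propagated; the cached entries stay
		 * on early_h2c_list either way.
		 */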
	}
}

void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c, *tmp;

	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
		list_del(&early_h2c->list);
		kfree(early_h2c->h2c);
		kfree(early_h2c);
	}
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__rtw89_fw_free_all_early_h2c(rtwdev);
}

static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
{
	const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);

	attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
	attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
	attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
	attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
}

static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
				    struct sk_buff *c2h)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;

	switch (category) {
	default:
		return false;
	case RTW89_C2H_CAT_MAC:
		return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
	case RTW89_C2H_CAT_OUTSRC:
		return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
	}
}

void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
	rtw89_fw_c2h_parse_attr(c2h);
	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
		goto enqueue;

	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
	dev_kfree_skb_any(c2h);
	return;

enqueue:
	skb_queue_tail(&rtwdev->c2h_queue, c2h);
	wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
}

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;
	u16 len = attr->len;
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						c2h_work);
	struct sk_buff *skb, *tmp;
	struct sk_buff_head c2hq;
	unsigned long flags;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__skb_queue_head_init(&c2hq);

	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);

	skb_queue_walk_safe(&c2hq, skb, tmp) {
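		/* Handle the spliced-off events without holding
		 * c2h_queue.lock, so rtw89_fw_c2h_irqsafe() can keep
		 * queueing new C2Hs meanwhile.
		 */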
		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
		dev_kfree_skb_any(skb);
	}
}

void rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head c2hq;
	unsigned long flags;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__skb_queue_head_init(&c2hq);

	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);

	skb_queue_walk_safe(&c2hq, skb, tmp) {
		struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);

		if (!attr->is_scan_event || attr->scan_seq == scan_info->seq)
			continue;

		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
			    "purge obsoleted scan event with seq=%d (cur=%d)\n",
			    attr->scan_seq, scan_info->seq);

		__skb_unlink(skb, &c2hq);
		dev_kfree_skb_any(skb);
	}

	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice(&c2hq, &rtwdev->c2h_queue);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);
}

static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *h2c_reg = chip->h2c_regs;
	u8 i, val, len;
	int ret;

	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, chip->h2c_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->u.h2creg[0]));

	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);

	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);

	fw_info->h2c_counter++;
	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

	return 0;
}

static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *c2h_reg = chip->c2h_regs;
	u32 timeout;
	u8 i, val;
	int ret;

	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		timeout = RTW89_C2H_TIMEOUT_USB;
	else
		timeout = RTW89_C2H_TIMEOUT;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       timeout, false, rtwdev,
				       chip->c2h_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
	info->content_len =
		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
		RTW89_C2HREG_HDR_LEN;

	fw_info->c2h_counter++;
	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);

	return 0;
7070 } 7071 7072 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 7073 struct rtw89_mac_h2c_info *h2c_info, 7074 struct rtw89_mac_c2h_info *c2h_info) 7075 { 7076 int ret; 7077 7078 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 7079 lockdep_assert_wiphy(rtwdev->hw->wiphy); 7080 7081 if (!h2c_info && !c2h_info) 7082 return -EINVAL; 7083 7084 if (!h2c_info) 7085 goto recv_c2h; 7086 7087 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 7088 if (ret) 7089 return ret; 7090 7091 recv_c2h: 7092 if (!c2h_info) 7093 return 0; 7094 7095 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 7096 if (ret) 7097 return ret; 7098 7099 return 0; 7100 } 7101 7102 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 7103 { 7104 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 7105 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 7106 return; 7107 } 7108 7109 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 7110 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 7111 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 7112 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 7113 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 7114 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 7115 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 7116 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 7117 7118 rtw89_fw_prog_cnt_dump(rtwdev); 7119 } 7120 7121 static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev) 7122 { 7123 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 7124 struct rtw89_pktofld_info *info, *tmp; 7125 u8 idx; 7126 7127 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 7128 if (!(rtwdev->chip->support_bands & BIT(idx))) 7129 continue; 7130 7131 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 7132 if (test_bit(info->id, rtwdev->pkt_offload)) 7133 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 7134 list_del(&info->list); 7135 kfree(info); 7136 } 7137 } 7138 } 7139 7140 static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev, 7141 struct rtw89_vif_link *rtwvif_link) 7142 { 7143 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7144 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7145 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7146 7147 mac->free_chan_list(rtwdev); 7148 rtw89_hw_scan_release_pkt_list(rtwdev); 7149 7150 rtwvif->scan_req = NULL; 7151 rtwvif->scan_ies = NULL; 7152 scan_info->scanning_vif = NULL; 7153 scan_info->abort = false; 7154 scan_info->connected = false; 7155 scan_info->delay = 0; 7156 } 7157 7158 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 7159 struct cfg80211_scan_request *req, 7160 struct rtw89_pktofld_info *info, 7161 enum nl80211_band band, u8 ssid_idx) 7162 { 7163 if (band != NL80211_BAND_6GHZ) 7164 return false; 7165 7166 if (req->ssids[ssid_idx].ssid_len) { 7167 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 7168 req->ssids[ssid_idx].ssid_len); 7169 info->ssid_len = req->ssids[ssid_idx].ssid_len; 7170 return false; 7171 } else { 7172 info->wildcard_6ghz = true; 7173 return true; 7174 } 7175 } 7176 7177 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 7178 struct rtw89_vif_link *rtwvif_link, 7179 struct sk_buff *skb, u8 ssid_idx) 7180 { 7181 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7182 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7183 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 7184 struct cfg80211_scan_request *req = rtwvif->scan_req; 7185 struct 
rtw89_pktofld_info *info; 7186 struct sk_buff *new; 7187 int ret = 0; 7188 u8 band; 7189 7190 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 7191 if (!(rtwdev->chip->support_bands & BIT(band))) 7192 continue; 7193 7194 new = skb_copy(skb, GFP_KERNEL); 7195 if (!new) { 7196 ret = -ENOMEM; 7197 goto out; 7198 } 7199 skb_put_data(new, ies->ies[band], ies->len[band]); 7200 skb_put_data(new, ies->common_ies, ies->common_ie_len); 7201 7202 info = kzalloc(sizeof(*info), GFP_KERNEL); 7203 if (!info) { 7204 ret = -ENOMEM; 7205 kfree_skb(new); 7206 goto out; 7207 } 7208 7209 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 7210 7211 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 7212 if (ret) { 7213 kfree_skb(new); 7214 kfree(info); 7215 goto out; 7216 } 7217 7218 list_add_tail(&info->list, &scan_info->pkt_list[band]); 7219 kfree_skb(new); 7220 } 7221 out: 7222 return ret; 7223 } 7224 7225 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 7226 struct rtw89_vif_link *rtwvif_link, 7227 const u8 *mac_addr) 7228 { 7229 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7230 struct cfg80211_scan_request *req = rtwvif->scan_req; 7231 struct sk_buff *skb; 7232 u8 num = req->n_ssids, i; 7233 int ret; 7234 7235 for (i = 0; i < num; i++) { 7236 skb = ieee80211_probereq_get(rtwdev->hw, mac_addr, 7237 req->ssids[i].ssid, 7238 req->ssids[i].ssid_len, 7239 req->ie_len); 7240 if (!skb) 7241 return -ENOMEM; 7242 7243 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 7244 kfree_skb(skb); 7245 7246 if (ret) 7247 return ret; 7248 } 7249 7250 return 0; 7251 } 7252 7253 static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev, 7254 struct ieee80211_scan_ies *ies, 7255 struct cfg80211_scan_request *req, 7256 struct rtw89_mac_chinfo_ax *ch_info) 7257 { 7258 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7259 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 7260 struct cfg80211_scan_6ghz_params *params; 7261 struct rtw89_pktofld_info *info, *tmp; 7262 struct ieee80211_hdr *hdr; 7263 struct sk_buff *skb; 7264 bool found; 7265 int ret = 0; 7266 u8 i; 7267 7268 if (!req->n_6ghz_params) 7269 return 0; 7270 7271 for (i = 0; i < req->n_6ghz_params; i++) { 7272 params = &req->scan_6ghz_params[i]; 7273 7274 if (req->channels[params->channel_idx]->hw_value != 7275 ch_info->pri_ch) 7276 continue; 7277 7278 found = false; 7279 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 7280 if (ether_addr_equal(tmp->bssid, params->bssid)) { 7281 found = true; 7282 break; 7283 } 7284 } 7285 if (found) 7286 continue; 7287 7288 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 7289 NULL, 0, req->ie_len); 7290 if (!skb) 7291 return -ENOMEM; 7292 7293 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 7294 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 7295 hdr = (struct ieee80211_hdr *)skb->data; 7296 ether_addr_copy(hdr->addr3, params->bssid); 7297 7298 info = kzalloc(sizeof(*info), GFP_KERNEL); 7299 if (!info) { 7300 ret = -ENOMEM; 7301 kfree_skb(skb); 7302 goto out; 7303 } 7304 7305 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 7306 if (ret) { 7307 kfree_skb(skb); 7308 kfree(info); 7309 goto out; 7310 } 7311 7312 ether_addr_copy(info->bssid, params->bssid); 7313 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 7314 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 7315 7316 ch_info->tx_pkt = true; 7317 
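		/* A directed probe will be sent on this channel, so give it
		 * the longer 6 GHz dwell below.
		 */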
ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7318 7319 kfree_skb(skb); 7320 } 7321 7322 out: 7323 return ret; 7324 } 7325 7326 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 7327 int chan_type, int ssid_num, 7328 struct rtw89_mac_chinfo_ax *ch_info) 7329 { 7330 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7331 struct rtw89_pktofld_info *info; 7332 u8 probe_count = 0; 7333 7334 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7335 ch_info->bw = RTW89_SCAN_WIDTH; 7336 ch_info->tx_pkt = true; 7337 ch_info->cfg_tx_pwr = false; 7338 ch_info->tx_pwr_idx = 0; 7339 ch_info->tx_null = false; 7340 ch_info->pause_data = false; 7341 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7342 7343 if (ssid_num) { 7344 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 7345 if (info->channel_6ghz && 7346 ch_info->pri_ch != info->channel_6ghz) 7347 continue; 7348 else if (info->channel_6ghz && probe_count != 0) 7349 ch_info->period += RTW89_CHANNEL_TIME_6G; 7350 7351 if (info->wildcard_6ghz) 7352 continue; 7353 7354 ch_info->pkt_id[probe_count++] = info->id; 7355 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7356 break; 7357 } 7358 ch_info->num_pkt = probe_count; 7359 } 7360 7361 switch (chan_type) { 7362 case RTW89_CHAN_DFS: 7363 if (ch_info->ch_band != RTW89_BAND_6G) 7364 ch_info->period = max_t(u8, ch_info->period, 7365 RTW89_DFS_CHAN_TIME); 7366 ch_info->dwell_time = RTW89_DWELL_TIME; 7367 break; 7368 case RTW89_CHAN_ACTIVE: 7369 break; 7370 default: 7371 rtw89_err(rtwdev, "Channel type out of bound\n"); 7372 } 7373 } 7374 7375 static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type, 7376 int ssid_num, 7377 struct rtw89_mac_chinfo_ax *ch_info) 7378 { 7379 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7380 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7381 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7382 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7383 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 7384 struct cfg80211_scan_request *req = rtwvif->scan_req; 7385 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 7386 struct rtw89_pktofld_info *info; 7387 u8 band, probe_count = 0; 7388 int ret; 7389 7390 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7391 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7392 ch_info->bw = RTW89_SCAN_WIDTH; 7393 ch_info->tx_pkt = true; 7394 ch_info->cfg_tx_pwr = false; 7395 ch_info->tx_pwr_idx = 0; 7396 ch_info->tx_null = false; 7397 ch_info->pause_data = false; 7398 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7399 7400 if (ch_info->ch_band == RTW89_BAND_6G) { 7401 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7402 !ch_info->is_psc) { 7403 ch_info->tx_pkt = false; 7404 if (!req->duration_mandatory) 7405 ch_info->period -= RTW89_DWELL_TIME_6G; 7406 } 7407 } 7408 7409 ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info); 7410 if (ret) 7411 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 7412 7413 if (ssid_num) { 7414 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7415 7416 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7417 if (info->channel_6ghz && 7418 ch_info->pri_ch != info->channel_6ghz) 7419 continue; 7420 else if (info->channel_6ghz && probe_count != 0) 7421 ch_info->period += RTW89_CHANNEL_TIME_6G; 7422 7423 if (info->wildcard_6ghz) 7424 continue; 7425 7426 ch_info->pkt_id[probe_count++] = info->id; 7427 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7428 break; 7429 } 7430 ch_info->num_pkt = probe_count; 7431 } 
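	/* Per-type tweaks: operating/extra-op entries return to the (extra)
	 * op channel with a NULL frame and no probes, DFS entries get a
	 * listen period (and a longer budget outside 6 GHz), and DFS/active
	 * entries pause data traffic.
	 */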
7432 7433 switch (chan_type) { 7434 case RTW89_CHAN_OPERATE: 7435 ch_info->central_ch = op->channel; 7436 ch_info->pri_ch = op->primary_channel; 7437 ch_info->ch_band = op->band_type; 7438 ch_info->bw = op->band_width; 7439 ch_info->tx_null = true; 7440 ch_info->num_pkt = 0; 7441 break; 7442 case RTW89_CHAN_DFS: 7443 if (ch_info->ch_band != RTW89_BAND_6G) 7444 ch_info->period = max_t(u8, ch_info->period, 7445 RTW89_DFS_CHAN_TIME); 7446 ch_info->dwell_time = RTW89_DWELL_TIME; 7447 ch_info->pause_data = true; 7448 break; 7449 case RTW89_CHAN_ACTIVE: 7450 ch_info->pause_data = true; 7451 break; 7452 case RTW89_CHAN_EXTRA_OP: 7453 ch_info->central_ch = ext->chan.channel; 7454 ch_info->pri_ch = ext->chan.primary_channel; 7455 ch_info->ch_band = ext->chan.band_type; 7456 ch_info->bw = ext->chan.band_width; 7457 ch_info->tx_null = true; 7458 ch_info->num_pkt = 0; 7459 ch_info->macid_tx = true; 7460 break; 7461 default: 7462 rtw89_err(rtwdev, "Channel type out of bound\n"); 7463 } 7464 } 7465 7466 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7467 int ssid_num, 7468 struct rtw89_mac_chinfo_be *ch_info) 7469 { 7470 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7471 struct rtw89_pktofld_info *info; 7472 u8 probe_count = 0, i; 7473 7474 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7475 ch_info->bw = RTW89_SCAN_WIDTH; 7476 ch_info->tx_null = false; 7477 ch_info->pause_data = false; 7478 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7479 7480 if (ssid_num) { 7481 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 7482 ch_info->pkt_id[probe_count++] = info->id; 7483 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7484 break; 7485 } 7486 } 7487 7488 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7489 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7490 7491 switch (chan_type) { 7492 case RTW89_CHAN_DFS: 7493 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7494 ch_info->dwell_time = RTW89_DWELL_TIME; 7495 break; 7496 case RTW89_CHAN_ACTIVE: 7497 break; 7498 default: 7499 rtw89_warn(rtwdev, "Channel type out of bound\n"); 7500 break; 7501 } 7502 } 7503 7504 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 7505 int ssid_num, 7506 struct rtw89_mac_chinfo_be *ch_info) 7507 { 7508 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7509 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 7510 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7511 struct cfg80211_scan_request *req = rtwvif->scan_req; 7512 struct rtw89_pktofld_info *info; 7513 u8 band, probe_count = 0, i; 7514 7515 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 7516 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 7517 ch_info->bw = RTW89_SCAN_WIDTH; 7518 ch_info->tx_null = false; 7519 ch_info->pause_data = false; 7520 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7521 7522 if (ssid_num) { 7523 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 7524 7525 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 7526 if (info->channel_6ghz && 7527 ch_info->pri_ch != info->channel_6ghz) 7528 continue; 7529 7530 if (info->wildcard_6ghz) 7531 continue; 7532 7533 ch_info->pkt_id[probe_count++] = info->id; 7534 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 7535 break; 7536 } 7537 } 7538 7539 if (ch_info->ch_band == RTW89_BAND_6G) { 7540 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 7541 !ch_info->is_psc) { 7542 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 7543 if (!req->duration_mandatory) 7544 ch_info->period -= 
RTW89_DWELL_TIME_6G; 7545 } 7546 } 7547 7548 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 7549 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 7550 7551 switch (chan_type) { 7552 case RTW89_CHAN_DFS: 7553 if (ch_info->ch_band != RTW89_BAND_6G) 7554 ch_info->period = 7555 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 7556 ch_info->dwell_time = RTW89_DWELL_TIME; 7557 ch_info->pause_data = true; 7558 break; 7559 case RTW89_CHAN_ACTIVE: 7560 ch_info->pause_data = true; 7561 break; 7562 default: 7563 rtw89_warn(rtwdev, "Channel type out of bound\n"); 7564 break; 7565 } 7566 } 7567 7568 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7569 struct rtw89_vif_link *rtwvif_link) 7570 { 7571 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7572 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7573 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7574 struct ieee80211_channel *channel; 7575 struct list_head chan_list; 7576 int list_len; 7577 enum rtw89_chan_type type; 7578 int ret = 0; 7579 u32 idx; 7580 7581 INIT_LIST_HEAD(&chan_list); 7582 for (idx = 0, list_len = 0; 7583 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 7584 idx++, list_len++) { 7585 channel = nd_config->channels[idx]; 7586 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7587 if (!ch_info) { 7588 ret = -ENOMEM; 7589 goto out; 7590 } 7591 7592 ch_info->period = RTW89_CHANNEL_TIME; 7593 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7594 ch_info->central_ch = channel->hw_value; 7595 ch_info->pri_ch = channel->hw_value; 7596 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7597 7598 if (channel->flags & 7599 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7600 type = RTW89_CHAN_DFS; 7601 else 7602 type = RTW89_CHAN_ACTIVE; 7603 7604 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 7605 list_add_tail(&ch_info->list, &chan_list); 7606 } 7607 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list); 7608 7609 out: 7610 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7611 list_del(&ch_info->list); 7612 kfree(ch_info); 7613 } 7614 7615 return ret; 7616 } 7617 7618 static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev, 7619 enum rtw89_chan_type type, 7620 struct list_head *chan_list, 7621 struct cfg80211_scan_request *req, 7622 int *off_chan_time) 7623 { 7624 struct rtw89_mac_chinfo_ax *tmp; 7625 7626 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 7627 if (!tmp) 7628 return -ENOMEM; 7629 7630 switch (type) { 7631 case RTW89_CHAN_OPERATE: 7632 tmp->period = req->duration_mandatory ? 
7633 req->duration : RTW89_CHANNEL_TIME; 7634 *off_chan_time = 0; 7635 break; 7636 case RTW89_CHAN_EXTRA_OP: 7637 tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP; 7638 /* still calc @off_chan_time for scan op */ 7639 *off_chan_time += tmp->period; 7640 break; 7641 default: 7642 kfree(tmp); 7643 return -EINVAL; 7644 } 7645 7646 rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp); 7647 list_add_tail(&tmp->list, chan_list); 7648 7649 return 0; 7650 } 7651 7652 int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev, 7653 struct rtw89_vif_link *rtwvif_link) 7654 { 7655 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7656 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 7657 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7658 struct cfg80211_scan_request *req = rtwvif->scan_req; 7659 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7660 struct ieee80211_channel *channel; 7661 struct list_head chan_list; 7662 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 7663 enum rtw89_chan_type type; 7664 int off_chan_time = 0; 7665 int ret; 7666 u32 idx; 7667 7668 INIT_LIST_HEAD(&chan_list); 7669 7670 for (idx = 0; idx < req->n_channels; idx++) { 7671 channel = req->channels[idx]; 7672 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7673 if (!ch_info) { 7674 ret = -ENOMEM; 7675 goto out; 7676 } 7677 7678 if (req->duration) 7679 ch_info->period = req->duration; 7680 else if (channel->band == NL80211_BAND_6GHZ) 7681 ch_info->period = RTW89_CHANNEL_TIME_6G + 7682 RTW89_DWELL_TIME_6G; 7683 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7684 ch_info->period = RTW89_P2P_CHAN_TIME; 7685 else 7686 ch_info->period = RTW89_CHANNEL_TIME; 7687 7688 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7689 ch_info->central_ch = channel->hw_value; 7690 ch_info->pri_ch = channel->hw_value; 7691 ch_info->rand_seq_num = random_seq; 7692 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7693 7694 if (channel->flags & 7695 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7696 type = RTW89_CHAN_DFS; 7697 else 7698 type = RTW89_CHAN_ACTIVE; 7699 rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info); 7700 7701 if (!(scan_info->connected && 7702 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME)) 7703 goto next; 7704 7705 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE, 7706 &chan_list, req, &off_chan_time); 7707 if (ret) { 7708 kfree(ch_info); 7709 goto out; 7710 } 7711 7712 if (!ext->set) 7713 goto next; 7714 7715 ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP, 7716 &chan_list, req, &off_chan_time); 7717 if (ret) { 7718 kfree(ch_info); 7719 goto out; 7720 } 7721 7722 next: 7723 list_add_tail(&ch_info->list, &chan_list); 7724 off_chan_time += ch_info->period; 7725 } 7726 7727 list_splice_tail(&chan_list, &scan_info->chan_list); 7728 return 0; 7729 7730 out: 7731 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7732 list_del(&ch_info->list); 7733 kfree(ch_info); 7734 } 7735 7736 return ret; 7737 } 7738 7739 void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev) 7740 { 7741 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7742 struct rtw89_mac_chinfo_ax *ch_info, *tmp; 7743 7744 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7745 list_del(&ch_info->list); 7746 kfree(ch_info); 7747 } 7748 } 7749 7750 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 7751 struct rtw89_vif_link *rtwvif_link) 7752 { 7753 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7754 struct 
rtw89_mac_chinfo_ax *ch_info, *tmp; 7755 unsigned int list_len = 0; 7756 struct list_head list; 7757 int ret; 7758 7759 INIT_LIST_HEAD(&list); 7760 7761 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7762 /* The operating channel (tx_null == true) should 7763 * not be last in the list, to avoid breaking 7764 * RTL8851BU and RTL8832BU. 7765 */ 7766 if (list_len + 1 == RTW89_SCAN_LIST_LIMIT_AX && ch_info->tx_null) 7767 break; 7768 7769 list_move_tail(&ch_info->list, &list); 7770 7771 list_len++; 7772 if (list_len == RTW89_SCAN_LIST_LIMIT_AX) 7773 break; 7774 } 7775 7776 ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list); 7777 7778 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7779 list_del(&ch_info->list); 7780 kfree(ch_info); 7781 } 7782 7783 return ret; 7784 } 7785 7786 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7787 struct rtw89_vif_link *rtwvif_link) 7788 { 7789 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7790 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7791 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7792 struct ieee80211_channel *channel; 7793 struct list_head chan_list; 7794 enum rtw89_chan_type type; 7795 int list_len, ret; 7796 u32 idx; 7797 7798 INIT_LIST_HEAD(&chan_list); 7799 7800 for (idx = 0, list_len = 0; 7801 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 7802 idx++, list_len++) { 7803 channel = nd_config->channels[idx]; 7804 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7805 if (!ch_info) { 7806 ret = -ENOMEM; 7807 goto out; 7808 } 7809 7810 ch_info->period = RTW89_CHANNEL_TIME; 7811 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7812 ch_info->central_ch = channel->hw_value; 7813 ch_info->pri_ch = channel->hw_value; 7814 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7815 7816 if (channel->flags & 7817 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7818 type = RTW89_CHAN_DFS; 7819 else 7820 type = RTW89_CHAN_ACTIVE; 7821 7822 rtw89_pno_scan_add_chan_be(rtwdev, type, 7823 nd_config->n_match_sets, ch_info); 7824 list_add_tail(&ch_info->list, &chan_list); 7825 } 7826 7827 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 7828 rtwvif_link); 7829 7830 out: 7831 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7832 list_del(&ch_info->list); 7833 kfree(ch_info); 7834 } 7835 7836 return ret; 7837 } 7838 7839 int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev, 7840 struct rtw89_vif_link *rtwvif_link) 7841 { 7842 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7843 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7844 struct cfg80211_scan_request *req = rtwvif->scan_req; 7845 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7846 struct ieee80211_channel *channel; 7847 struct list_head chan_list; 7848 enum rtw89_chan_type type; 7849 bool chan_by_rnr; 7850 bool random_seq; 7851 int ret; 7852 u32 idx; 7853 7854 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 7855 chan_by_rnr = rtwdev->chip->support_rnr && 7856 (req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ); 7857 INIT_LIST_HEAD(&chan_list); 7858 7859 for (idx = 0; idx < req->n_channels; idx++) { 7860 channel = req->channels[idx]; 7861 7862 if (channel->band == NL80211_BAND_6GHZ && 7863 !cfg80211_channel_is_psc(channel) && chan_by_rnr) 7864 continue; 7865 7866 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7867 if (!ch_info) { 7868 ret = -ENOMEM; 7869 goto out; 7870 } 7871 7872 if (req->duration) 7873 ch_info->period = req->duration; 7874 else if 
(channel->band == NL80211_BAND_6GHZ) 7875 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7876 else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT) 7877 ch_info->period = RTW89_P2P_CHAN_TIME; 7878 else 7879 ch_info->period = RTW89_CHANNEL_TIME; 7880 7881 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7882 ch_info->central_ch = channel->hw_value; 7883 ch_info->pri_ch = channel->hw_value; 7884 ch_info->rand_seq_num = random_seq; 7885 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7886 7887 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7888 type = RTW89_CHAN_DFS; 7889 else 7890 type = RTW89_CHAN_ACTIVE; 7891 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 7892 7893 list_add_tail(&ch_info->list, &chan_list); 7894 } 7895 7896 list_splice_tail(&chan_list, &scan_info->chan_list); 7897 return 0; 7898 7899 out: 7900 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7901 list_del(&ch_info->list); 7902 kfree(ch_info); 7903 } 7904 7905 return ret; 7906 } 7907 7908 void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev) 7909 { 7910 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7911 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7912 7913 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7914 list_del(&ch_info->list); 7915 kfree(ch_info); 7916 } 7917 } 7918 7919 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 7920 struct rtw89_vif_link *rtwvif_link) 7921 { 7922 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7923 struct rtw89_mac_chinfo_be *ch_info, *tmp; 7924 unsigned int list_len = 0; 7925 struct list_head list; 7926 int ret; 7927 7928 INIT_LIST_HEAD(&list); 7929 7930 list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) { 7931 list_move_tail(&ch_info->list, &list); 7932 7933 list_len++; 7934 if (list_len == RTW89_SCAN_LIST_LIMIT_BE) 7935 break; 7936 } 7937 7938 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list, 7939 rtwvif_link); 7940 7941 list_for_each_entry_safe(ch_info, tmp, &list, list) { 7942 list_del(&ch_info->list); 7943 kfree(ch_info); 7944 } 7945 7946 return ret; 7947 } 7948 7949 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 7950 struct rtw89_vif_link *rtwvif_link, 7951 const u8 *mac_addr) 7952 { 7953 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7954 int ret; 7955 7956 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr); 7957 if (ret) { 7958 rtw89_err(rtwdev, "Update probe request failed\n"); 7959 goto out; 7960 } 7961 ret = mac->prep_chan_list(rtwdev, rtwvif_link); 7962 out: 7963 return ret; 7964 } 7965 7966 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev, 7967 struct rtw89_vif_link *rtwvif_link, 7968 u16 tu, bool scan) 7969 { 7970 struct ieee80211_p2p_noa_desc noa_desc = {}; 7971 struct ieee80211_bss_conf *bss_conf; 7972 u16 beacon_int; 7973 u64 tsf; 7974 int ret; 7975 7976 rcu_read_lock(); 7977 7978 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 7979 beacon_int = bss_conf->beacon_int; 7980 7981 rcu_read_unlock(); 7982 7983 tu += beacon_int * 3; 7984 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) 7985 rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000; 7986 7987 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 7988 if (ret) { 7989 rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); 7990 return; 7991 } 7992 7993 noa_desc.start_time = cpu_to_le32(tsf); 7994 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) 
{ 7995 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7996 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu)); 7997 noa_desc.count = 1; 7998 } else { 7999 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000)); 8000 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000)); 8001 noa_desc.count = 255; 8002 } 8003 8004 rtw89_p2p_noa_renew(rtwvif_link); 8005 if (scan) 8006 rtw89_p2p_noa_append(rtwvif_link, &noa_desc); 8007 8008 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); 8009 } 8010 8011 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan) 8012 { 8013 const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 8014 const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8015 const struct rtw89_chip_info *chip = rtwdev->chip; 8016 struct rtw89_mac_chinfo_ax *chinfo_ax; 8017 struct rtw89_mac_chinfo_be *chinfo_be; 8018 struct rtw89_vif_link *rtwvif_link; 8019 struct list_head *pos, *tmp; 8020 struct ieee80211_vif *vif; 8021 struct rtw89_vif *rtwvif; 8022 u16 tu = 0; 8023 8024 lockdep_assert_wiphy(rtwdev->hw->wiphy); 8025 8026 if (!scan) 8027 goto update; 8028 8029 list_for_each_safe(pos, tmp, &scan_info->chan_list) { 8030 switch (chip->chip_gen) { 8031 case RTW89_CHIP_AX: 8032 chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list); 8033 tu += chinfo_ax->period; 8034 break; 8035 case RTW89_CHIP_BE: 8036 chinfo_be = list_entry(pos, typeof(*chinfo_be), list); 8037 tu += chinfo_be->period; 8038 break; 8039 default: 8040 rtw89_warn(rtwdev, "%s: invalid chip gen %d\n", 8041 __func__, chip->chip_gen); 8042 return; 8043 } 8044 } 8045 8046 if (unlikely(tu == 0)) { 8047 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8048 "%s: cannot estimate needed TU\n", __func__); 8049 return; 8050 } 8051 8052 update: 8053 list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) { 8054 unsigned int link_id; 8055 8056 vif = rtwvif_to_vif(rtwvif); 8057 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 8058 continue; 8059 8060 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 8061 rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, 8062 tu, scan); 8063 } 8064 } 8065 8066 static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev, 8067 struct rtw89_vif *scan_rtwvif, 8068 const struct rtw89_chan *scan_op) 8069 { 8070 struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 8071 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8072 struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 8073 struct rtw89_vif *tmp; 8074 8075 ext->set = false; 8076 if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw)) 8077 return; 8078 8079 list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) { 8080 const struct rtw89_chan *tmp_chan; 8081 struct rtw89_vif_link *tmp_link; 8082 8083 if (tmp == scan_rtwvif) 8084 continue; 8085 8086 tmp_link = rtw89_vif_get_link_inst(tmp, 0); 8087 if (unlikely(!tmp_link)) { 8088 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8089 "hw scan: no HW-0 link for extra op\n"); 8090 continue; 8091 } 8092 8093 tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx); 8094 *ext = (struct rtw89_hw_scan_extra_op){ 8095 .set = true, 8096 .macid = tmp_link->mac_id, 8097 .port = tmp_link->port, 8098 .chan = *tmp_chan, 8099 .rtwvif_link = tmp_link, 8100 }; 8101 8102 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8103 "hw scan: extra op: center %d primary %d\n", 8104 ext->chan.channel, ext->chan.primary_channel); 8105 break; 8106 } 8107 } 8108 8109 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 8110 struct rtw89_vif_link 
*rtwvif_link, 8111 struct ieee80211_scan_request *scan_req) 8112 { 8113 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 8114 struct cfg80211_scan_request *req = &scan_req->req; 8115 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 8116 rtwvif_link->chanctx_idx); 8117 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 8118 struct rtw89_chanctx_pause_parm pause_parm = { 8119 .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, 8120 .trigger = rtwvif_link, 8121 }; 8122 u32 rx_fltr = rtwdev->hal.rx_fltr; 8123 u8 mac_addr[ETH_ALEN]; 8124 int ret; 8125 8126 /* clone op and keep it during scan */ 8127 rtwdev->scan_info.op_chan = *chan; 8128 8129 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 8130 "hw scan: op: center %d primary %d\n", 8131 chan->channel, chan->primary_channel); 8132 8133 rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan); 8134 8135 rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 8136 rtwdev->scan_info.scanning_vif = rtwvif_link; 8137 rtwdev->scan_info.abort = false; 8138 rtwdev->scan_info.delay = 0; 8139 rtwvif->scan_ies = &scan_req->ies; 8140 rtwvif->scan_req = req; 8141 8142 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 8143 get_random_mask_addr(mac_addr, req->mac_addr, 8144 req->mac_addr_mask); 8145 else 8146 ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 8147 8148 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr); 8149 if (ret) { 8150 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); 8151 return ret; 8152 } 8153 8154 ieee80211_stop_queues(rtwdev->hw); 8155 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 8156 8157 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 8158 8159 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 8160 rx_fltr &= ~B_AX_A_BC; 8161 rx_fltr &= ~B_AX_A_A1_MATCH; 8162 8163 rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rx_fltr); 8164 8165 rtw89_chanctx_pause(rtwdev, &pause_parm); 8166 rtw89_phy_dig_suspend(rtwdev); 8167 8168 if (mode == RTW89_ENTITY_MODE_MCC) 8169 rtw89_hw_scan_update_beacon_noa(rtwdev, true); 8170 8171 return 0; 8172 } 8173 8174 struct rtw89_hw_scan_complete_cb_data { 8175 struct rtw89_vif_link *rtwvif_link; 8176 bool aborted; 8177 }; 8178 8179 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) 8180 { 8181 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 8182 struct rtw89_hw_scan_complete_cb_data *cb_data = data; 8183 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link; 8184 struct cfg80211_scan_info info = { 8185 .aborted = cb_data->aborted, 8186 }; 8187 8188 if (!rtwvif_link) 8189 return -EINVAL; 8190 8191 rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rtwdev->hal.rx_fltr); 8192 8193 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 8194 ieee80211_scan_completed(rtwdev->hw, &info); 8195 ieee80211_wake_queues(rtwdev->hw); 8196 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 8197 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 8198 rtw89_phy_dig_resume(rtwdev, true); 8199 8200 rtw89_hw_scan_cleanup(rtwdev, rtwvif_link); 8201 8202 if (mode == RTW89_ENTITY_MODE_MCC) 8203 rtw89_hw_scan_update_beacon_noa(rtwdev, false); 8204 8205 return 0; 8206 } 8207 8208 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 8209 struct rtw89_vif_link *rtwvif_link, 8210 bool aborted) 8211 { 8212 struct rtw89_hw_scan_complete_cb_data cb_data = { 8213 .rtwvif_link = rtwvif_link, 8214 .aborted = aborted, 8215 }; 8216 const struct rtw89_chanctx_cb_parm cb_parm = { 8217 .cb = rtw89_hw_scan_complete_cb, 8218 .data = &cb_data, 8219 .caller = __func__, 8220 }; 
8221 8222 /* The things here need to be done after setting the channel (for coex) 8223 * and before proceeding with entity mode (for MCC). So, pass them as a 8224 * callback to keep the right sequence rather than doing them directly. 8225 */ 8226 rtw89_chanctx_proceed(rtwdev, &cb_parm); 8227 } 8228 8229 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 8230 struct rtw89_vif_link *rtwvif_link) 8231 { 8232 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8233 int ret; 8234 8235 scan_info->abort = true; 8236 8237 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 8238 if (ret) 8239 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 8240 8241 /* Indicate ieee80211_scan_completed() before returning, which is safe 8242 * because the scan abort command always waits for completion of 8243 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 8244 * work properly. 8245 */ 8246 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 8247 } 8248 8249 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 8250 { 8251 struct rtw89_vif_link *rtwvif_link; 8252 struct rtw89_vif *rtwvif; 8253 unsigned int link_id; 8254 8255 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 8256 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 8257 /* A non-zero bssid implies connected or attempting to connect */ 8258 if (!is_zero_ether_addr(rtwvif_link->bssid)) 8259 return true; 8260 } 8261 } 8262 8263 return false; 8264 } 8265 8266 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 8267 struct rtw89_vif_link *rtwvif_link, 8268 bool enable) 8269 { 8270 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 8271 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 8272 const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 8273 struct rtw89_scan_option opt = {0}; 8274 bool connected; 8275 int ret = 0; 8276 8277 if (!rtwvif_link) 8278 return -EINVAL; 8279 8280 connected = rtwdev->scan_info.connected; 8281 opt.enable = enable; 8282 opt.target_ch_mode = connected; 8283 opt.delay = rtwdev->scan_info.delay; 8284 if (enable) { 8285 ret = mac->add_chan_list(rtwdev, rtwvif_link); 8286 if (ret) 8287 goto out; 8288 } 8289 8290 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 8291 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 8292 opt.scan_mode = RTW89_SCAN_MODE_SA; 8293 opt.band = rtwvif_link->mac_idx; 8294 opt.num_macc_role = 0; 8295 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 8296 opt.num_opch = connected ? 1 : 0; 8297 if (connected && ext->set) 8298 opt.num_opch++; 8299 8300 opt.opch_end = connected ?
0 : RTW89_CHAN_INVALID; 8301 } 8302 8303 ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false); 8304 8305 out: 8306 return ret; 8307 } 8308 8309 #define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566 8310 #define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0 8311 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 8312 { 8313 struct rtw89_h2c_trig_cpu_except *h2c; 8314 u32 cpu_exception_type_def; 8315 u32 len = sizeof(*h2c); 8316 struct sk_buff *skb; 8317 int ret; 8318 8319 if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw)) 8320 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1; 8321 else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw)) 8322 cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0; 8323 else 8324 return -EOPNOTSUPP; 8325 8326 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8327 if (!skb) { 8328 rtw89_err(rtwdev, 8329 "failed to alloc skb for fw cpu exception\n"); 8330 return -ENOMEM; 8331 } 8332 8333 skb_put(skb, len); 8334 h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data; 8335 8336 h2c->w0 = le32_encode_bits(cpu_exception_type_def, 8337 RTW89_H2C_CPU_EXCEPTION_TYPE); 8338 8339 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8340 H2C_CAT_TEST, 8341 H2C_CL_FW_STATUS_TEST, 8342 H2C_FUNC_CPU_EXCEPTION, 0, 0, 8343 len); 8344 8345 ret = rtw89_h2c_tx(rtwdev, skb, false); 8346 if (ret) { 8347 rtw89_err(rtwdev, "failed to send h2c\n"); 8348 dev_kfree_skb_any(skb); 8349 return ret; 8350 } 8351 8352 return 0; 8353 } 8354 8355 #define H2C_PKT_DROP_LEN 24 8356 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 8357 const struct rtw89_pkt_drop_params *params) 8358 { 8359 struct sk_buff *skb; 8360 int ret; 8361 8362 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 8363 if (!skb) { 8364 rtw89_err(rtwdev, 8365 "failed to alloc skb for packet drop\n"); 8366 return -ENOMEM; 8367 } 8368 8369 switch (params->sel) { 8370 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 8371 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 8372 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 8373 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 8374 case RTW89_PKT_DROP_SEL_BAND_ONCE: 8375 break; 8376 default: 8377 rtw89_debug(rtwdev, RTW89_DBG_FW, 8378 "H2C of pkt drop might not fully support sel: %d yet\n", 8379 params->sel); 8380 break; 8381 } 8382 8383 skb_put(skb, H2C_PKT_DROP_LEN); 8384 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 8385 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 8386 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 8387 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 8388 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 8389 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 8390 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 8391 params->macid_band_sel[0]); 8392 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 8393 params->macid_band_sel[1]); 8394 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 8395 params->macid_band_sel[2]); 8396 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 8397 params->macid_band_sel[3]); 8398 8399 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8400 H2C_CAT_MAC, 8401 H2C_CL_MAC_FW_OFLD, 8402 H2C_FUNC_PKT_DROP, 0, 0, 8403 H2C_PKT_DROP_LEN); 8404 8405 ret = rtw89_h2c_tx(rtwdev, skb, false); 8406 if (ret) { 8407 rtw89_err(rtwdev, "failed to send h2c\n"); 8408 goto fail; 8409 } 8410 8411 return 0; 8412 8413 fail: 8414 dev_kfree_skb_any(skb); 8415 return ret; 8416 } 8417 8418 #define H2C_KEEP_ALIVE_LEN 4 8419 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct 
rtw89_vif_link *rtwvif_link, 8420 bool enable) 8421 { 8422 struct sk_buff *skb; 8423 u8 pkt_id = 0; 8424 int ret; 8425 8426 if (enable) { 8427 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8428 RTW89_PKT_OFLD_TYPE_NULL_DATA, 8429 &pkt_id); 8430 if (ret) 8431 return -EPERM; 8432 } 8433 8434 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 8435 if (!skb) { 8436 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8437 return -ENOMEM; 8438 } 8439 8440 skb_put(skb, H2C_KEEP_ALIVE_LEN); 8441 8442 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 8443 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 8444 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 8445 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 8446 8447 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8448 H2C_CAT_MAC, 8449 H2C_CL_MAC_WOW, 8450 H2C_FUNC_KEEP_ALIVE, 0, 1, 8451 H2C_KEEP_ALIVE_LEN); 8452 8453 ret = rtw89_h2c_tx(rtwdev, skb, false); 8454 if (ret) { 8455 rtw89_err(rtwdev, "failed to send h2c\n"); 8456 goto fail; 8457 } 8458 8459 return 0; 8460 8461 fail: 8462 dev_kfree_skb_any(skb); 8463 8464 return ret; 8465 } 8466 8467 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8468 bool enable) 8469 { 8470 struct rtw89_h2c_arp_offload *h2c; 8471 u32 len = sizeof(*h2c); 8472 struct sk_buff *skb; 8473 u8 pkt_id = 0; 8474 int ret; 8475 8476 if (enable) { 8477 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8478 RTW89_PKT_OFLD_TYPE_ARP_RSP, 8479 &pkt_id); 8480 if (ret) 8481 return ret; 8482 } 8483 8484 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8485 if (!skb) { 8486 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 8487 return -ENOMEM; 8488 } 8489 8490 skb_put(skb, len); 8491 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 8492 8493 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 8494 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 8495 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 8496 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 8497 8498 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8499 H2C_CAT_MAC, 8500 H2C_CL_MAC_WOW, 8501 H2C_FUNC_ARP_OFLD, 0, 1, 8502 len); 8503 8504 ret = rtw89_h2c_tx(rtwdev, skb, false); 8505 if (ret) { 8506 rtw89_err(rtwdev, "failed to send h2c\n"); 8507 goto fail; 8508 } 8509 8510 return 0; 8511 8512 fail: 8513 dev_kfree_skb_any(skb); 8514 8515 return ret; 8516 } 8517 8518 #define H2C_DISCONNECT_DETECT_LEN 8 8519 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 8520 struct rtw89_vif_link *rtwvif_link, bool enable) 8521 { 8522 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8523 struct sk_buff *skb; 8524 u8 macid = rtwvif_link->mac_id; 8525 int ret; 8526 8527 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 8528 if (!skb) { 8529 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8530 return -ENOMEM; 8531 } 8532 8533 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 8534 8535 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 8536 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 8537 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 8538 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 8539 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 8540 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 8541 } 8542 8543 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8544 H2C_CAT_MAC, 8545 H2C_CL_MAC_WOW, 8546 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 8547 
H2C_DISCONNECT_DETECT_LEN); 8548 8549 ret = rtw89_h2c_tx(rtwdev, skb, false); 8550 if (ret) { 8551 rtw89_err(rtwdev, "failed to send h2c\n"); 8552 goto fail; 8553 } 8554 8555 return 0; 8556 8557 fail: 8558 dev_kfree_skb_any(skb); 8559 8560 return ret; 8561 } 8562 8563 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8564 bool enable) 8565 { 8566 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8567 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 8568 struct rtw89_h2c_cfg_nlo *h2c; 8569 u32 len = sizeof(*h2c); 8570 struct sk_buff *skb; 8571 int ret, i; 8572 8573 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8574 if (!skb) { 8575 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 8576 return -ENOMEM; 8577 } 8578 8579 skb_put(skb, len); 8580 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 8581 8582 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 8583 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 8584 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 8585 8586 if (enable) { 8587 h2c->nlo_cnt = nd_config->n_match_sets; 8588 for (i = 0 ; i < nd_config->n_match_sets; i++) { 8589 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 8590 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 8591 nd_config->match_sets[i].ssid.ssid_len); 8592 } 8593 } 8594 8595 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8596 H2C_CAT_MAC, 8597 H2C_CL_MAC_WOW, 8598 H2C_FUNC_NLO, 0, 1, 8599 len); 8600 8601 ret = rtw89_h2c_tx(rtwdev, skb, false); 8602 if (ret) { 8603 rtw89_err(rtwdev, "failed to send h2c\n"); 8604 goto fail; 8605 } 8606 8607 return 0; 8608 8609 fail: 8610 dev_kfree_skb_any(skb); 8611 return ret; 8612 } 8613 8614 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8615 bool enable) 8616 { 8617 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8618 struct rtw89_h2c_wow_global *h2c; 8619 u8 macid = rtwvif_link->mac_id; 8620 u32 len = sizeof(*h2c); 8621 struct sk_buff *skb; 8622 int ret; 8623 8624 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8625 if (!skb) { 8626 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 8627 return -ENOMEM; 8628 } 8629 8630 skb_put(skb, len); 8631 h2c = (struct rtw89_h2c_wow_global *)skb->data; 8632 8633 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 8634 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 8635 le32_encode_bits(rtw_wow->ptk_alg, 8636 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 8637 le32_encode_bits(rtw_wow->gtk_alg, 8638 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 8639 h2c->key_info = rtw_wow->key_info; 8640 8641 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8642 H2C_CAT_MAC, 8643 H2C_CL_MAC_WOW, 8644 H2C_FUNC_WOW_GLOBAL, 0, 1, 8645 len); 8646 8647 ret = rtw89_h2c_tx(rtwdev, skb, false); 8648 if (ret) { 8649 rtw89_err(rtwdev, "failed to send h2c\n"); 8650 goto fail; 8651 } 8652 8653 return 0; 8654 8655 fail: 8656 dev_kfree_skb_any(skb); 8657 8658 return ret; 8659 } 8660 8661 #define H2C_WAKEUP_CTRL_LEN 4 8662 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 8663 struct rtw89_vif_link *rtwvif_link, 8664 bool enable) 8665 { 8666 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8667 struct sk_buff *skb; 8668 u8 macid = rtwvif_link->mac_id; 8669 int ret; 8670 8671 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 8672 if (!skb) { 8673 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 8674 return -ENOMEM; 8675 } 8676 8677 skb_put(skb, 
H2C_WAKEUP_CTRL_LEN); 8678 8679 if (rtw_wow->pattern_cnt) 8680 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 8681 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 8682 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 8683 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 8684 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 8685 8686 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 8687 8688 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8689 H2C_CAT_MAC, 8690 H2C_CL_MAC_WOW, 8691 H2C_FUNC_WAKEUP_CTRL, 0, 1, 8692 H2C_WAKEUP_CTRL_LEN); 8693 8694 ret = rtw89_h2c_tx(rtwdev, skb, false); 8695 if (ret) { 8696 rtw89_err(rtwdev, "failed to send h2c\n"); 8697 goto fail; 8698 } 8699 8700 return 0; 8701 8702 fail: 8703 dev_kfree_skb_any(skb); 8704 8705 return ret; 8706 } 8707 8708 #define H2C_WOW_CAM_UPD_LEN 24 8709 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 8710 struct rtw89_wow_cam_info *cam_info) 8711 { 8712 struct sk_buff *skb; 8713 int ret; 8714 8715 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 8716 if (!skb) { 8717 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 8718 return -ENOMEM; 8719 } 8720 8721 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 8722 8723 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 8724 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 8725 if (cam_info->valid) { 8726 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 8727 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 8728 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 8729 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 8730 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 8731 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 8732 cam_info->negative_pattern_match); 8733 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 8734 cam_info->skip_mac_hdr); 8735 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 8736 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 8737 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 8738 } 8739 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 8740 8741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8742 H2C_CAT_MAC, 8743 H2C_CL_MAC_WOW, 8744 H2C_FUNC_WOW_CAM_UPD, 0, 1, 8745 H2C_WOW_CAM_UPD_LEN); 8746 8747 ret = rtw89_h2c_tx(rtwdev, skb, false); 8748 if (ret) { 8749 rtw89_err(rtwdev, "failed to send h2c\n"); 8750 goto fail; 8751 } 8752 8753 return 0; 8754 fail: 8755 dev_kfree_skb_any(skb); 8756 8757 return ret; 8758 } 8759 8760 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 8761 struct rtw89_vif_link *rtwvif_link, 8762 bool enable) 8763 { 8764 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 8765 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 8766 struct rtw89_h2c_wow_gtk_ofld *h2c; 8767 u8 macid = rtwvif_link->mac_id; 8768 u32 len = sizeof(*h2c); 8769 u8 pkt_id_sa_query = 0; 8770 struct sk_buff *skb; 8771 u8 pkt_id_eapol = 0; 8772 int ret; 8773 8774 if (!rtw_wow->gtk_alg) 8775 return 0; 8776 8777 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8778 if (!skb) { 8779 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 8780 return -ENOMEM; 8781 } 8782 8783 skb_put(skb, len); 8784 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 8785 8786 if (!enable) 8787 goto hdr; 8788 8789 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8790 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 8791 &pkt_id_eapol); 8792 if (ret) 8793 goto fail; 8794 8795 if (gtk_info->igtk_keyid) { 8796 ret = 
rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 8797 RTW89_PKT_OFLD_TYPE_SA_QUERY, 8798 &pkt_id_sa_query); 8799 if (ret) 8800 goto fail; 8801 } 8802 8803 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 8804 le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0, 8805 sizeof(gtk_info->txmickey)), 8806 RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 8807 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 8808 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 8809 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 8810 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 8811 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 8812 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 8813 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 8814 h2c->gtk_info = rtw_wow->gtk_info; 8815 8816 hdr: 8817 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8818 H2C_CAT_MAC, 8819 H2C_CL_MAC_WOW, 8820 H2C_FUNC_GTK_OFLD, 0, 1, 8821 len); 8822 8823 ret = rtw89_h2c_tx(rtwdev, skb, false); 8824 if (ret) { 8825 rtw89_err(rtwdev, "failed to send h2c\n"); 8826 goto fail; 8827 } 8828 return 0; 8829 fail: 8830 dev_kfree_skb_any(skb); 8831 8832 return ret; 8833 } 8834 8835 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 8836 bool enable) 8837 { 8838 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 8839 struct rtw89_h2c_fwips *h2c; 8840 u32 len = sizeof(*h2c); 8841 struct sk_buff *skb; 8842 8843 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8844 if (!skb) { 8845 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 8846 return -ENOMEM; 8847 } 8848 skb_put(skb, len); 8849 h2c = (struct rtw89_h2c_fwips *)skb->data; 8850 8851 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 8852 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 8853 8854 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8855 H2C_CAT_MAC, 8856 H2C_CL_MAC_PS, 8857 H2C_FUNC_IPS_CFG, 0, 1, 8858 len); 8859 8860 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 8861 } 8862 8863 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 8864 { 8865 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 8866 struct rtw89_h2c_wow_aoac *h2c; 8867 u32 len = sizeof(*h2c); 8868 struct sk_buff *skb; 8869 8870 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8871 if (!skb) { 8872 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 8873 return -ENOMEM; 8874 } 8875 8876 skb_put(skb, len); 8877 8878 /* This H2C only notifies the firmware to generate an AOAC report C2H; 8879 * it carries no parameters. 8880 */ 8881 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8882 H2C_CAT_MAC, 8883 H2C_CL_MAC_WOW, 8884 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 8885 len); 8886 8887 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 8888 } 8889 8890 /* Return < 0 if failures happen while waiting for the condition. 8891 * Return 0 when waiting for the condition succeeds. 8892 * Return > 0 if the wait is considered unreachable due to driver/FW design, 8893 * where 1 means during SER.
8894 */ 8895 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 8896 struct rtw89_wait_info *wait, unsigned int cond) 8897 { 8898 struct rtw89_wait_response *prep; 8899 int ret = 0; 8900 8901 lockdep_assert_wiphy(rtwdev->hw->wiphy); 8902 8903 prep = rtw89_wait_for_cond_prep(wait, cond); 8904 if (IS_ERR(prep)) 8905 goto out; 8906 8907 ret = rtw89_h2c_tx(rtwdev, skb, false); 8908 if (ret) { 8909 rtw89_err(rtwdev, "failed to send h2c\n"); 8910 dev_kfree_skb_any(skb); 8911 ret = -EBUSY; 8912 goto out; 8913 } 8914 8915 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) { 8916 ret = 1; 8917 goto out; 8918 } 8919 8920 out: 8921 return rtw89_wait_for_cond_eval(wait, prep, ret); 8922 } 8923 8924 #define H2C_ADD_MCC_LEN 16 8925 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 8926 const struct rtw89_fw_mcc_add_req *p) 8927 { 8928 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8929 struct sk_buff *skb; 8930 unsigned int cond; 8931 8932 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 8933 if (!skb) { 8934 rtw89_err(rtwdev, 8935 "failed to alloc skb for add mcc\n"); 8936 return -ENOMEM; 8937 } 8938 8939 skb_put(skb, H2C_ADD_MCC_LEN); 8940 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 8941 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 8942 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 8943 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 8944 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 8945 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 8946 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 8947 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 8948 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 8949 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 8950 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 8951 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 8952 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 8953 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 8954 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 8955 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 8956 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 8957 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 8958 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 8959 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 8960 8961 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8962 H2C_CAT_MAC, 8963 H2C_CL_MCC, 8964 H2C_FUNC_ADD_MCC, 0, 0, 8965 H2C_ADD_MCC_LEN); 8966 8967 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 8968 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8969 } 8970 8971 #define H2C_START_MCC_LEN 12 8972 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 8973 const struct rtw89_fw_mcc_start_req *p) 8974 { 8975 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8976 struct sk_buff *skb; 8977 unsigned int cond; 8978 8979 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 8980 if (!skb) { 8981 rtw89_err(rtwdev, 8982 "failed to alloc skb for start mcc\n"); 8983 return -ENOMEM; 8984 } 8985 8986 skb_put(skb, H2C_START_MCC_LEN); 8987 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 8988 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 8989 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 8990 
RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 8991 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 8992 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 8993 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 8994 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 8995 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 8996 8997 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8998 H2C_CAT_MAC, 8999 H2C_CL_MCC, 9000 H2C_FUNC_START_MCC, 0, 0, 9001 H2C_START_MCC_LEN); 9002 9003 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 9004 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9005 } 9006 9007 #define H2C_STOP_MCC_LEN 4 9008 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 9009 bool prev_groups) 9010 { 9011 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9012 struct sk_buff *skb; 9013 unsigned int cond; 9014 9015 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 9016 if (!skb) { 9017 rtw89_err(rtwdev, 9018 "failed to alloc skb for stop mcc\n"); 9019 return -ENOMEM; 9020 } 9021 9022 skb_put(skb, H2C_STOP_MCC_LEN); 9023 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 9024 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 9025 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 9026 9027 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9028 H2C_CAT_MAC, 9029 H2C_CL_MCC, 9030 H2C_FUNC_STOP_MCC, 0, 0, 9031 H2C_STOP_MCC_LEN); 9032 9033 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 9034 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9035 } 9036 9037 #define H2C_DEL_MCC_GROUP_LEN 4 9038 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 9039 bool prev_groups) 9040 { 9041 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9042 struct sk_buff *skb; 9043 unsigned int cond; 9044 9045 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 9046 if (!skb) { 9047 rtw89_err(rtwdev, 9048 "failed to alloc skb for del mcc group\n"); 9049 return -ENOMEM; 9050 } 9051 9052 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 9053 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 9054 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 9055 9056 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9057 H2C_CAT_MAC, 9058 H2C_CL_MCC, 9059 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 9060 H2C_DEL_MCC_GROUP_LEN); 9061 9062 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 9063 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9064 } 9065 9066 #define H2C_RESET_MCC_GROUP_LEN 4 9067 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 9068 { 9069 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9070 struct sk_buff *skb; 9071 unsigned int cond; 9072 9073 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 9074 if (!skb) { 9075 rtw89_err(rtwdev, 9076 "failed to alloc skb for reset mcc group\n"); 9077 return -ENOMEM; 9078 } 9079 9080 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 9081 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 9082 9083 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9084 H2C_CAT_MAC, 9085 H2C_CL_MCC, 9086 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 9087 H2C_RESET_MCC_GROUP_LEN); 9088 9089 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 9090 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9091 } 9092 9093 #define H2C_MCC_REQ_TSF_LEN 4 9094 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 9095 const struct rtw89_fw_mcc_tsf_req *req, 9096 struct 
rtw89_mac_mcc_tsf_rpt *rpt) 9097 { 9098 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9099 struct rtw89_mac_mcc_tsf_rpt *tmp; 9100 struct sk_buff *skb; 9101 unsigned int cond; 9102 int ret; 9103 9104 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 9105 if (!skb) { 9106 rtw89_err(rtwdev, 9107 "failed to alloc skb for mcc req tsf\n"); 9108 return -ENOMEM; 9109 } 9110 9111 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 9112 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 9113 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 9114 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 9115 9116 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9117 H2C_CAT_MAC, 9118 H2C_CL_MCC, 9119 H2C_FUNC_MCC_REQ_TSF, 0, 0, 9120 H2C_MCC_REQ_TSF_LEN); 9121 9122 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 9123 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9124 if (ret) 9125 return ret; 9126 9127 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 9128 *rpt = *tmp; 9129 9130 return 0; 9131 } 9132 9133 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 9134 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 9135 u8 *bitmap) 9136 { 9137 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9138 struct sk_buff *skb; 9139 unsigned int cond; 9140 u8 map_len; 9141 u8 h2c_len; 9142 9143 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 9144 map_len = RTW89_MAX_MAC_ID_NUM / 8; 9145 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 9146 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 9147 if (!skb) { 9148 rtw89_err(rtwdev, 9149 "failed to alloc skb for mcc macid bitmap\n"); 9150 return -ENOMEM; 9151 } 9152 9153 skb_put(skb, h2c_len); 9154 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 9155 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 9156 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 9157 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 9158 9159 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9160 H2C_CAT_MAC, 9161 H2C_CL_MCC, 9162 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 9163 h2c_len); 9164 9165 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 9166 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9167 } 9168 9169 #define H2C_MCC_SYNC_LEN 4 9170 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 9171 u8 target, u8 offset) 9172 { 9173 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9174 struct sk_buff *skb; 9175 unsigned int cond; 9176 9177 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 9178 if (!skb) { 9179 rtw89_err(rtwdev, 9180 "failed to alloc skb for mcc sync\n"); 9181 return -ENOMEM; 9182 } 9183 9184 skb_put(skb, H2C_MCC_SYNC_LEN); 9185 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 9186 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 9187 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 9188 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 9189 9190 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9191 H2C_CAT_MAC, 9192 H2C_CL_MCC, 9193 H2C_FUNC_MCC_SYNC, 0, 0, 9194 H2C_MCC_SYNC_LEN); 9195 9196 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 9197 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9198 } 9199 9200 #define H2C_MCC_SET_DURATION_LEN 20 9201 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 9202 const struct rtw89_fw_mcc_duration *p) 9203 { 9204 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9205 struct sk_buff *skb; 9206 unsigned int cond; 9207 
9208 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 9209 if (!skb) { 9210 rtw89_err(rtwdev, 9211 "failed to alloc skb for mcc set duration\n"); 9212 return -ENOMEM; 9213 } 9214 9215 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 9216 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 9217 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 9218 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 9219 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 9220 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 9221 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 9222 p->start_tsf_low); 9223 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 9224 p->start_tsf_high); 9225 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 9226 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 9227 9228 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9229 H2C_CAT_MAC, 9230 H2C_CL_MCC, 9231 H2C_FUNC_MCC_SET_DURATION, 0, 0, 9232 H2C_MCC_SET_DURATION_LEN); 9233 9234 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 9235 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9236 } 9237 9238 static 9239 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 9240 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 9241 struct rtw89_h2c_mrc_add_slot *slot_h2c) 9242 { 9243 bool fill_h2c = !!slot_h2c; 9244 unsigned int i; 9245 9246 if (!fill_h2c) 9247 goto calc_len; 9248 9249 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 9250 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 9251 le32_encode_bits(slot_arg->courtesy_en, 9252 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 9253 le32_encode_bits(slot_arg->role_num, 9254 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 9255 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 9256 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 9257 le32_encode_bits(slot_arg->courtesy_target, 9258 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 9259 9260 for (i = 0; i < slot_arg->role_num; i++) { 9261 slot_h2c->roles[i].w0 = 9262 le32_encode_bits(slot_arg->roles[i].macid, 9263 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 9264 le32_encode_bits(slot_arg->roles[i].role_type, 9265 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 9266 le32_encode_bits(slot_arg->roles[i].is_master, 9267 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 9268 le32_encode_bits(slot_arg->roles[i].en_tx_null, 9269 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 9270 le32_encode_bits(false, 9271 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 9272 le32_encode_bits(false, 9273 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 9274 slot_h2c->roles[i].w1 = 9275 le32_encode_bits(slot_arg->roles[i].central_ch, 9276 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 9277 le32_encode_bits(slot_arg->roles[i].primary_ch, 9278 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 9279 le32_encode_bits(slot_arg->roles[i].bw, 9280 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 9281 le32_encode_bits(slot_arg->roles[i].band, 9282 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 9283 le32_encode_bits(slot_arg->roles[i].null_early, 9284 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 9285 le32_encode_bits(false, 9286 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 9287 le32_encode_bits(true, 9288 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 9289 slot_h2c->roles[i].macid_main_bitmap = 9290 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 9291 slot_h2c->roles[i].macid_paired_bitmap = 9292 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 9293 } 9294 9295 calc_len: 9296 return struct_size(slot_h2c, roles, 
slot_arg->role_num); 9297 } 9298 9299 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 9300 const struct rtw89_fw_mrc_add_arg *arg) 9301 { 9302 struct rtw89_h2c_mrc_add *h2c_head; 9303 struct sk_buff *skb; 9304 unsigned int i; 9305 void *tmp; 9306 u32 len; 9307 int ret; 9308 9309 len = sizeof(*h2c_head); 9310 for (i = 0; i < arg->slot_num; i++) 9311 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 9312 9313 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9314 if (!skb) { 9315 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 9316 return -ENOMEM; 9317 } 9318 9319 skb_put(skb, len); 9320 tmp = skb->data; 9321 9322 h2c_head = tmp; 9323 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 9324 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 9325 le32_encode_bits(arg->sch_type, 9326 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 9327 le32_encode_bits(arg->slot_num, 9328 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 9329 le32_encode_bits(arg->btc_in_sch, 9330 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 9331 9332 tmp += sizeof(*h2c_head); 9333 for (i = 0; i < arg->slot_num; i++) 9334 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 9335 9336 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9337 H2C_CAT_MAC, 9338 H2C_CL_MRC, 9339 H2C_FUNC_ADD_MRC, 0, 0, 9340 len); 9341 9342 ret = rtw89_h2c_tx(rtwdev, skb, false); 9343 if (ret) { 9344 rtw89_err(rtwdev, "failed to send h2c\n"); 9345 dev_kfree_skb_any(skb); 9346 return -EBUSY; 9347 } 9348 9349 return 0; 9350 } 9351 9352 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 9353 const struct rtw89_fw_mrc_start_arg *arg) 9354 { 9355 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9356 struct rtw89_h2c_mrc_start *h2c; 9357 u32 len = sizeof(*h2c); 9358 struct sk_buff *skb; 9359 unsigned int cond; 9360 9361 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9362 if (!skb) { 9363 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 9364 return -ENOMEM; 9365 } 9366 9367 skb_put(skb, len); 9368 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 9369 9370 h2c->w0 = le32_encode_bits(arg->sch_idx, 9371 RTW89_H2C_MRC_START_W0_SCH_IDX) | 9372 le32_encode_bits(arg->old_sch_idx, 9373 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 9374 le32_encode_bits(arg->action, 9375 RTW89_H2C_MRC_START_W0_ACTION); 9376 9377 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 9378 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 9379 9380 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9381 H2C_CAT_MAC, 9382 H2C_CL_MRC, 9383 H2C_FUNC_START_MRC, 0, 0, 9384 len); 9385 9386 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 9387 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9388 } 9389 9390 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 9391 { 9392 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9393 struct rtw89_h2c_mrc_del *h2c; 9394 u32 len = sizeof(*h2c); 9395 struct sk_buff *skb; 9396 unsigned int cond; 9397 9398 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9399 if (!skb) { 9400 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 9401 return -ENOMEM; 9402 } 9403 9404 skb_put(skb, len); 9405 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 9406 9407 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 9408 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 9409 9410 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9411 H2C_CAT_MAC, 9412 H2C_CL_MRC, 9413 H2C_FUNC_DEL_MRC, 0, 0, 9414 len); 9415 9416 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 9417 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, 
cond); 9418 } 9419 9420 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 9421 const struct rtw89_fw_mrc_req_tsf_arg *arg, 9422 struct rtw89_mac_mrc_tsf_rpt *rpt) 9423 { 9424 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 9425 struct rtw89_h2c_mrc_req_tsf *h2c; 9426 struct rtw89_mac_mrc_tsf_rpt *tmp; 9427 struct sk_buff *skb; 9428 unsigned int i; 9429 u32 len; 9430 int ret; 9431 9432 len = struct_size(h2c, infos, arg->num); 9433 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9434 if (!skb) { 9435 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 9436 return -ENOMEM; 9437 } 9438 9439 skb_put(skb, len); 9440 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 9441 9442 h2c->req_tsf_num = arg->num; 9443 for (i = 0; i < arg->num; i++) 9444 h2c->infos[i] = 9445 u8_encode_bits(arg->infos[i].band, 9446 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 9447 u8_encode_bits(arg->infos[i].port, 9448 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 9449 9450 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9451 H2C_CAT_MAC, 9452 H2C_CL_MRC, 9453 H2C_FUNC_MRC_REQ_TSF, 0, 0, 9454 len); 9455 9456 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 9457 if (ret) 9458 return ret; 9459 9460 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 9461 *rpt = *tmp; 9462 9463 return 0; 9464 } 9465 9466 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 9467 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 9468 { 9469 struct rtw89_h2c_mrc_upd_bitmap *h2c; 9470 u32 len = sizeof(*h2c); 9471 struct sk_buff *skb; 9472 int ret; 9473 9474 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9475 if (!skb) { 9476 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 9477 return -ENOMEM; 9478 } 9479 9480 skb_put(skb, len); 9481 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 9482 9483 h2c->w0 = le32_encode_bits(arg->sch_idx, 9484 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 9485 le32_encode_bits(arg->action, 9486 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 9487 le32_encode_bits(arg->macid, 9488 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 9489 h2c->w1 = le32_encode_bits(arg->client_macid, 9490 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 9491 9492 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9493 H2C_CAT_MAC, 9494 H2C_CL_MRC, 9495 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 9496 len); 9497 9498 ret = rtw89_h2c_tx(rtwdev, skb, false); 9499 if (ret) { 9500 rtw89_err(rtwdev, "failed to send h2c\n"); 9501 dev_kfree_skb_any(skb); 9502 return -EBUSY; 9503 } 9504 9505 return 0; 9506 } 9507 9508 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 9509 const struct rtw89_fw_mrc_sync_arg *arg) 9510 { 9511 struct rtw89_h2c_mrc_sync *h2c; 9512 u32 len = sizeof(*h2c); 9513 struct sk_buff *skb; 9514 int ret; 9515 9516 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9517 if (!skb) { 9518 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 9519 return -ENOMEM; 9520 } 9521 9522 skb_put(skb, len); 9523 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 9524 9525 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 9526 le32_encode_bits(arg->src.port, 9527 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 9528 le32_encode_bits(arg->src.band, 9529 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 9530 le32_encode_bits(arg->dest.port, 9531 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 9532 le32_encode_bits(arg->dest.band, 9533 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 9534 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 9535 9536 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9537 H2C_CAT_MAC, 9538 H2C_CL_MRC, 9539 H2C_FUNC_MRC_SYNC, 0, 0, 9540 
len); 9541 9542 ret = rtw89_h2c_tx(rtwdev, skb, false); 9543 if (ret) { 9544 rtw89_err(rtwdev, "failed to send h2c\n"); 9545 dev_kfree_skb_any(skb); 9546 return -EBUSY; 9547 } 9548 9549 return 0; 9550 } 9551 9552 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 9553 const struct rtw89_fw_mrc_upd_duration_arg *arg) 9554 { 9555 struct rtw89_h2c_mrc_upd_duration *h2c; 9556 struct sk_buff *skb; 9557 unsigned int i; 9558 u32 len; 9559 int ret; 9560 9561 len = struct_size(h2c, slots, arg->slot_num); 9562 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9563 if (!skb) { 9564 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 9565 return -ENOMEM; 9566 } 9567 9568 skb_put(skb, len); 9569 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 9570 9571 h2c->w0 = le32_encode_bits(arg->sch_idx, 9572 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 9573 le32_encode_bits(arg->slot_num, 9574 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 9575 le32_encode_bits(false, 9576 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 9577 9578 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 9579 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 9580 9581 for (i = 0; i < arg->slot_num; i++) { 9582 h2c->slots[i] = 9583 le32_encode_bits(arg->slots[i].slot_idx, 9584 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 9585 le32_encode_bits(arg->slots[i].duration, 9586 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 9587 } 9588 9589 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9590 H2C_CAT_MAC, 9591 H2C_CL_MRC, 9592 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 9593 len); 9594 9595 ret = rtw89_h2c_tx(rtwdev, skb, false); 9596 if (ret) { 9597 rtw89_err(rtwdev, "failed to send h2c\n"); 9598 dev_kfree_skb_any(skb); 9599 return -EBUSY; 9600 } 9601 9602 return 0; 9603 } 9604 9605 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 9606 { 9607 struct rtw89_h2c_ap_info *h2c; 9608 u32 len = sizeof(*h2c); 9609 struct sk_buff *skb; 9610 int ret; 9611 9612 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9613 if (!skb) { 9614 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 9615 return -ENOMEM; 9616 } 9617 9618 skb_put(skb, len); 9619 h2c = (struct rtw89_h2c_ap_info *)skb->data; 9620 9621 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 9622 9623 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9624 H2C_CAT_MAC, 9625 H2C_CL_AP, 9626 H2C_FUNC_AP_INFO, 0, 0, 9627 len); 9628 9629 ret = rtw89_h2c_tx(rtwdev, skb, false); 9630 if (ret) { 9631 rtw89_err(rtwdev, "failed to send h2c\n"); 9632 dev_kfree_skb_any(skb); 9633 return -EBUSY; 9634 } 9635 9636 return 0; 9637 } 9638 9639 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 9640 { 9641 int ret; 9642 9643 if (en) { 9644 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 9645 return 0; 9646 } else { 9647 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 9648 return 0; 9649 } 9650 9651 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 9652 if (ret) { 9653 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 9654 return ret; 9655 9656 /* During recovery, neither driver nor stack has full error 9657 * handling, so show a warning, but return 0 with refcount 9658 * increased normally. It can avoid underflow when calling 9659 * with @en == false later. 
9660 */ 9661 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 9662 } 9663 9664 if (en) 9665 refcount_set(&rtwdev->refcount_ap_info, 1); 9666 9667 return 0; 9668 } 9669 9670 int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 9671 bool enable) 9672 { 9673 struct rtw89_wait_info *wait = &rtwdev->mlo.wait; 9674 struct rtw89_h2c_mlo_link_cfg *h2c; 9675 u8 mac_id = rtwvif_link->mac_id; 9676 u32 len = sizeof(*h2c); 9677 struct sk_buff *skb; 9678 unsigned int cond; 9679 int ret; 9680 9681 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9682 if (!skb) { 9683 rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n"); 9684 return -ENOMEM; 9685 } 9686 9687 skb_put(skb, len); 9688 h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data; 9689 9690 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) | 9691 le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION); 9692 9693 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9694 H2C_CAT_MAC, 9695 H2C_CL_MLO, 9696 H2C_FUNC_MLO_LINK_CFG, 0, 0, 9697 len); 9698 9699 cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG); 9700 9701 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 9702 if (ret) { 9703 rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n", 9704 str_enable_disable(enable), rtwvif_link->link_id, ret); 9705 return ret; 9706 } 9707 9708 return 0; 9709 } 9710 9711 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 9712 { 9713 static const u8 zeros[U8_MAX] = {}; 9714 9715 return memcmp(ext_ptr, zeros, ext_len) == 0; 9716 } 9717 9718 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 9719 ({ \ 9720 u8 __var_sz = sizeof(*(e)); \ 9721 bool __accept; \ 9722 if (__var_sz >= (ent_sz)) \ 9723 __accept = true; \ 9724 else \ 9725 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 9726 (ent_sz) - __var_sz);\ 9727 __accept; \ 9728 }) 9729 9730 static bool 9731 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 9732 const void *cursor, 9733 const struct rtw89_txpwr_conf *conf) 9734 { 9735 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9736 return false; 9737 9738 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 9739 return false; 9740 9741 switch (e->rs) { 9742 case RTW89_RS_CCK: 9743 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 9744 return false; 9745 break; 9746 case RTW89_RS_OFDM: 9747 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 9748 return false; 9749 break; 9750 case RTW89_RS_MCS: 9751 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 9752 e->nss >= RTW89_NSS_NUM || 9753 e->ofdma >= RTW89_OFDMA_NUM) 9754 return false; 9755 break; 9756 case RTW89_RS_HEDCM: 9757 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 9758 e->nss >= RTW89_NSS_HEDCM_NUM || 9759 e->ofdma >= RTW89_OFDMA_NUM) 9760 return false; 9761 break; 9762 case RTW89_RS_OFFSET: 9763 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 9764 return false; 9765 break; 9766 default: 9767 return false; 9768 } 9769 9770 return true; 9771 } 9772 9773 static 9774 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 9775 const struct rtw89_txpwr_table *tbl) 9776 { 9777 const struct rtw89_txpwr_conf *conf = tbl->data; 9778 struct rtw89_fw_txpwr_byrate_entry entry = {}; 9779 struct rtw89_txpwr_byrate *byr_head; 9780 struct rtw89_rate_desc desc = {}; 9781 const void *cursor; 9782 u32 data; 9783 s8 *byr; 9784 int i; 9785 9786 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9787 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 9788 continue; 9789 9790 
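/* The per-rate power values are packed one per byte in a little-endian
 * 32-bit word; unpack them below into the byrate table slot selected by
 * band, bandwidth and the rate descriptor.
 */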
byr_head = &rtwdev->byr[entry.band][entry.bw]; 9791 data = le32_to_cpu(entry.data); 9792 desc.ofdma = entry.ofdma; 9793 desc.nss = entry.nss; 9794 desc.rs = entry.rs; 9795 9796 for (i = 0; i < entry.len; i++, data >>= 8) { 9797 desc.idx = entry.shf + i; 9798 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 9799 *byr = data & 0xff; 9800 } 9801 } 9802 } 9803 9804 static bool 9805 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 9806 const void *cursor, 9807 const struct rtw89_txpwr_conf *conf) 9808 { 9809 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9810 return false; 9811 9812 if (e->bw >= RTW89_2G_BW_NUM) 9813 return false; 9814 if (e->nt >= RTW89_NTX_NUM) 9815 return false; 9816 if (e->rs >= RTW89_RS_LMT_NUM) 9817 return false; 9818 if (e->bf >= RTW89_BF_NUM) 9819 return false; 9820 if (e->regd >= RTW89_REGD_NUM) 9821 return false; 9822 if (e->ch_idx >= RTW89_2G_CH_NUM) 9823 return false; 9824 9825 return true; 9826 } 9827 9828 static 9829 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 9830 { 9831 const struct rtw89_txpwr_conf *conf = &data->conf; 9832 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 9833 const void *cursor; 9834 9835 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9836 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 9837 continue; 9838 9839 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 9840 [entry.ch_idx] = entry.v; 9841 } 9842 } 9843 9844 static bool 9845 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 9846 const void *cursor, 9847 const struct rtw89_txpwr_conf *conf) 9848 { 9849 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9850 return false; 9851 9852 if (e->bw >= RTW89_5G_BW_NUM) 9853 return false; 9854 if (e->nt >= RTW89_NTX_NUM) 9855 return false; 9856 if (e->rs >= RTW89_RS_LMT_NUM) 9857 return false; 9858 if (e->bf >= RTW89_BF_NUM) 9859 return false; 9860 if (e->regd >= RTW89_REGD_NUM) 9861 return false; 9862 if (e->ch_idx >= RTW89_5G_CH_NUM) 9863 return false; 9864 9865 return true; 9866 } 9867 9868 static 9869 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 9870 { 9871 const struct rtw89_txpwr_conf *conf = &data->conf; 9872 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 9873 const void *cursor; 9874 9875 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9876 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 9877 continue; 9878 9879 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 9880 [entry.ch_idx] = entry.v; 9881 } 9882 } 9883 9884 static bool 9885 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 9886 const void *cursor, 9887 const struct rtw89_txpwr_conf *conf) 9888 { 9889 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9890 return false; 9891 9892 if (e->bw >= RTW89_6G_BW_NUM) 9893 return false; 9894 if (e->nt >= RTW89_NTX_NUM) 9895 return false; 9896 if (e->rs >= RTW89_RS_LMT_NUM) 9897 return false; 9898 if (e->bf >= RTW89_BF_NUM) 9899 return false; 9900 if (e->regd >= RTW89_REGD_NUM) 9901 return false; 9902 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 9903 return false; 9904 if (e->ch_idx >= RTW89_6G_CH_NUM) 9905 return false; 9906 9907 return true; 9908 } 9909 9910 static 9911 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 9912 { 9913 const struct rtw89_txpwr_conf *conf = &data->conf; 9914 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 9915 const void *cursor; 9916 9917 
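/* Walk the firmware-provided 6 GHz limit entries and store each one
 * into its bw/ntx/rs/bf/regd/power-type/channel slot, skipping entries
 * whose indexes fall outside the table dimensions.
 */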
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9918 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 9919 continue; 9920 9921 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 9922 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 9923 } 9924 } 9925 9926 static bool 9927 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 9928 const void *cursor, 9929 const struct rtw89_txpwr_conf *conf) 9930 { 9931 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9932 return false; 9933 9934 if (e->ru >= RTW89_RU_NUM) 9935 return false; 9936 if (e->nt >= RTW89_NTX_NUM) 9937 return false; 9938 if (e->regd >= RTW89_REGD_NUM) 9939 return false; 9940 if (e->ch_idx >= RTW89_2G_CH_NUM) 9941 return false; 9942 9943 return true; 9944 } 9945 9946 static 9947 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 9948 { 9949 const struct rtw89_txpwr_conf *conf = &data->conf; 9950 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 9951 const void *cursor; 9952 9953 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9954 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 9955 continue; 9956 9957 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 9958 } 9959 } 9960 9961 static bool 9962 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 9963 const void *cursor, 9964 const struct rtw89_txpwr_conf *conf) 9965 { 9966 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 9967 return false; 9968 9969 if (e->ru >= RTW89_RU_NUM) 9970 return false; 9971 if (e->nt >= RTW89_NTX_NUM) 9972 return false; 9973 if (e->regd >= RTW89_REGD_NUM) 9974 return false; 9975 if (e->ch_idx >= RTW89_5G_CH_NUM) 9976 return false; 9977 9978 return true; 9979 } 9980 9981 static 9982 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 9983 { 9984 const struct rtw89_txpwr_conf *conf = &data->conf; 9985 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 9986 const void *cursor; 9987 9988 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 9989 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 9990 continue; 9991 9992 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 9993 } 9994 } 9995 9996 static bool 9997 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 9998 const void *cursor, 9999 const struct rtw89_txpwr_conf *conf) 10000 { 10001 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 10002 return false; 10003 10004 if (e->ru >= RTW89_RU_NUM) 10005 return false; 10006 if (e->nt >= RTW89_NTX_NUM) 10007 return false; 10008 if (e->regd >= RTW89_REGD_NUM) 10009 return false; 10010 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 10011 return false; 10012 if (e->ch_idx >= RTW89_6G_CH_NUM) 10013 return false; 10014 10015 return true; 10016 } 10017 10018 static 10019 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 10020 { 10021 const struct rtw89_txpwr_conf *conf = &data->conf; 10022 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 10023 const void *cursor; 10024 10025 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 10026 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 10027 continue; 10028 10029 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 10030 [entry.ch_idx] = entry.v; 10031 } 10032 } 10033 10034 static bool 10035 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 10036 const void *cursor, 10037 const struct 
rtw89_txpwr_conf *conf) 10038 { 10039 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 10040 return false; 10041 10042 if (e->band >= RTW89_BAND_NUM) 10043 return false; 10044 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 10045 return false; 10046 if (e->regd >= RTW89_REGD_NUM) 10047 return false; 10048 10049 return true; 10050 } 10051 10052 static 10053 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 10054 { 10055 const struct rtw89_txpwr_conf *conf = &data->conf; 10056 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 10057 const void *cursor; 10058 10059 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 10060 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 10061 continue; 10062 10063 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 10064 } 10065 } 10066 10067 static bool 10068 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 10069 const void *cursor, 10070 const struct rtw89_txpwr_conf *conf) 10071 { 10072 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 10073 return false; 10074 10075 if (e->band >= RTW89_BAND_NUM) 10076 return false; 10077 if (e->regd >= RTW89_REGD_NUM) 10078 return false; 10079 10080 return true; 10081 } 10082 10083 static 10084 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 10085 { 10086 const struct rtw89_txpwr_conf *conf = &data->conf; 10087 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 10088 const void *cursor; 10089 10090 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 10091 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 10092 continue; 10093 10094 data->v[entry.band][entry.regd] = entry.v; 10095 } 10096 } 10097 10098 static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev, 10099 const struct rtw89_rfe_parms *parms) 10100 { 10101 const struct rtw89_chip_info *chip = rtwdev->chip; 10102 10103 if (chip->support_bands & BIT(NL80211_BAND_2GHZ) && 10104 !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru)) 10105 return false; 10106 10107 if (chip->support_bands & BIT(NL80211_BAND_5GHZ) && 10108 !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru)) 10109 return false; 10110 10111 if (chip->support_bands & BIT(NL80211_BAND_6GHZ) && 10112 !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru)) 10113 return false; 10114 10115 return true; 10116 } 10117 10118 const struct rtw89_rfe_parms * 10119 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 10120 const struct rtw89_rfe_parms *init) 10121 { 10122 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 10123 struct rtw89_rfe_parms *parms; 10124 10125 if (!rfe_data) 10126 return init; 10127 10128 parms = &rfe_data->rfe_parms; 10129 if (init) 10130 *parms = *init; 10131 10132 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 10133 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 10134 rfe_data->byrate.tbl.size = 0; /* don't care here */ 10135 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 10136 parms->byr_tbl = &rfe_data->byrate.tbl; 10137 } 10138 10139 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 10140 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 10141 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 10142 } 10143 10144 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 10145 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 10146 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 10147 } 10148 10149 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 10150 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 10151 
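/* Point the active 6 GHz rule at the limit table just loaded from firmware. */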
parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 10152 } 10153 10154 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) { 10155 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz); 10156 parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v; 10157 } 10158 10159 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) { 10160 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz); 10161 parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v; 10162 } 10163 10164 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) { 10165 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz); 10166 parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v; 10167 } 10168 10169 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 10170 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 10171 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 10172 } 10173 10174 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 10175 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 10176 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 10177 } 10178 10179 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 10180 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 10181 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 10182 } 10183 10184 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) { 10185 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz); 10186 parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v; 10187 } 10188 10189 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) { 10190 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz); 10191 parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v; 10192 } 10193 10194 if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) { 10195 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz); 10196 parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v; 10197 } 10198 10199 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 10200 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 10201 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 10202 } 10203 10204 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 10205 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 10206 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 10207 } 10208 10209 parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms); 10210 10211 return parms; 10212 } 10213