1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev); 19 20 struct rtw89_eapol_2_of_2 { 21 u8 gtkbody[14]; 22 u8 key_des_ver; 23 u8 rsvd[92]; 24 } __packed; 25 26 struct rtw89_sa_query { 27 u8 category; 28 u8 action; 29 } __packed; 30 31 struct rtw89_arp_rsp { 32 u8 llc_hdr[sizeof(rfc1042_header)]; 33 __be16 llc_type; 34 struct arphdr arp_hdr; 35 u8 sender_hw[ETH_ALEN]; 36 __be32 sender_ip; 37 u8 target_hw[ETH_ALEN]; 38 __be32 target_ip; 39 } __packed; 40 41 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 42 43 const struct rtw89_fw_blacklist rtw89_fw_blacklist_default = { 44 .ver = 0x00, 45 .list = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 46 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 47 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 48 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 49 }, 50 }; 51 EXPORT_SYMBOL(rtw89_fw_blacklist_default); 52 53 union rtw89_fw_element_arg { 54 size_t offset; 55 enum rtw89_rf_path rf_path; 56 enum rtw89_fw_type fw_type; 57 }; 58 59 struct rtw89_fw_element_handler { 60 int (*fn)(struct rtw89_dev *rtwdev, 61 const struct rtw89_fw_element_hdr *elm, 62 const union rtw89_fw_element_arg arg); 63 const union rtw89_fw_element_arg arg; 64 const char *name; 65 }; 66 67 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 68 struct sk_buff *skb); 69 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 70 struct rtw89_wait_info *wait, unsigned int cond); 71 static int __parse_security_section(struct rtw89_dev *rtwdev, 72 struct rtw89_fw_bin_info *info, 73 struct rtw89_fw_hdr_section_info *section_info, 74 
				    const void *content,
				    u32 *mssc_len);

/* Allocate an skb for an H2C (host-to-chip) command.  Room is reserved in
 * front for the chip-specific H2C TX descriptor and, when @header is true,
 * the H2C header as well.  The @len bytes of payload area are pre-zeroed.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

/* Allocate an H2C skb that carries an H2C header. */
struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

/* Allocate an H2C skb without an H2C header. */
struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

/* Poll the firmware-download status until the WCPU reports it finished
 * initializing, then set RTW89_FLAG_FW_RDY.  On poll timeout, the last
 * sampled status value is translated into a specific errno.
 */
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

/* Parse a v0-layout firmware header into @info: total header length
 * (including an optional dynamic header), then one entry per section with
 * type, length, re-download flag and download address.  For the security
 * section the MSS key material following the section is also located.
 * Finally verifies that the accumulated section sizes land exactly on the
 * end of the firmware binary.
 */
static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);

	/* AX chips use a fixed download chunk size; later generations take it
	 * from the firmware header itself.
	 */
	if (chip->chip_gen == RTW89_CHIP_AX)
		info->part_size = FWDL_SECTION_PER_PKT_LEN;
	else
		info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_W7_PART_SIZE);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);

		/* a per-section checksum trails the section payload */
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;

			/* NOTE(review): 960 is a chip-specific fixed download
			 * length for RTL8852B secure boot — source of the
			 * constant not visible here.
			 */
			if (sec->secure_boot && chip->chip_id == RTL8852B)
				section_info->len_override = 960;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

/* Map the configured MSS device-type/customer/key selection onto an index
 * into the firmware's MSS key pool, via the pool's remap bitmap: the key
 * index is the count of set bits preceding the selected bit.  Returns
 * -ENOENT when the selected key is absent, -EFAULT when the selection lies
 * outside the remap table.
 */
static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		/* default key sets, when present, occupy the leading bits */
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
					le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	/* rank of the selected bit = number of present keys before it */
	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

/* Parse a "formatted" MSS key pool that trails the security section:
 * validate its signature and remap-table layout, compute the total trailing
 * length (*mssc_len), and - under secure boot - locate the key blob matching
 * this device's selection.  Sections whose key cannot be selected, or
 * duplicate security sections, are flagged ->ignore rather than failing.
 */
static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	/* the key blobs must start right after header + remap table */
	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	/* stored as byte count * 4; zero means the default 512-byte signature */
	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = get_unaligned_le32(&section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content + section_info->len +
				 le32_to_cpu(mss_hdr->key_raw_offset) +
				 key_sign_len * real_key_idx;
	section_info->key_len = key_sign_len;
	section_info->key_idx = real_key_idx;

out:
	/* only the first security section is downloaded; later ones ignored */
	if (info->secure_section_exist) {
		section_info->ignore = true;
		return 0;
	}

	info->secure_section_exist = true;

	return 0;

ignore:
	section_info->ignore = true;

	return 0;
}
/* Check a secure firmware's blacklist entry against the chip's burned-in
 * blacklist table.  Returns 0 when the firmware is acceptable (or secure
 * boot / a security section is not in play), -ENOENT when the chip has no
 * blacklist, -EINVAL when the firmware's blacklist version is newer than the
 * chip's, and -EPERM when this firmware is explicitly blacklisted.
 */
static int __check_secure_blacklist(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content)
{
	const struct rtw89_fw_blacklist *chip_blacklist = rtwdev->chip->fw_blacklist;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u8 byte_idx;
	u8 bit_mask;

	if (!sec->secure_boot)
		return 0;

	if (!info->secure_section_exist || section_info->ignore)
		return 0;

	if (!chip_blacklist) {
		rtw89_warn(rtwdev, "chip no blacklist for secure firmware\n");
		return -ENOENT;
	}

	/* bit_in_chip_list indexes a bit within the chip's blacklist bitmap */
	byte_idx = section_content->blacklist.bit_in_chip_list >> 3;
	bit_mask = BIT(section_content->blacklist.bit_in_chip_list & 0x7);

	if (section_content->blacklist.ver > chip_blacklist->ver) {
		rtw89_warn(rtwdev, "chip blacklist out of date (%u, %u)\n",
			   section_content->blacklist.ver, chip_blacklist->ver);
		return -EINVAL;
	}

	if (chip_blacklist->list[byte_idx] & bit_mask) {
		rtw89_warn(rtwdev, "firmware %u in chip blacklist\n",
			   section_content->blacklist.ver);
		return -EPERM;
	}

	return 0;
}

/* Parse the security section's trailing MSS content: either the "formatted"
 * key-pool layout (FORMATTED_MSSC marker) or the legacy layout of
 * ->mssc fixed-size signatures.  Fills *mssc_len with the number of trailing
 * bytes and, under secure boot, records the key location for download.
 * A blacklisted firmware is only warned about here, not rejected.
 */
static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content,
				    u32 *mssc_len)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	int ret;

	if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
		ret = __parse_formatted_mssc(rtwdev, info, section_info,
					     content, mssc_len);
		if (ret)
			return -EINVAL;
	} else {
		/* legacy layout: ->mssc counts fixed-length signatures */
		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
		if (info->dsp_checksum)
			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;

		if (sec->secure_boot) {
			if (sec->mss_idx >= section_info->mssc) {
				rtw89_err(rtwdev, "unexpected MSS %d >= %d\n",
					  sec->mss_idx, section_info->mssc);
				return -EFAULT;
			}
			section_info->key_addr = content + section_info->len +
						 sec->mss_idx * FWDL_SECURITY_SIGLEN;
			section_info->key_len = FWDL_SECURITY_SIGLEN;
		}

		info->secure_section_exist = true;
	}

	ret = __check_secure_blacklist(rtwdev, info, section_info, content);
	WARN_ONCE(ret, "Current firmware in blacklist. Please update firmware.\n");

	return 0;
}
/* Parse a v1-layout firmware header into @info.  Same overall flow as the
 * v0 parser but with v1 field positions, a DSP-checksum flag, and without
 * the RTL8852B length override; additionally warns when a secure image
 * carries no security section.
 */
static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);

	if (chip->chip_gen == RTW89_CHIP_AX)
		info->part_size = FWDL_SECTION_PER_PKT_LEN;
	else
		info->part_size = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_PART_SIZE);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];

		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}

/* Dispatch to the header parser matching this firmware suit's header version. */
static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

/* Return the multi-firmware (MFW) header at the start of @firmware, or NULL
 * if the blob is too small or does not carry the MFW signature (i.e. it is a
 * legacy single-firmware file).
 */
static
const struct rtw89_mfw_hdr *rtw89_mfw_get_hdr_ptr(struct rtw89_dev *rtwdev,
						  const struct firmware *firmware)
{
	const struct rtw89_mfw_hdr *mfw_hdr;

	if (sizeof(*mfw_hdr) > firmware->size)
		return NULL;

	mfw_hdr = (const struct rtw89_mfw_hdr *)&firmware->data[0];

	if (mfw_hdr->sig != RTW89_MFW_SIG)
		return NULL;

	return mfw_hdr;
}

/* Sanity-check an MFW header: it must contain at least one firmware entry
 * and its entry array must fit within the firmware blob.
 */
static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev,
				  const struct firmware *firmware,
				  const struct rtw89_mfw_hdr *mfw_hdr)
{
	const void *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	u8 fw_nr = mfw_hdr->fw_nr;
	const void *ptr;

	if (fw_nr == 0) {
		rtw89_err(rtwdev, "mfw header has no fw entry\n");
		return -ENOENT;
	}

	/* one past the last entry must not exceed the blob */
	ptr = &mfw_hdr->info[fw_nr];

	if (ptr > mfw + mfw_len) {
		rtw89_err(rtwdev, "mfw header out of address\n");
		return -EFAULT;
	}

	return 0;
}

/* Locate the firmware of @type inside the (possibly multi-firmware) blob and
 * point @fw_suit at it.  Legacy single-firmware files only provide
 * RTW89_FW_NORMAL.  For WiFi firmware, the entry with the highest chip
 * version not exceeding this chip's (and not an MP build) is chosen; LOGFMT
 * takes the first matching entry.  Bounds-checks the chosen region.
 */
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	const struct rtw89_mfw_hdr *mfw_hdr;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	int ret;
	int i;

	mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware);
	if (!mfw_hdr) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware support normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr);
	if (ret)
		return ret;

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* Version order of WiFi firmware in firmware file are not in order,
		 * pass all firmware to find the equal or less but closest version.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);

	if (fw_suit->data + fw_suit->size > mfw + mfw_len) {
		rtw89_err(rtwdev, "fw_suit %d out of address\n", type);
		return -EFAULT;
	}

	return 0;
}
642 */ 643 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 644 if (!mfw_info || mfw_info->cv < tmp->cv) 645 mfw_info = tmp; 646 } 647 } 648 649 if (mfw_info) 650 goto found; 651 652 if (!nowarn) 653 rtw89_err(rtwdev, "no suitable firmware found\n"); 654 return -ENOENT; 655 656 found: 657 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 658 fw_suit->size = le32_to_cpu(mfw_info->size); 659 660 if (fw_suit->data + fw_suit->size > mfw + mfw_len) { 661 rtw89_err(rtwdev, "fw_suit %d out of address\n", type); 662 return -EFAULT; 663 } 664 665 return 0; 666 } 667 668 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 669 { 670 struct rtw89_fw_info *fw_info = &rtwdev->fw; 671 const struct firmware *firmware = fw_info->req.firmware; 672 const struct rtw89_mfw_info *mfw_info; 673 const struct rtw89_mfw_hdr *mfw_hdr; 674 u32 size; 675 int ret; 676 677 mfw_hdr = rtw89_mfw_get_hdr_ptr(rtwdev, firmware); 678 if (!mfw_hdr) { 679 rtw89_warn(rtwdev, "not mfw format\n"); 680 return 0; 681 } 682 683 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 684 if (ret) 685 return ret; 686 687 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 688 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 689 690 return size; 691 } 692 693 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 694 struct rtw89_fw_suit *fw_suit, 695 const struct rtw89_fw_hdr *hdr) 696 { 697 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 698 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 699 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 700 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 701 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 702 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 703 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 704 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 705 fw_suit->build_hour = le32_get_bits(hdr->w4, 
FW_HDR_W4_HOUR); 706 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 707 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 708 } 709 710 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 711 struct rtw89_fw_suit *fw_suit, 712 const struct rtw89_fw_hdr_v1 *hdr) 713 { 714 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 715 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 716 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 717 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 718 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 719 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 720 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 721 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 722 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 723 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 724 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 725 } 726 727 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 728 enum rtw89_fw_type type, 729 struct rtw89_fw_suit *fw_suit) 730 { 731 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 732 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 733 struct wiphy *wiphy = rtwdev->hw->wiphy; 734 735 if (type == RTW89_FW_LOGFMT) 736 return 0; 737 738 fw_suit->type = type; 739 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 740 741 switch (fw_suit->hdr_ver) { 742 case 0: 743 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 744 break; 745 case 1: 746 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 747 break; 748 default: 749 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 750 fw_suit->hdr_ver); 751 return -ENOENT; 752 } 753 754 rtw89_info(rtwdev, 755 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 756 
fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 757 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 758 759 if (type == RTW89_FW_NORMAL || type == RTW89_FW_NORMAL_CE || 760 type == RTW89_FW_NORMAL_B) 761 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), 762 "%u.%u.%u.%u", 763 fw_suit->major_ver, fw_suit->minor_ver, 764 fw_suit->sub_ver, fw_suit->sub_idex); 765 766 return 0; 767 } 768 769 static 770 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 771 bool nowarn) 772 { 773 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 774 int ret; 775 776 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 777 if (ret) 778 return ret; 779 780 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 781 } 782 783 static 784 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 785 const struct rtw89_fw_element_hdr *elm, 786 const union rtw89_fw_element_arg arg) 787 { 788 enum rtw89_fw_type type = arg.fw_type; 789 struct rtw89_hal *hal = &rtwdev->hal; 790 struct rtw89_fw_suit *fw_suit; 791 792 /* Version of BB MCU is in decreasing order in firmware file, so take 793 * first equal or less version, which is equal or less but closest version. 
794 */ 795 if (hal->cv < elm->u.bbmcu.cv) 796 return 1; /* ignore this element */ 797 798 fw_suit = rtw89_fw_suit_get(rtwdev, type); 799 if (fw_suit->data) 800 return 1; /* ignore this element (a firmware is taken already) */ 801 802 fw_suit->data = elm->u.bbmcu.contents; 803 fw_suit->size = le32_to_cpu(elm->size); 804 805 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 806 } 807 808 #define __DEF_FW_FEAT_COND(__cond, __op) \ 809 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 810 { \ 811 return suit_ver_code __op comp_ver_code; \ 812 } 813 814 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 815 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 816 __DEF_FW_FEAT_COND(lt, <); /* less than */ 817 818 struct __fw_feat_cfg { 819 enum rtw89_core_chip_id chip_id; 820 enum rtw89_fw_feature feature; 821 u32 ver_code; 822 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 823 bool disable; 824 int size; 825 }; 826 827 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 828 { \ 829 .chip_id = _chip, \ 830 .feature = RTW89_FW_FEATURE_ ## _feat, \ 831 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 832 .cond = __fw_feat_cond_ ## _cond, \ 833 } 834 835 #define __S_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 836 { \ 837 .chip_id = _chip, \ 838 .feature = RTW89_FW_FEATURE_ ## _feat, \ 839 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 840 .cond = __fw_feat_cond_ ## _cond, \ 841 .disable = true, \ 842 .size = 1, \ 843 } 844 845 #define __G_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _grp) \ 846 { \ 847 .chip_id = _chip, \ 848 .feature = RTW89_FW_FEATURE_ ## _grp ## _MIN, \ 849 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 850 .cond = __fw_feat_cond_ ## _cond, \ 851 .disable = true, \ 852 .size = RTW89_FW_FEATURE_ ## _grp ## _MAX - \ 853 RTW89_FW_FEATURE_ ## _grp ## _MIN + 1, \ 854 } 855 856 #define __DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat, _type) \ 857 
__##_type##_DIS_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) 858 859 static const struct __fw_feat_cfg fw_feat_tbl[] = { 860 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 861 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 862 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER_TYPE_0), 863 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT), 864 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C), 865 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 866 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 867 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 868 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER_TYPE_0), 869 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 870 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 871 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 872 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 873 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER_TYPE_0), 874 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 875 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 876 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 15, BEACON_LOSS_COUNT_V1), 877 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 878 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 879 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 127, 0, SER_L1_BY_EVENT), 880 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, CRASH_TRIGGER_TYPE_1), 881 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, SCAN_OFFLOAD_EXTRA_OP), 882 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 128, 0, BEACON_TRACKING), 883 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C), 884 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 885 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 886 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER_TYPE_0), 887 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 888 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 
889 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 122, 0, BEACON_TRACKING), 890 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SCAN_OFFLOAD_EXTRA_OP), 891 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, LPS_DACK_BY_C2H_REG), 892 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, CRASH_TRIGGER_TYPE_1), 893 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 127, 0, SER_L1_BY_EVENT), 894 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C), 895 __CFG_FW_FEAT(RTL8852C, ge, 0, 0, 0, 0, RFK_NTFY_MCC_V0), 896 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 897 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 898 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 899 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER_TYPE_0), 900 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 901 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 902 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, BEACON_LOSS_COUNT_V1), 903 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, LPS_DACK_BY_C2H_REG), 904 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 128, 0, CRASH_TRIGGER_TYPE_1), 905 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 129, 1, BEACON_TRACKING), 906 __CFG_FW_FEAT(RTL8852C, ge, 0, 29, 94, 0, SER_L1_BY_EVENT), 907 __CFG_FW_FEAT(RTL8852C, ge, 0, 29, 130, 0, SIM_SER_L0L1_BY_HALT_H2C), 908 __CFG_FW_FEAT(RTL8922A, ge, 0, 0, 0, 0, RFK_PRE_NOTIFY_V0), 909 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 910 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER_TYPE_0), 911 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 912 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD_EXTRA_OP), 913 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 914 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 915 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 916 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 28, 0, RFK_IQK_V0), 917 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 31, 0, RFK_PRE_NOTIFY_V1), 918 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 919 __CFG_FW_FEAT(RTL8922A, 
lt, 0, 35, 42, 0, RFK_RXDCK_V0), 920 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 921 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 922 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_V2), 923 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 49, 0, RFK_PRE_NOTIFY_MCC_V0), 924 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 925 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 64, 0, NO_POWER_DIFFERENCE), 926 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 71, 0, BEACON_LOSS_COUNT_V1), 927 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 76, 0, LPS_DACK_BY_C2H_REG), 928 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 79, 0, CRASH_TRIGGER_TYPE_1), 929 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 80, 0, BEACON_TRACKING), 930 __DIS_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, WITH_RFK_PRE_NOTIFY, G), 931 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 84, 0, RFK_PRE_NOTIFY_MCC_V1), 932 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 84, 0, ADDR_CAM_V0), 933 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 97, 0, SIM_SER_L0L1_BY_HALT_H2C), 934 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 100, 0, SER_POST_RECOVER_DMAC), 935 }; 936 937 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 938 const struct rtw89_chip_info *chip, 939 u32 ver_code) 940 { 941 int i; 942 943 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 944 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 945 946 if (chip->chip_id != ent->chip_id) 947 continue; 948 949 if (!ent->cond(ver_code, ent->ver_code)) 950 continue; 951 952 if (!ent->disable) { 953 RTW89_SET_FW_FEATURE(ent->feature, fw); 954 continue; 955 } 956 957 for (int n = 0; n < ent->size; n++) 958 RTW89_CLR_FW_FEATURE(ent->feature + n, fw); 959 } 960 } 961 962 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 963 { 964 const struct rtw89_chip_info *chip = rtwdev->chip; 965 const struct rtw89_fw_suit *fw_suit; 966 u32 suit_ver_code; 967 968 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 969 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 970 971 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, 
suit_ver_code); 972 } 973 974 const struct firmware * 975 rtw89_early_fw_feature_recognize(struct device *device, 976 const struct rtw89_chip_info *chip, 977 const struct rtw89_chip_variant *variant, 978 struct rtw89_fw_info *early_fw, 979 int *used_fw_format) 980 { 981 const struct rtw89_fw_def *fw_def = __rtw89_chip_get_fw_def(chip, variant); 982 const struct firmware *firmware; 983 char fw_name[64]; 984 int fw_format; 985 u32 ver_code; 986 int ret; 987 988 for (fw_format = fw_def->fw_format_max; fw_format >= 0; fw_format--) { 989 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 990 fw_def->fw_basename, fw_format); 991 992 ret = request_firmware(&firmware, fw_name, device); 993 if (!ret) { 994 dev_info(device, "loaded firmware %s\n", fw_name); 995 *used_fw_format = fw_format; 996 break; 997 } 998 } 999 1000 if (ret) { 1001 dev_err(device, "failed to early request firmware: %d\n", ret); 1002 return NULL; 1003 } 1004 1005 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 1006 1007 if (!ver_code) 1008 goto out; 1009 1010 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 1011 1012 out: 1013 return firmware; 1014 } 1015 1016 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 1017 { 1018 const struct rtw89_chip_variant *variant = rtwdev->variant; 1019 const struct rtw89_fw_suit *fw_suit; 1020 u32 suit_ver_code; 1021 1022 if (!variant) 1023 return 0; 1024 1025 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 1026 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 1027 1028 if (variant->fw_min_ver_code > suit_ver_code) { 1029 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n", 1030 variant->fw_min_ver_code); 1031 return -ENOENT; 1032 } 1033 1034 return 0; 1035 } 1036 1037 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 1038 { 1039 const struct rtw89_fw_def *fw_def = rtw89_chip_get_fw_def(rtwdev); 1040 const struct rtw89_chip_info *chip = rtwdev->chip; 1041 const struct rtw89_hal *hal = &rtwdev->hal; 1042 enum rtw89_fw_type 
normal_fw_type = RTW89_FW_NORMAL; 1043 enum rtw89_fw_type wowlan_fw_type = RTW89_FW_WOWLAN; 1044 int ret; 1045 1046 if (fw_def->fw_b_aid && fw_def->fw_b_aid == hal->aid) { 1047 normal_fw_type = RTW89_FW_NORMAL_B; 1048 wowlan_fw_type = RTW89_FW_WOWLAN_B; 1049 } 1050 1051 if (chip->try_ce_fw) { 1052 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 1053 if (!ret) 1054 goto normal_done; 1055 } 1056 1057 ret = __rtw89_fw_recognize(rtwdev, normal_fw_type, false); 1058 if (ret) 1059 return ret; 1060 1061 normal_done: 1062 ret = rtw89_fw_validate_ver_required(rtwdev); 1063 if (ret) 1064 return ret; 1065 1066 /* It still works if wowlan firmware isn't existing. */ 1067 __rtw89_fw_recognize(rtwdev, wowlan_fw_type, false); 1068 1069 /* It still works if log format file isn't existing. */ 1070 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 1071 1072 rtw89_fw_recognize_features(rtwdev); 1073 1074 rtw89_coex_recognize_ver(rtwdev); 1075 1076 return 0; 1077 } 1078 1079 static 1080 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 1081 const struct rtw89_fw_element_hdr *elm, 1082 const union rtw89_fw_element_arg arg) 1083 { 1084 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1085 const struct rtw89_chip_info *chip = rtwdev->chip; 1086 struct rtw89_hal *hal = &rtwdev->hal; 1087 struct rtw89_phy_table *tbl, **pp; 1088 struct rtw89_reg2_def *regs; 1089 bool radio = false; 1090 u32 n_regs, i; 1091 u16 aid; 1092 u8 idx; 1093 1094 switch (le32_to_cpu(elm->id)) { 1095 case RTW89_FW_ELEMENT_ID_BB_REG: 1096 pp = &elm_info->bb_tbl; 1097 break; 1098 case RTW89_FW_ELEMENT_ID_BB_GAIN: 1099 pp = &elm_info->bb_gain; 1100 break; 1101 case RTW89_FW_ELEMENT_ID_RADIO_A: 1102 case RTW89_FW_ELEMENT_ID_RADIO_B: 1103 case RTW89_FW_ELEMENT_ID_RADIO_C: 1104 case RTW89_FW_ELEMENT_ID_RADIO_D: 1105 idx = elm->u.reg2.idx; 1106 pp = &elm_info->rf_radio[idx]; 1107 1108 radio = true; 1109 break; 1110 case RTW89_FW_ELEMENT_ID_RF_NCTL: 1111 pp = &elm_info->rf_nctl; 1112 
break; 1113 default: 1114 return -ENOENT; 1115 } 1116 1117 aid = le16_to_cpu(elm->aid); 1118 if (aid && aid != hal->aid) 1119 return 1; /* ignore if aid not matched */ 1120 else if (*pp) 1121 return 1; /* ignore if an element is existing */ 1122 1123 tbl = kzalloc_obj(*tbl); 1124 if (!tbl) 1125 return -ENOMEM; 1126 1127 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 1128 regs = kzalloc_objs(*regs, n_regs); 1129 if (!regs) 1130 goto out; 1131 1132 for (i = 0; i < n_regs; i++) { 1133 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 1134 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 1135 } 1136 1137 tbl->n_regs = n_regs; 1138 tbl->regs = regs; 1139 1140 if (radio) { 1141 tbl->rf_path = arg.rf_path; 1142 tbl->config = chip->chip_id == RTL8852A ? 1143 rtw89_phy_config_rf_reg : 1144 rtw89_phy_config_rf_reg_v1; 1145 } 1146 1147 *pp = tbl; 1148 1149 return 0; 1150 1151 out: 1152 kfree(tbl); 1153 return -ENOMEM; 1154 } 1155 1156 static 1157 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 1158 const struct rtw89_fw_element_hdr *elm, 1159 const union rtw89_fw_element_arg arg) 1160 { 1161 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 1162 const unsigned long offset = arg.offset; 1163 struct rtw89_efuse *efuse = &rtwdev->efuse; 1164 struct rtw89_hal *hal = &rtwdev->hal; 1165 u16 aid = le16_to_cpu(elm->aid); 1166 struct rtw89_txpwr_conf *conf; 1167 1168 if (aid && aid != hal->aid) 1169 return 1; 1170 1171 if (!rtwdev->rfe_data) { 1172 rtwdev->rfe_data = kzalloc_obj(*rtwdev->rfe_data); 1173 if (!rtwdev->rfe_data) 1174 return -ENOMEM; 1175 } 1176 1177 conf = (void *)rtwdev->rfe_data + offset; 1178 1179 /* if multiple matched, take the last eventually */ 1180 if (txpwr_elm->rfe_type == efuse->rfe_type) 1181 goto setup; 1182 1183 /* without one is matched, accept default */ 1184 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 1185 (!rtw89_txpwr_conf_valid(conf) || 1186 conf->rfe_type == 
	     RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	/* conf->data aliases the firmware blob; valid for the blob's lifetime */
	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

/* Build the TX power tracking table from a firmware element. The element
 * carries a bitmap of delta tables; every table required for the chip's
 * supported bands must be present. Each set bit maps to a type whose data
 * occupies 'subband' chunks of DELTA_SWINGIDX_SIZE bytes in contents[].
 * Returns 0, -ENOENT (required tables missing), -ENOMEM, or -EFAULT when
 * the declared element size is too small for the bitmap.
 */
static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc_obj(*elm_info->txpwr_trk);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	/* walk set bits low-to-high; 'type' tracks the bit position */
	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		/* subband count determines how much data this type consumes */
		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		/* validate after advancing so the just-stored pointer is
		 * rejected (via err:) when it would run past the element */
		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

/* Register one RFK log-format element, indexed by its rfk_id. Optional:
 * allocation failure or an out-of-range id just ignores the element (1).
 */
static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc_obj(*elm_info->rfk_log_fmt);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore this */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	/* store a pointer into the firmware blob keyed by RFK function id */
	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

/* Copy one regd entry from the firmware element into @regd, tolerating
 * both shorter (older) and longer (newer) on-file entry layouts.
 */
static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor,
			      u8 cursor_size)
{
	/* fill default values if needed for backward compatibility */
	struct rtw89_fw_regd_entry entry = {
		.rule_2ghz = RTW89_NA,
		.rule_5ghz = RTW89_NA,
		.rule_6ghz = RTW89_NA,
		.fmap = cpu_to_le32(0x0),
	};
	u8 valid_size = min_t(u8, sizeof(entry), cursor_size);
	unsigned int i;
	u32 fmap;

	memcpy(&entry, cursor, valid_size);
	memset(regd, 0, sizeof(*regd));

	regd->alpha2[0] = entry.alpha2_0;
	regd->alpha2[1] = entry.alpha2_1;
	regd->alpha2[2] = '\0';

	/* also need to consider forward compatibility */
	regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ?
					  entry.rule_2ghz : RTW89_NA;
	regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ?
					  entry.rule_5ghz : RTW89_NA;
	regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ?
					  entry.rule_6ghz : RTW89_NA;

	BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap));
	BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC);

	/* expand the on-file function flag word into the per-regd bitmap */
	fmap = le32_to_cpu(entry.fmap);
	for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) {
		if (fmap & BIT(i))
			set_bit(i, regd->func_bitmap);
	}

	return true;
}

/* Iterate the entries of a regd firmware element: for each ent_sz-sized
 * record, copy it into @regd via rtw89_regd_entcpy() and run the body when
 * the copy succeeds.
 */
#define rtw89_for_each_in_regd_element(regd, element) \
	for (const void *cursor = (element)->content, \
		    *end = (element)->content + \
			   le32_to_cpu((element)->num_ents) * (element)->ent_sz; \
	     cursor < end; cursor += (element)->ent_sz) \
		if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz))

/* Build the regulatory-domain table from a firmware element. Returns 0 on
 * success, 1 to ignore the element (too many entries; fall back to the
 * built-in/common table), or a negative error.
 */
static
int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd;
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u32 num_ents = le32_to_cpu(regd_elm->num_ents);
	struct rtw89_regd_data *p;
	struct rtw89_regd regd;
	u32 i = 0;

	if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) {
		rtw89_warn(rtwdev,
			   "regd element ents (%d) are over max num (%d)\n",
			   num_ents, RTW89_REGD_MAX_COUNTRY_NUM);
		rtw89_warn(rtwdev,
			   "regd element ignore and take another/common\n");
		return 1;
	}

	/* a later regd element replaces an earlier one */
	if (elm_info->regd) {
		rtw89_debug(rtwdev, RTW89_DBG_REGD,
			    "regd element take the latter\n");
		devm_kfree(rtwdev->dev, elm_info->regd);
		elm_info->regd = NULL;
	}

	p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->nr = num_ents;
	rtw89_for_each_in_regd_element(&regd, regd_elm)
		p->map[i++] = regd;

	/* every declared entry must have copied successfully */
	if (i != num_ents) {
		rtw89_err(rtwdev, "regd element has %d invalid ents\n",
			  num_ents - i);
		devm_kfree(rtwdev->dev, p);
		return -EINVAL;
	}

	elm_info->regd = p;
	return 0;
}

/* Keep a pointer to the AFE power sequence element (data stays in the
 * firmware blob).
 */
static
int rtw89_build_afe_pwr_seq_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;

	elm_info->afe = elm;

	return 0;
}

/* Keep a pointer to the MAC diagnostic element. */
static
int rtw89_recognize_diag_mac_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;

	elm_info->diag_mac = elm;

	return 0;
}

/* Keep a pointer to the TX compensation element; ignored (1) on aid
 * mismatch or when one was already recorded.
 */
static
int rtw89_build_tx_comp_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_hal *hal = &rtwdev->hal;
	u16 aid;

	aid = le16_to_cpu(elm->aid);
	if (aid && aid != hal->aid)
		return 1; /* ignore if aid not matched */
	else if (elm_info->tx_comp)
		return 1; /* ignore if an element is existing */

	elm_info->tx_comp = elm;

	return 0;
}

/* Dispatch table indexed by firmware element id: handler, its argument,
 * and an optional name to log the element version under.
 */
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] =
{rtw89_build_phy_tbl_from_elm, 1442 { .rf_path = RF_PATH_A }, "radio A"}, 1443 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1444 { .rf_path = RF_PATH_B }, NULL}, 1445 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1446 { .rf_path = RF_PATH_C }, NULL}, 1447 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1448 { .rf_path = RF_PATH_D }, NULL}, 1449 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1450 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1451 rtw89_fw_recognize_txpwr_from_elm, 1452 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1453 }, 1454 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1455 rtw89_fw_recognize_txpwr_from_elm, 1456 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1457 }, 1458 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1459 rtw89_fw_recognize_txpwr_from_elm, 1460 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1461 }, 1462 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1463 rtw89_fw_recognize_txpwr_from_elm, 1464 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1465 }, 1466 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_2GHZ] = { 1467 rtw89_fw_recognize_txpwr_from_elm, 1468 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_2ghz.conf) }, NULL, 1469 }, 1470 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_5GHZ] = { 1471 rtw89_fw_recognize_txpwr_from_elm, 1472 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_5ghz.conf) }, NULL, 1473 }, 1474 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_6GHZ] = { 1475 rtw89_fw_recognize_txpwr_from_elm, 1476 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_6ghz.conf) }, NULL, 1477 }, 1478 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1479 rtw89_fw_recognize_txpwr_from_elm, 1480 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1481 }, 1482 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1483 rtw89_fw_recognize_txpwr_from_elm, 1484 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 
1485 }, 1486 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1487 rtw89_fw_recognize_txpwr_from_elm, 1488 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1489 }, 1490 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_2GHZ] = { 1491 rtw89_fw_recognize_txpwr_from_elm, 1492 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_2ghz.conf) }, NULL, 1493 }, 1494 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_5GHZ] = { 1495 rtw89_fw_recognize_txpwr_from_elm, 1496 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_5ghz.conf) }, NULL, 1497 }, 1498 [RTW89_FW_ELEMENT_ID_TXPWR_DA_LMT_RU_6GHZ] = { 1499 rtw89_fw_recognize_txpwr_from_elm, 1500 { .offset = offsetof(struct rtw89_rfe_data, da_lmt_ru_6ghz.conf) }, NULL, 1501 }, 1502 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1503 rtw89_fw_recognize_txpwr_from_elm, 1504 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1505 }, 1506 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1507 rtw89_fw_recognize_txpwr_from_elm, 1508 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1509 }, 1510 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1511 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1512 }, 1513 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1514 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1515 }, 1516 [RTW89_FW_ELEMENT_ID_REGD] = { 1517 rtw89_recognize_regd_from_elm, {}, "REGD", 1518 }, 1519 [RTW89_FW_ELEMENT_ID_AFE_PWR_SEQ] = { 1520 rtw89_build_afe_pwr_seq_from_elm, {}, "AFE", 1521 }, 1522 [RTW89_FW_ELEMENT_ID_DIAG_MAC] = { 1523 rtw89_recognize_diag_mac_from_elm, {}, NULL, 1524 }, 1525 [RTW89_FW_ELEMENT_ID_TX_COMP] = { 1526 rtw89_build_tx_comp_from_elm, {}, NULL, 1527 }, 1528 }; 1529 1530 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1531 { 1532 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1533 const struct firmware *firmware = fw_info->req.firmware; 1534 const struct rtw89_chip_info *chip = rtwdev->chip; 1535 u32 unrecognized_elements = chip->needed_fw_elms; 1536 const struct rtw89_fw_element_handler 
*handler; 1537 const struct rtw89_fw_element_hdr *hdr; 1538 u32 elm_size; 1539 u32 elem_id; 1540 u32 offset; 1541 int ret; 1542 1543 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1544 1545 offset = rtw89_mfw_get_size(rtwdev); 1546 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1547 if (offset == 0) 1548 return -EINVAL; 1549 1550 while (offset + sizeof(*hdr) < firmware->size) { 1551 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1552 1553 elm_size = le32_to_cpu(hdr->size); 1554 if (offset + elm_size >= firmware->size) { 1555 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1556 break; 1557 } 1558 1559 elem_id = le32_to_cpu(hdr->id); 1560 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1561 goto next; 1562 1563 handler = &__fw_element_handlers[elem_id]; 1564 if (!handler->fn) 1565 goto next; 1566 1567 ret = handler->fn(rtwdev, hdr, handler->arg); 1568 if (ret == 1) /* ignore this element */ 1569 goto next; 1570 if (ret) 1571 return ret; 1572 1573 if (handler->name) 1574 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1575 handler->name, hdr->ver); 1576 1577 unrecognized_elements &= ~BIT(elem_id); 1578 next: 1579 offset += sizeof(*hdr) + elm_size; 1580 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1581 } 1582 1583 if (unrecognized_elements) { 1584 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1585 unrecognized_elements); 1586 return -ENOENT; 1587 } 1588 1589 return 0; 1590 } 1591 1592 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1593 u8 type, u8 cat, u8 class, u8 func, 1594 bool rack, bool dack, u32 len) 1595 { 1596 const struct rtw89_chip_info *chip = rtwdev->chip; 1597 struct fwcmd_hdr *hdr; 1598 1599 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1600 1601 if (chip->chip_gen == RTW89_CHIP_AX && !(rtwdev->fw.h2c_seq % 4)) 1602 rack = true; 1603 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1604 FIELD_PREP(H2C_HDR_CAT, cat) | 1605 
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

/* Like rtw89_h2c_pkt_set_hdr() but for firmware-download commands: no
 * ack bits and the sequence counter is not advanced.
 */
static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

/* Patch a v0 firmware header in place before download: write the chosen
 * part size and apply per-section length overrides. Returns the number of
 * bytes trimmed from the header (always 0 for v0).
 */
static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section *section;
	int i;

	le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_W7_PART_SIZE);

	for (i = 0; i < info->section_num; i++) {
		section_info = &info->section_info[i];

		if (!section_info->len_override)
			continue;

		section = &fw_hdr->sections[i];
		le32p_replace_bits(&section->w1, section_info->len_override,
				   FWSECTION_HDR_W1_SEC_SIZE);
	}

	return 0;
}

/* Patch a v1 firmware header in place: write the part size, compact the
 * section table by dropping ignored sections, and update the section
 * count. Returns the number of trailing header bytes no longer needed.
 */
static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, info->part_size, FW_HDR_V1_W7_PART_SIZE);

	/* compact in place: kept sections slide down over ignored ones */
	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return (info->section_num - dst_sec_idx) * sizeof(*section);
}

/* Send the (tweaked) firmware header to the WCPU via an FWDL H2C packet.
 * On success the skb is consumed by rtw89_h2c_tx(); on failure it is
 * freed here. Returns 0 or a negative error.
 */
static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	/* drop the header bytes freed up by section compaction */
	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Download the firmware header, wait for the download path to become
 * ready again, then clear the halt H2C/C2H control registers.
 */
static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_suit *fw_suit,
				 struct rtw89_fw_bin_info *info)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

/* Stream one firmware section to the device in part_size-sized H2C
 * packets. Honors len_override, and optionally patches security key data
 * over the tail of the (single) packet when key_addr/key_len are set.
 * Returns 0 or a negative error.
 */
static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info,
				    u32 part_size)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	bool copy_key = false;
	u32 pkt_len;
	int ret;

	if (info->ignore)
		return 0;

	if (info->len_override) {
		if (info->len_override > info->len)
			rtw89_warn(rtwdev, "override length %u larger than original %u\n",
				   info->len_override, info->len);
		else
			residue_len = info->len_override;
	}

	/* key copy is only valid when the section fits in one packet */
	if (info->key_addr && info->key_len) {
		if (residue_len > part_size || info->len < info->key_len)
			rtw89_warn(rtwdev,
				   "ignore to copy key data because of len %d, %d, %d, %d\n",
				   info->len, part_size,
				   info->key_len, residue_len);
		else
			copy_key = true;
	}

	while (residue_len) {
		pkt_len = min(residue_len, part_size);

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		/* overlay the key on the tail of the packet */
		if (copy_key)
			memcpy(skb->data + pkt_len - info->key_len,
			       info->key_addr, info->key_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			goto fail;
		}
		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Map a firmware suit to the ready-check type used after its download:
 * BB MCU images have their own done flags, everything else is the WCPU.
 */
static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

/* Download every section of a firmware suit, then (on BE-generation
 * chips only) poll the per-suit download-done status. Returns 0 or a
 * negative error.
 */
static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info, info->part_size);
		if (ret)
			return ret;
		section_info++;
	}

	/* AX chips only check readiness once, after FreeRTOS comes up */
	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

/* Dump the firmware program counter several times for post-mortem
 * debugging; AX chips need the debug port routed to the PC first.
 */
static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u32 addr = R_AX_DBG_PORT_SEL;
	u32 val32;
	u16 index;

	if (chip_gen == RTW89_CHIP_BE) {
		addr = R_BE_WLCPU_PORT_PC;
		goto dump;
	}

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

dump:
	/* repeated samples show whether the CPU is advancing or stuck */
	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, addr);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

/* Dump the firmware-download control/boot-debug registers and the PC
 * trace after a failed download.
 */
static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

/* Parse one firmware suit's header and download its header plus all
 * sections. Returns 0 or a negative error.
 */
static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info = {};
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);

	/* NOTE(review): magic secure-boot malloc address for 8922A
	 * normal/WoWLAN images — presumably mandated by the firmware;
	 * confirm against vendor reference.
	 */
	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

/* One full download attempt: reset the WCPU, download the requested suit
 * (plus BB MCU images when @include_bb), reset H2C/C2H bookkeeping, and
 * wait for FreeRTOS to report ready. Dumps debug state on failure.
 */
static
int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	/* fresh firmware: restart all host<->fw sequence counters */
	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		goto fwdl_err;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

/* Download firmware with up to five full retries. Returns 0 on success
 * or the last attempt's error.
 */
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	int retry;
	int ret;

	for (retry = 0; retry < 5; retry++) {
		ret = __rtw89_fw_download(rtwdev, type, include_bb);
		if (!ret)
			return 0;
	}

	return ret;
}

/* Block until the async firmware request finishes; -EINVAL when it
 * completed without a firmware.
 */
int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

/* Request the firmware file unless it was already fetched by the early
 * path; always signals the request completion. Returns the request_
 * firmware() result (0 when already present).
 */
static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name,
rtwdev->dev); 2045 2046 complete_all(&req->completion); 2047 2048 return ret; 2049 } 2050 2051 void rtw89_load_firmware_work(struct work_struct *work) 2052 { 2053 struct rtw89_dev *rtwdev = 2054 container_of(work, struct rtw89_dev, load_firmware_work); 2055 const struct rtw89_fw_def *fw_def = rtw89_chip_get_fw_def(rtwdev); 2056 char fw_name[64]; 2057 2058 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 2059 fw_def->fw_basename, rtwdev->fw.fw_format); 2060 2061 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 2062 } 2063 2064 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 2065 { 2066 if (!tbl) 2067 return; 2068 2069 kfree(tbl->regs); 2070 kfree(tbl); 2071 } 2072 2073 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 2074 { 2075 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 2076 int i; 2077 2078 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 2079 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 2080 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 2081 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 2082 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 2083 2084 kfree(elm_info->txpwr_trk); 2085 kfree(elm_info->rfk_log_fmt); 2086 } 2087 2088 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 2089 { 2090 struct rtw89_fw_info *fw = &rtwdev->fw; 2091 2092 cancel_work_sync(&rtwdev->load_firmware_work); 2093 2094 if (fw->req.firmware) { 2095 release_firmware(fw->req.firmware); 2096 2097 /* assign NULL back in case rtw89_free_ieee80211_hw() 2098 * try to release the same one again. 
2099 */ 2100 fw->req.firmware = NULL; 2101 } 2102 2103 kfree(fw->log.fmts); 2104 rtw89_unload_firmware_elements(rtwdev); 2105 } 2106 2107 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 2108 { 2109 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 2110 u32 i; 2111 2112 if (fmt_id > fw_log->last_fmt_id) 2113 return 0; 2114 2115 for (i = 0; i < fw_log->fmt_count; i++) { 2116 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 2117 return i; 2118 } 2119 return 0; 2120 } 2121 2122 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 2123 { 2124 struct rtw89_fw_log *log = &rtwdev->fw.log; 2125 const struct rtw89_fw_logsuit_hdr *suit_hdr; 2126 struct rtw89_fw_suit *suit = &log->suit; 2127 const void *fmts_ptr, *fmts_end_ptr; 2128 u32 fmt_count; 2129 int i; 2130 2131 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 2132 fmt_count = le32_to_cpu(suit_hdr->count); 2133 log->fmt_ids = suit_hdr->ids; 2134 fmts_ptr = &suit_hdr->ids[fmt_count]; 2135 fmts_end_ptr = suit->data + suit->size; 2136 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 2137 if (!log->fmts) 2138 return -ENOMEM; 2139 2140 for (i = 0; i < fmt_count; i++) { 2141 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 2142 if (!fmts_ptr) 2143 break; 2144 2145 (*log->fmts)[i] = fmts_ptr; 2146 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 2147 log->fmt_count++; 2148 fmts_ptr += strlen(fmts_ptr); 2149 } 2150 2151 return 0; 2152 } 2153 2154 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 2155 { 2156 struct rtw89_fw_log *log = &rtwdev->fw.log; 2157 struct rtw89_fw_suit *suit = &log->suit; 2158 2159 if (!suit || !suit->data) { 2160 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 2161 return -EINVAL; 2162 } 2163 if (log->fmts) 2164 return 0; 2165 2166 return rtw89_fw_log_create_fmts_dict(rtwdev); 2167 } 2168 2169 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 2170 const struct rtw89_fw_c2h_log_fmt *log_fmt, 2171 u32 fmt_idx, u8 
para_int, bool raw_data) 2172 { 2173 const char *(*fmts)[] = rtwdev->fw.log.fmts; 2174 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 2175 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 2176 int i; 2177 2178 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 2179 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 2180 log_fmt->argc); 2181 return; 2182 } 2183 2184 if (para_int) 2185 for (i = 0 ; i < log_fmt->argc; i++) 2186 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 2187 2188 if (raw_data) { 2189 if (para_int) 2190 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2191 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 2192 para_int, log_fmt->argc, (int)sizeof(args), args); 2193 else 2194 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 2195 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 2196 para_int, log_fmt->argc, log_fmt->u.raw); 2197 } else { 2198 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 2199 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 2200 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 2201 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 2202 args[0xf]); 2203 } 2204 2205 rtw89_info(rtwdev, "C2H log: %s", str_buf); 2206 } 2207 2208 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 2209 { 2210 const struct rtw89_fw_c2h_log_fmt *log_fmt; 2211 u8 para_int; 2212 u32 fmt_idx; 2213 2214 if (len < RTW89_C2H_HEADER_LEN) { 2215 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 2216 return; 2217 } 2218 2219 buf += RTW89_C2H_HEADER_LEN; 2220 len -= RTW89_C2H_HEADER_LEN; 2221 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 2222 2223 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 2224 goto plain_log; 2225 2226 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 2227 goto plain_log; 2228 2229 if (!rtwdev->fw.log.fmts) 2230 return; 2231 2232 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 2233 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, 
le32_to_cpu(log_fmt->fmt_id)); 2234 2235 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 2236 rtw89_info(rtwdev, "C2H log: %s%s", 2237 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 2238 else if (fmt_idx != 0 && para_int) 2239 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 2240 else 2241 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 2242 return; 2243 2244 plain_log: 2245 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 2246 2247 } 2248 2249 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 2250 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr, 2251 enum rtw89_upd_mode upd_mode) 2252 { 2253 const struct rtw89_chip_info *chip = rtwdev->chip; 2254 struct rtw89_h2c_addr_cam_v0 *h2c_v0; 2255 struct rtw89_h2c_addr_cam *h2c; 2256 u32 len = sizeof(*h2c); 2257 struct sk_buff *skb; 2258 u8 ver = U8_MAX; 2259 int ret; 2260 2261 if (RTW89_CHK_FW_FEATURE(ADDR_CAM_V0, &rtwdev->fw) || 2262 chip->chip_gen == RTW89_CHIP_AX) { 2263 len = sizeof(*h2c_v0); 2264 ver = 0; 2265 } 2266 2267 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2268 if (!skb) { 2269 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2270 return -ENOMEM; 2271 } 2272 skb_put(skb, len); 2273 h2c_v0 = (struct rtw89_h2c_addr_cam_v0 *)skb->data; 2274 2275 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, 2276 scan_mac_addr, h2c_v0); 2277 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, h2c_v0); 2278 2279 if (ver == 0) 2280 goto hdr; 2281 2282 h2c = (struct rtw89_h2c_addr_cam *)skb->data; 2283 h2c->w15 = le32_encode_bits(upd_mode, ADDR_CAM_W15_UPD_MODE); 2284 2285 hdr: 2286 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2287 H2C_CAT_MAC, 2288 H2C_CL_MAC_ADDR_CAM_UPDATE, 2289 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 2290 len); 2291 2292 ret = rtw89_h2c_tx(rtwdev, skb, false); 2293 if (ret) { 2294 rtw89_err(rtwdev, "failed to send h2c\n"); 2295 goto fail; 2296 } 2297 2298 return 0; 2299 fail: 2300 
dev_kfree_skb_any(skb); 2301 2302 return ret; 2303 } 2304 2305 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 2306 struct rtw89_vif_link *rtwvif_link, 2307 struct rtw89_sta_link *rtwsta_link) 2308 { 2309 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 2310 u32 len = sizeof(*h2c); 2311 struct sk_buff *skb; 2312 int ret; 2313 2314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2315 if (!skb) { 2316 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2317 return -ENOMEM; 2318 } 2319 skb_put(skb, len); 2320 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 2321 2322 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 2323 2324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2325 H2C_CAT_MAC, 2326 H2C_CL_MAC_FR_EXCHG, 2327 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 2328 len); 2329 2330 ret = rtw89_h2c_tx(rtwdev, skb, false); 2331 if (ret) { 2332 rtw89_err(rtwdev, "failed to send h2c\n"); 2333 goto fail; 2334 } 2335 2336 return 0; 2337 fail: 2338 dev_kfree_skb_any(skb); 2339 2340 return ret; 2341 } 2342 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 2343 2344 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 2345 struct rtw89_vif_link *rtwvif_link, 2346 struct rtw89_sta_link *rtwsta_link) 2347 { 2348 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2349 u32 len = sizeof(*h2c); 2350 struct sk_buff *skb; 2351 int ret; 2352 2353 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2354 if (!skb) { 2355 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2356 return -ENOMEM; 2357 } 2358 skb_put(skb, len); 2359 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2360 2361 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 2362 2363 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2364 H2C_CAT_MAC, 2365 H2C_CL_MAC_FR_EXCHG, 2366 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2367 len); 2368 2369 ret = rtw89_h2c_tx(rtwdev, skb, false); 2370 if (ret) { 2371 rtw89_err(rtwdev, "failed to send h2c\n"); 2372 goto fail; 2373 } 
2374 2375 return 0; 2376 fail: 2377 dev_kfree_skb_any(skb); 2378 2379 return ret; 2380 } 2381 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 2382 2383 int rtw89_fw_h2c_dctl_sec_cam_v3(struct rtw89_dev *rtwdev, 2384 struct rtw89_vif_link *rtwvif_link, 2385 struct rtw89_sta_link *rtwsta_link) 2386 { 2387 struct rtw89_h2c_dctlinfo_ud_v3 *h2c; 2388 u32 len = sizeof(*h2c); 2389 struct sk_buff *skb; 2390 int ret; 2391 2392 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2393 if (!skb) { 2394 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2395 return -ENOMEM; 2396 } 2397 skb_put(skb, len); 2398 h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data; 2399 2400 rtw89_cam_fill_dctl_sec_cam_info_v3(rtwdev, rtwvif_link, rtwsta_link, h2c); 2401 2402 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2403 H2C_CAT_MAC, 2404 H2C_CL_MAC_FR_EXCHG, 2405 H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0, 2406 len); 2407 2408 ret = rtw89_h2c_tx(rtwdev, skb, false); 2409 if (ret) { 2410 rtw89_err(rtwdev, "failed to send h2c\n"); 2411 goto fail; 2412 } 2413 2414 return 0; 2415 fail: 2416 dev_kfree_skb_any(skb); 2417 2418 return ret; 2419 } 2420 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v3); 2421 2422 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2423 struct rtw89_vif_link *rtwvif_link, 2424 struct rtw89_sta_link *rtwsta_link) 2425 { 2426 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2427 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2428 u32 len = sizeof(*h2c); 2429 struct sk_buff *skb; 2430 int ret; 2431 2432 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2433 if (!skb) { 2434 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2435 return -ENOMEM; 2436 } 2437 skb_put(skb, len); 2438 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2439 2440 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2441 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2442 2443 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2444 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2445 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2446 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2447 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2448 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2449 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2450 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2451 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2452 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2453 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2454 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2455 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2456 2457 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2458 H2C_CAT_MAC, 2459 H2C_CL_MAC_FR_EXCHG, 2460 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2461 len); 2462 2463 ret = rtw89_h2c_tx(rtwdev, skb, false); 2464 if (ret) { 2465 rtw89_err(rtwdev, "failed to send h2c\n"); 2466 goto fail; 2467 } 2468 2469 return 0; 2470 fail: 2471 dev_kfree_skb_any(skb); 2472 2473 return ret; 2474 } 2475 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2476 2477 int rtw89_fw_h2c_default_dmac_tbl_v3(struct rtw89_dev *rtwdev, 2478 struct rtw89_vif_link *rtwvif_link, 2479 struct rtw89_sta_link *rtwsta_link) 2480 { 2481 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2482 struct rtw89_h2c_dctlinfo_ud_v3 *h2c; 2483 u32 len = sizeof(*h2c); 2484 struct sk_buff *skb; 2485 int ret; 2486 2487 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2488 if (!skb) { 2489 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2490 return -ENOMEM; 2491 } 2492 skb_put(skb, len); 2493 h2c = (struct rtw89_h2c_dctlinfo_ud_v3 *)skb->data; 2494 2495 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V3_C0_MACID) | 2496 le32_encode_bits(1, DCTLINFO_V3_C0_OP); 2497 2498 h2c->m0 = cpu_to_le32(DCTLINFO_V3_W0_ALL); 2499 h2c->m1 = cpu_to_le32(DCTLINFO_V3_W1_ALL); 2500 h2c->m2 = cpu_to_le32(DCTLINFO_V3_W2_ALL); 2501 h2c->m3 = cpu_to_le32(DCTLINFO_V3_W3_ALL); 2502 h2c->m4 = cpu_to_le32(DCTLINFO_V3_W4_ALL); 2503 h2c->m5 = cpu_to_le32(DCTLINFO_V3_W5_ALL); 2504 h2c->m6 = cpu_to_le32(DCTLINFO_V3_W6_ALL); 2505 h2c->m7 = cpu_to_le32(DCTLINFO_V3_W7_ALL); 2506 h2c->m8 = cpu_to_le32(DCTLINFO_V3_W8_ALL); 2507 h2c->m9 = cpu_to_le32(DCTLINFO_V3_W9_ALL); 2508 h2c->m10 = cpu_to_le32(DCTLINFO_V3_W10_ALL); 2509 h2c->m11 = cpu_to_le32(DCTLINFO_V3_W11_ALL); 2510 h2c->m12 = cpu_to_le32(DCTLINFO_V3_W12_ALL); 2511 h2c->m13 = cpu_to_le32(DCTLINFO_V3_W13_ALL); 2512 2513 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2514 H2C_CAT_MAC, 2515 H2C_CL_MAC_FR_EXCHG, 2516 H2C_FUNC_MAC_DCTLINFO_UD_V3, 0, 0, 2517 len); 2518 2519 ret = rtw89_h2c_tx(rtwdev, skb, false); 2520 if (ret) { 2521 rtw89_err(rtwdev, "failed to send h2c\n"); 2522 goto fail; 2523 } 2524 2525 return 0; 2526 fail: 2527 dev_kfree_skb_any(skb); 2528 2529 return ret; 2530 } 2531 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v3); 2532 2533 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2534 struct rtw89_vif_link *rtwvif_link, 2535 struct rtw89_sta_link *rtwsta_link, 2536 bool valid, struct ieee80211_ampdu_params *params) 2537 { 2538 const struct rtw89_chip_info *chip = rtwdev->chip; 2539 struct rtw89_h2c_ba_cam *h2c; 2540 u8 macid = rtwsta_link->mac_id; 2541 u32 len = 
sizeof(*h2c); 2542 struct sk_buff *skb; 2543 u8 entry_idx; 2544 int ret; 2545 2546 ret = valid ? 2547 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2548 &entry_idx) : 2549 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2550 &entry_idx); 2551 if (ret) { 2552 /* it still works even if we don't have static BA CAM, because 2553 * hardware can create dynamic BA CAM automatically. 2554 */ 2555 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2556 "failed to %s entry tid=%d for h2c ba cam\n", 2557 valid ? "alloc" : "free", params->tid); 2558 return 0; 2559 } 2560 2561 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2562 if (!skb) { 2563 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2564 return -ENOMEM; 2565 } 2566 skb_put(skb, len); 2567 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2568 2569 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2570 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2571 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2572 else 2573 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2574 if (!valid) 2575 goto end; 2576 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2577 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2578 if (params->buf_size > 64) 2579 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2580 else 2581 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2582 /* If init req is set, hw will set the ssn */ 2583 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2584 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2585 2586 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2587 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2588 le32_encode_bits(rtwvif_link->mac_idx, 2589 RTW89_H2C_BA_CAM_W1_BAND); 2590 } 2591 2592 end: 2593 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2594 H2C_CAT_MAC, 2595 H2C_CL_BA_CAM, 2596 H2C_FUNC_MAC_BA_CAM, 0, 1, 2597 len); 2598 2599 
ret = rtw89_h2c_tx(rtwdev, skb, false); 2600 if (ret) { 2601 rtw89_err(rtwdev, "failed to send h2c\n"); 2602 goto fail; 2603 } 2604 2605 return 0; 2606 fail: 2607 dev_kfree_skb_any(skb); 2608 2609 return ret; 2610 } 2611 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2612 2613 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2614 u8 entry_idx, u8 uid) 2615 { 2616 struct rtw89_h2c_ba_cam *h2c; 2617 u32 len = sizeof(*h2c); 2618 struct sk_buff *skb; 2619 int ret; 2620 2621 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2622 if (!skb) { 2623 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2624 return -ENOMEM; 2625 } 2626 skb_put(skb, len); 2627 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2628 2629 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2630 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2631 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2632 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2633 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2634 2635 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2636 H2C_CAT_MAC, 2637 H2C_CL_BA_CAM, 2638 H2C_FUNC_MAC_BA_CAM, 0, 1, 2639 len); 2640 2641 ret = rtw89_h2c_tx(rtwdev, skb, false); 2642 if (ret) { 2643 rtw89_err(rtwdev, "failed to send h2c\n"); 2644 goto fail; 2645 } 2646 2647 return 0; 2648 fail: 2649 dev_kfree_skb_any(skb); 2650 2651 return ret; 2652 } 2653 2654 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2655 { 2656 const struct rtw89_chip_info *chip = rtwdev->chip; 2657 u8 entry_idx = chip->bacam_num; 2658 u8 uid = 0; 2659 int i; 2660 2661 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2662 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2663 entry_idx++; 2664 uid++; 2665 } 2666 } 2667 2668 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2669 struct rtw89_vif_link *rtwvif_link, 2670 struct rtw89_sta_link *rtwsta_link, 2671 bool valid, struct ieee80211_ampdu_params *params) 2672 { 2673 const 
struct rtw89_chip_info *chip = rtwdev->chip; 2674 struct rtw89_h2c_ba_cam_v1 *h2c; 2675 u8 macid = rtwsta_link->mac_id; 2676 u32 len = sizeof(*h2c); 2677 struct sk_buff *skb; 2678 u8 entry_idx; 2679 u8 bmap_size; 2680 int ret; 2681 2682 ret = valid ? 2683 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2684 &entry_idx) : 2685 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2686 &entry_idx); 2687 if (ret) { 2688 /* it still works even if we don't have static BA CAM, because 2689 * hardware can create dynamic BA CAM automatically. 2690 */ 2691 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2692 "failed to %s entry tid=%d for h2c ba cam\n", 2693 valid ? "alloc" : "free", params->tid); 2694 return 0; 2695 } 2696 2697 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2698 if (!skb) { 2699 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2700 return -ENOMEM; 2701 } 2702 skb_put(skb, len); 2703 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2704 2705 if (params->buf_size > 512) 2706 bmap_size = 10; 2707 else if (params->buf_size > 256) 2708 bmap_size = 8; 2709 else if (params->buf_size > 64) 2710 bmap_size = 4; 2711 else 2712 bmap_size = 0; 2713 2714 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2715 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2716 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2717 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2718 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2719 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2720 2721 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2722 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2723 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2724 le32_encode_bits(!!rtwvif_link->mac_idx, 2725 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2726 2727 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2728 
H2C_CAT_MAC, 2729 H2C_CL_BA_CAM, 2730 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2731 len); 2732 2733 ret = rtw89_h2c_tx(rtwdev, skb, false); 2734 if (ret) { 2735 rtw89_err(rtwdev, "failed to send h2c\n"); 2736 goto fail; 2737 } 2738 2739 return 0; 2740 fail: 2741 dev_kfree_skb_any(skb); 2742 2743 return ret; 2744 } 2745 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2746 2747 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2748 u8 offset, u8 mac_idx) 2749 { 2750 struct rtw89_h2c_ba_cam_init *h2c; 2751 u32 len = sizeof(*h2c); 2752 struct sk_buff *skb; 2753 int ret; 2754 2755 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2756 if (!skb) { 2757 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2758 return -ENOMEM; 2759 } 2760 skb_put(skb, len); 2761 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2762 2763 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2764 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2765 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2766 2767 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2768 H2C_CAT_MAC, 2769 H2C_CL_BA_CAM, 2770 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2771 len); 2772 2773 ret = rtw89_h2c_tx(rtwdev, skb, false); 2774 if (ret) { 2775 rtw89_err(rtwdev, "failed to send h2c\n"); 2776 goto fail; 2777 } 2778 2779 return 0; 2780 fail: 2781 dev_kfree_skb_any(skb); 2782 2783 return ret; 2784 } 2785 2786 #define H2C_LOG_CFG_LEN 12 2787 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2788 { 2789 struct sk_buff *skb; 2790 u32 comp = 0; 2791 int ret; 2792 2793 if (enable) 2794 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2795 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2796 BIT(RTW89_FW_LOG_COMP_MLO) | BIT(RTW89_FW_LOG_COMP_SCAN); 2797 2798 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2799 if (!skb) { 2800 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2801 return -ENOMEM; 2802 } 2803 2804 
skb_put(skb, H2C_LOG_CFG_LEN); 2805 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2806 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2807 SET_LOG_CFG_COMP(skb->data, comp); 2808 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2809 2810 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2811 H2C_CAT_MAC, 2812 H2C_CL_FW_INFO, 2813 H2C_FUNC_LOG_CFG, 0, 0, 2814 H2C_LOG_CFG_LEN); 2815 2816 ret = rtw89_h2c_tx(rtwdev, skb, false); 2817 if (ret) { 2818 rtw89_err(rtwdev, "failed to send h2c\n"); 2819 goto fail; 2820 } 2821 2822 return 0; 2823 fail: 2824 dev_kfree_skb_any(skb); 2825 2826 return ret; 2827 } 2828 2829 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2830 struct rtw89_vif_link *rtwvif_link) 2831 { 2832 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2833 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2834 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2835 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2836 struct rtw89_eapol_2_of_2 *eapol_pkt; 2837 struct ieee80211_bss_conf *bss_conf; 2838 struct ieee80211_hdr_3addr *hdr; 2839 struct sk_buff *skb; 2840 u8 key_des_ver; 2841 2842 if (rtw_wow->ptk_alg == 3) 2843 key_des_ver = 1; 2844 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2845 key_des_ver = 2; 2846 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2847 key_des_ver = 3; 2848 else 2849 key_des_ver = 0; 2850 2851 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2852 if (!skb) 2853 return NULL; 2854 2855 hdr = skb_put_zero(skb, sizeof(*hdr)); 2856 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2857 IEEE80211_FCTL_TODS | 2858 IEEE80211_FCTL_PROTECTED); 2859 2860 rcu_read_lock(); 2861 2862 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2863 2864 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2865 ether_addr_copy(hdr->addr2, bss_conf->addr); 2866 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2867 2868 rcu_read_unlock(); 2869 2870 skb_put_zero(skb, sec_hdr_len); 2871 2872 
eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2873 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2874 eapol_pkt->key_des_ver = key_des_ver; 2875 2876 return skb; 2877 } 2878 2879 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2880 struct rtw89_vif_link *rtwvif_link) 2881 { 2882 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2883 struct ieee80211_bss_conf *bss_conf; 2884 struct ieee80211_hdr_3addr *hdr; 2885 struct rtw89_sa_query *sa_query; 2886 struct sk_buff *skb; 2887 2888 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2889 if (!skb) 2890 return NULL; 2891 2892 hdr = skb_put_zero(skb, sizeof(*hdr)); 2893 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2894 IEEE80211_STYPE_ACTION | 2895 IEEE80211_FCTL_PROTECTED); 2896 2897 rcu_read_lock(); 2898 2899 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2900 2901 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2902 ether_addr_copy(hdr->addr2, bss_conf->addr); 2903 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2904 2905 rcu_read_unlock(); 2906 2907 skb_put_zero(skb, sec_hdr_len); 2908 2909 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2910 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2911 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2912 2913 return skb; 2914 } 2915 2916 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2917 struct rtw89_vif_link *rtwvif_link) 2918 { 2919 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2920 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2921 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2922 struct ieee80211_hdr_3addr *hdr; 2923 struct rtw89_arp_rsp *arp_skb; 2924 struct arphdr *arp_hdr; 2925 struct sk_buff *skb; 2926 __le16 fc; 2927 2928 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2929 if (!skb) 2930 return NULL; 2931 2932 hdr = skb_put_zero(skb, sizeof(*hdr)); 2933 2934 if (rtw_wow->ptk_alg) 2935 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | 
IEEE80211_FCTL_TODS | 2936 IEEE80211_FCTL_PROTECTED); 2937 else 2938 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2939 2940 hdr->frame_control = fc; 2941 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2942 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2943 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2944 2945 skb_put_zero(skb, sec_hdr_len); 2946 2947 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2948 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2949 arp_skb->llc_type = htons(ETH_P_ARP); 2950 2951 arp_hdr = &arp_skb->arp_hdr; 2952 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2953 arp_hdr->ar_pro = htons(ETH_P_IP); 2954 arp_hdr->ar_hln = ETH_ALEN; 2955 arp_hdr->ar_pln = 4; 2956 arp_hdr->ar_op = htons(ARPOP_REPLY); 2957 2958 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2959 arp_skb->sender_ip = rtwvif->ip_addr; 2960 2961 return skb; 2962 } 2963 2964 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2965 struct rtw89_vif_link *rtwvif_link, 2966 enum rtw89_fw_pkt_ofld_type type, 2967 u8 *id) 2968 { 2969 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2970 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2971 struct rtw89_pktofld_info *info; 2972 struct sk_buff *skb; 2973 int ret; 2974 2975 info = kzalloc_obj(*info); 2976 if (!info) 2977 return -ENOMEM; 2978 2979 switch (type) { 2980 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2981 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2982 break; 2983 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2984 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2985 break; 2986 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2987 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2988 break; 2989 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2990 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2991 break; 2992 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2993 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2994 break; 2995 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2996 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2997 break; 2998 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2999 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 3000 break; 3001 default: 3002 goto err; 3003 } 3004 3005 if (!skb) 3006 goto err; 3007 3008 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3009 kfree_skb(skb); 3010 3011 if (ret) 3012 goto err; 3013 3014 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 3015 *id = info->id; 3016 return 0; 3017 3018 err: 3019 kfree(info); 3020 return -ENOMEM; 3021 } 3022 3023 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 3024 struct rtw89_vif_link *rtwvif_link, 3025 bool notify_fw) 3026 { 3027 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 3028 struct rtw89_pktofld_info *info, *tmp; 3029 3030 list_for_each_entry_safe(info, tmp, pkt_list, list) { 3031 if (notify_fw) 3032 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3033 else 3034 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 3035 list_del(&info->list); 3036 kfree(info); 3037 } 3038 } 3039 3040 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 3041 { 3042 struct rtw89_vif_link *rtwvif_link; 
3043 struct rtw89_vif *rtwvif; 3044 unsigned int link_id; 3045 3046 rtw89_for_each_rtwvif(rtwdev, rtwvif) 3047 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 3048 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 3049 notify_fw); 3050 } 3051 3052 #define H2C_GENERAL_PKT_LEN 6 3053 #define H2C_GENERAL_PKT_ID_UND 0xff 3054 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 3055 struct rtw89_vif_link *rtwvif_link, u8 macid) 3056 { 3057 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 3058 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 3059 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 3060 struct sk_buff *skb; 3061 int ret; 3062 3063 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 3064 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 3065 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 3066 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 3067 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 3068 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 3069 3070 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 3071 if (!skb) { 3072 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3073 return -ENOMEM; 3074 } 3075 skb_put(skb, H2C_GENERAL_PKT_LEN); 3076 SET_GENERAL_PKT_MACID(skb->data, macid); 3077 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 3078 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 3079 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 3080 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 3081 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 3082 3083 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3084 H2C_CAT_MAC, 3085 H2C_CL_FW_INFO, 3086 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 3087 H2C_GENERAL_PKT_LEN); 3088 3089 ret = rtw89_h2c_tx(rtwdev, skb, false); 3090 if (ret) { 3091 rtw89_err(rtwdev, "failed to send h2c\n"); 3092 goto fail; 3093 } 3094 3095 return 0; 3096 fail: 3097 dev_kfree_skb_any(skb); 3098 3099 return ret; 3100 } 3101 3102 #define H2C_LPS_PARM_LEN 8 3103 int 
rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
		      struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	bool done_ack;
	int ret;

	/* FW that reports LPS ack through a C2H register needs no done-ack
	 * on this H2C; otherwise request one only when leaving PS mode.
	 */
	if (RTW89_CHK_FW_FEATURE(LPS_DACK_BY_C2H_REG, &rtwdev->fw))
		done_ack = false;
	else
		done_ack = !lps_param->psmode;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, done_ack,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Send per-PHY channel info (central/primary channel, band, bandwidth) to
 * firmware for LPS. BE-generation chips only. After TX, polls
 * R_CHK_LPS_STAT until firmware flags completion (warn-only on timeout).
 */
int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan;
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_h2c_lps_ch_info *h2c;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	bool no_chan = true;
	u8 phy_idx;
	u32 done;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ch_info *)skb->data;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		phy_idx = rtwvif_link->phy_idx;
		/* skip links whose PHY index has no slot in the H2C */
		if (phy_idx >= ARRAY_SIZE(h2c->info))
			continue;

		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		no_chan = false;

		h2c->info[phy_idx].central_ch = chan->channel;
		h2c->info[phy_idx].pri_ch = chan->primary_channel;
		h2c->info[phy_idx].band = chan->band_type;
		h2c->info[phy_idx].bw = chan->band_width;
	}

	if (no_chan) {
		rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n");
		ret = -ENOENT;
		goto fail;
	}

	h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);

	/* clear the done flag before TX, then poll for it below */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif)
{
	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
	static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_h2c_lps_ml_cmn_info *h2c;
	struct rtw89_vif_link *rtwvif_link;
	const struct rtw89_chan *chan;
	u8 bw_idx = RTW89_BB_BW_20_40;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	u8 beacon_bw_ofst;
	u8 gain_band;
	u32 done;
	u8 path;
	int ret;
	int i;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data;

	h2c->fmt_id = 0x3;

	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->rfe_type = efuse->rfe_type;

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		/* PHY 1 maps to RF path B, everything else to path A */
		path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A;
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
		gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);

		h2c->central_ch[rtwvif_link->phy_idx] = chan->channel;
		h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel;
		h2c->band[rtwvif_link->phy_idx] = chan->band_type;
		h2c->bw[rtwvif_link->phy_idx] = chan->band_width;
		if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6)
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1;
		else
			h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2;

		/* Fill BW20 RX gain table for beacon mode */
		for (i = 0; i < TIA_GAIN_NUM; i++) {
			h2c->tia_gain[rtwvif_link->phy_idx][i] =
				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
		}

		if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
			beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
			h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst;
		}

		memcpy(h2c->lna_gain[rtwvif_link->phy_idx],
		       gain->lna_gain[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx],
		       gain->tia_lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM + 1);
		memcpy(h2c->lna_op1db[rtwvif_link->phy_idx],
		       gain->lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);

	/* clear the done flag before TX, then poll for it below */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Fill one link's RX gain block of the LPS common-info H2C.
 * Table index bits: BIT(0) selects RF path B over A; BIT(1) selects the
 * data channel over a 20 MHz channel built on the primary channel
 * (used for beacon reception, per the *_BCN_* table naming).
 */
void rtw89_bb_lps_cmn_info_rx_gain_fill(struct rtw89_dev *rtwdev,
					struct rtw89_bb_link_info_rx_gain *h2c_gain,
					const struct rtw89_chan *chan, u8 phy_idx)
{
	const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be;
	enum rtw89_bb_link_rx_gain_table_type tab_idx;
	struct rtw89_chan chan_bcn;
	u8 bw = chan->band_width;
	u8 gain_band;
	u8 bw_idx;
	u8 path;
	int i;

	rtw89_chan_create(&chan_bcn, chan->primary_channel, chan->primary_channel,
			  chan->band_type, RTW89_CHANNEL_WIDTH_20);

	for (tab_idx = RTW89_BB_PS_LINK_RX_GAIN_TAB_BCN_PATH_A;
	     tab_idx < RTW89_BB_PS_LINK_RX_GAIN_TAB_MAX; tab_idx++) {
		struct rtw89_phy_calc_efuse_gain calc = {};

		path = (tab_idx & BIT(0)) ? (RF_PATH_B) : (RF_PATH_A);
		if (tab_idx & BIT(1)) {
			rtw89_chip_calc_rx_gain_normal(rtwdev, chan, path, phy_idx,
						       &calc);
			gain_band = rtw89_subband_to_gain_band_be(chan->subband_type);
			if (bw > RTW89_CHANNEL_WIDTH_40)
				bw_idx = RTW89_BB_BW_80_160_320;
			else
				bw_idx = RTW89_BB_BW_20_40;
		} else {
			rtw89_chip_calc_rx_gain_normal(rtwdev, &chan_bcn, path, phy_idx,
						       &calc);
			gain_band = rtw89_subband_to_gain_band_be(chan_bcn.subband_type);
			bw_idx = RTW89_BB_BW_20_40;
		}

		/* efuse ofst and comp */
		h2c_gain->gain_ofst[tab_idx] = calc.rssi_ofst;
		h2c_gain->cck_gain_ofst[tab_idx] = calc.cck_rpl_ofst;
		h2c_gain->cck_rpl_bias_comp[tab_idx][0] = calc.cck_mean_gain_bias;
		h2c_gain->cck_rpl_bias_comp[tab_idx][1] = calc.cck_mean_gain_bias;

		for (i = 0; i < TIA_GAIN_NUM; i++) {
			h2c_gain->gain_err_tia[tab_idx][i] =
				cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]);
		}
		memcpy(h2c_gain->gain_err_lna[tab_idx],
		       gain->lna_gain[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c_gain->op1db_lna[tab_idx],
		       gain->lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM);
		memcpy(h2c_gain->op1db_tia[tab_idx],
		       gain->tia_lna_op1db[gain_band][bw_idx][path],
		       LNA_GAIN_NUM + 1);

		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._20M,
		       gain->rpl_ofst_20[gain_band][path],
		       RTW89_BW20_SC_20M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._40M,
		       gain->rpl_ofst_40[gain_band][path],
		       RTW89_BW20_SC_40M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._80M,
		       gain->rpl_ofst_80[gain_band][path],
		       RTW89_BW20_SC_80M);
		memcpy(h2c_gain->rpl_bias_comp_bw[tab_idx]._160M,
		       gain->rpl_ofst_160[gain_band][path],
		       RTW89_BW20_SC_160M);
	}
}

int rtw89_fw_h2c_lps_ml_cmn_info_v1(struct rtw89_dev *rtwdev,
				    struct rtw89_vif *rtwvif)
{
	static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12};
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_h2c_lps_ml_cmn_info_v1 *h2c;
	struct rtw89_vif_link *rtwvif_link;
	const struct rtw89_chan *chan;
	struct rtw89_bb_ctx *bb;
	u32 len = sizeof(*h2c);
	unsigned int link_id;
	struct sk_buff *skb;
	u8 beacon_bw_ofst;
	u32 done;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info_v1\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_lps_ml_cmn_info_v1 *)skb->data;

	h2c->fmt_id = 0x20;

	h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->rfe_type = efuse->rfe_type;
	h2c->rssi_main = U8_MAX;

	/* mark every link slot unused (0xfe) until filled below */
	memset(h2c->link_id, 0xfe, RTW89_BB_PS_LINK_BUF_MAX);

	rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
		u8 phy_idx = rtwvif_link->phy_idx;

		bb = rtw89_get_bb_ctx(rtwdev, phy_idx);
		chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);

		h2c->link_id[phy_idx] = phy_idx;
		h2c->central_ch[phy_idx] = chan->channel;
		h2c->pri_ch[phy_idx] = chan->primary_channel;
		h2c->band[phy_idx] = chan->band_type;
		h2c->bw[phy_idx] = chan->band_width;

		if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) {
			beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx];
			h2c->dup_bcn_ofst[phy_idx] = beacon_bw_ofst;
		}

		/* rssi_main tracks the smallest per-link minimum RSSI */
		if (h2c->rssi_main > bb->ch_info.rssi_min)
			h2c->rssi_main = bb->ch_info.rssi_min;

		rtw89_bb_lps_cmn_info_rx_gain_fill(rtwdev,
						   &h2c->rx_gain[phy_idx],
						   chan, phy_idx);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
			      H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len);

	/* clear the done flag before TX, then poll for it below */
	rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT_BE4,
			       B_CHK_LPS_STAT, 0);
	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
				true, rtwdev, R_CHK_LPS_STAT_BE4, B_CHK_LPS_STAT);
	if (ret)
		rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n");

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
/* Program a P2P NoA descriptor / CT window action into firmware for the
 * given link; @desc may be NULL when only the action itself is sent.
 */
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id, u8 ctwindow_oppps)
{
	bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

return ret; 3523 } 3524 3525 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 3526 struct sk_buff *skb) 3527 { 3528 const struct rtw89_chip_info *chip = rtwdev->chip; 3529 struct rtw89_hal *hal = &rtwdev->hal; 3530 u8 ntx_path; 3531 u8 map_b; 3532 3533 if (chip->rf_path_num == 1) { 3534 ntx_path = RF_A; 3535 map_b = 0; 3536 } else { 3537 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_AB; 3538 map_b = ntx_path == RF_AB ? 1 : 0; 3539 } 3540 3541 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 3542 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 3543 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 3544 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 3545 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 3546 } 3547 3548 #define H2C_CMC_TBL_LEN 68 3549 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 3550 struct rtw89_vif_link *rtwvif_link, 3551 struct rtw89_sta_link *rtwsta_link) 3552 { 3553 const struct rtw89_chip_info *chip = rtwdev->chip; 3554 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3555 struct sk_buff *skb; 3556 int ret; 3557 3558 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3559 if (!skb) { 3560 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3561 return -ENOMEM; 3562 } 3563 skb_put(skb, H2C_CMC_TBL_LEN); 3564 SET_CTRL_INFO_MACID(skb->data, macid); 3565 SET_CTRL_INFO_OPERATION(skb->data, 1); 3566 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3567 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 3568 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3569 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 3570 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 3571 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 3572 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 3573 } 3574 SET_CMC_TBL_MGQ_RPT_EN(skb->data, rtwdev->hci.tx_rpt_enabled); 3575 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 3576 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 3577 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3578 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3579 3580 rtw89_h2c_pkt_set_hdr(rtwdev, skb, 
FWCMD_TYPE_H2C, 3581 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3582 chip->h2c_cctl_func_id, 0, 1, 3583 H2C_CMC_TBL_LEN); 3584 3585 ret = rtw89_h2c_tx(rtwdev, skb, false); 3586 if (ret) { 3587 rtw89_err(rtwdev, "failed to send h2c\n"); 3588 goto fail; 3589 } 3590 3591 return 0; 3592 fail: 3593 dev_kfree_skb_any(skb); 3594 3595 return ret; 3596 } 3597 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 3598 3599 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3600 struct rtw89_vif_link *rtwvif_link, 3601 struct rtw89_sta_link *rtwsta_link) 3602 { 3603 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3604 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3605 u32 len = sizeof(*h2c); 3606 struct sk_buff *skb; 3607 int ret; 3608 3609 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3610 if (!skb) { 3611 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3612 return -ENOMEM; 3613 } 3614 skb_put(skb, len); 3615 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3616 3617 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3618 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3619 3620 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE) | 3621 le32_encode_bits(rtwdev->hci.tx_rpt_enabled, CCTLINFO_G7_W0_MGQ_RPT_EN); 3622 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 3623 3624 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 3625 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 3626 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3627 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 3628 3629 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 3630 3631 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 3632 3633 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3634 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 3635 3636 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3637 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3638 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3639 le32_encode_bits(2, 
CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3640 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3641 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 3642 3643 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 3644 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 3645 3646 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 3647 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 3648 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 3649 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 3650 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 3651 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 3652 3653 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 3654 3655 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 3656 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3657 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3658 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3659 3660 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3661 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3662 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3663 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3664 3665 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3666 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3667 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3668 len); 3669 3670 ret = rtw89_h2c_tx(rtwdev, skb, false); 3671 if (ret) { 3672 rtw89_err(rtwdev, "failed to send h2c\n"); 3673 goto fail; 3674 } 3675 3676 return 0; 3677 fail: 3678 dev_kfree_skb_any(skb); 3679 3680 return ret; 3681 } 3682 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3683 3684 int rtw89_fw_h2c_default_cmac_tbl_be(struct rtw89_dev *rtwdev, 3685 struct rtw89_vif_link *rtwvif_link, 3686 struct rtw89_sta_link *rtwsta_link) 3687 { 3688 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3689 bool preld = rtw89_mac_chk_preload_allow(rtwdev); 3690 struct rtw89_h2c_cctlinfo_ud_be *h2c; 3691 u32 len = sizeof(*h2c); 3692 struct sk_buff *skb; 3693 int ret; 3694 3695 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3696 if (!skb) { 3697 rtw89_err(rtwdev, "failed to alloc skb for default cmac be\n"); 3698 return -ENOMEM; 3699 } 3700 skb_put(skb, len); 3701 h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data; 3702 3703 h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) | 3704 le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP); 3705 3706 h2c->w0 = le32_encode_bits(4, BE_CCTL_INFO_W0_DATARATE); 3707 h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_ALL); 3708 3709 h2c->w1 = le32_encode_bits(4, BE_CCTL_INFO_W1_DATA_RTY_LOWEST_RATE) | 3710 le32_encode_bits(0xa, BE_CCTL_INFO_W1_RTSRATE) | 3711 le32_encode_bits(4, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE); 3712 h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_ALL); 3713 3714 h2c->w1 = le32_encode_bits(preld, BE_CCTL_INFO_W2_PRELOAD_ENABLE); 3715 h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_ALL); 3716 3717 h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_ALL); 3718 3719 h2c->w4 = le32_encode_bits(0xFFFF, BE_CCTL_INFO_W4_ACT_SUBCH_CBW); 3720 h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ALL); 3721 3722 h2c->w5 = le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) | 3723 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) | 3724 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) | 3725 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) | 3726 le32_encode_bits(2, BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1); 3727 h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_ALL); 3728 3729 h2c->w6 = le32_encode_bits(0xb, BE_CCTL_INFO_W6_RESP_REF_RATE); 3730 h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_ALL); 3731 3732 h2c->w7 = le32_encode_bits(1, BE_CCTL_INFO_W7_NC) | 3733 le32_encode_bits(1, BE_CCTL_INFO_W7_NR) | 3734 le32_encode_bits(1, BE_CCTL_INFO_W7_CB) | 3735 le32_encode_bits(0x1, BE_CCTL_INFO_W7_CSI_PARA_EN) | 
3736 le32_encode_bits(0xb, BE_CCTL_INFO_W7_CSI_FIX_RATE); 3737 h2c->m7 = cpu_to_le32(BE_CCTL_INFO_W7_ALL); 3738 3739 h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_ALL); 3740 3741 h2c->w14 = le32_encode_bits(0, BE_CCTL_INFO_W14_VO_CURR_RATE) | 3742 le32_encode_bits(0, BE_CCTL_INFO_W14_VI_CURR_RATE) | 3743 le32_encode_bits(0, BE_CCTL_INFO_W14_BE_CURR_RATE_L); 3744 h2c->m14 = cpu_to_le32(BE_CCTL_INFO_W14_ALL); 3745 3746 h2c->w15 = le32_encode_bits(0, BE_CCTL_INFO_W15_BE_CURR_RATE_H) | 3747 le32_encode_bits(0, BE_CCTL_INFO_W15_BK_CURR_RATE) | 3748 le32_encode_bits(0, BE_CCTL_INFO_W15_MGNT_CURR_RATE); 3749 h2c->m15 = cpu_to_le32(BE_CCTL_INFO_W15_ALL); 3750 3751 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3752 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3753 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3754 len); 3755 3756 ret = rtw89_h2c_tx(rtwdev, skb, false); 3757 if (ret) { 3758 rtw89_err(rtwdev, "failed to send h2c\n"); 3759 goto fail; 3760 } 3761 3762 return 0; 3763 fail: 3764 dev_kfree_skb_any(skb); 3765 3766 return ret; 3767 } 3768 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_be); 3769 3770 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3771 struct ieee80211_link_sta *link_sta, 3772 u8 *pads) 3773 { 3774 bool ppe_th; 3775 u8 ppe16, ppe8; 3776 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3777 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3778 u8 ru_bitmap; 3779 u8 n, idx, sh; 3780 u16 ppe; 3781 int i; 3782 3783 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3784 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3785 if (!ppe_th) { 3786 u8 pad; 3787 3788 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3789 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3790 3791 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3792 pads[i] = pad; 3793 3794 return; 3795 } 3796 3797 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3798 n = hweight8(ru_bitmap); 3799 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		/* n is a bit offset into ppe_thres; extract the PPET16 and
		 * PPET8 fields for this RU index.
		 */
		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = RTW89_PE_DURATION_16;
		else if (ppe8 != 7)
			pads[i] = RTW89_PE_DURATION_8;
		else
			pads[i] = RTW89_PE_DURATION_0;
	}
}

/* Download the association-time CMAC table for a MAC ID (station link
 * if given, otherwise the vif link).
 */
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct rtw89_vif_link *rtwvif_link,
				struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_link_sta *link_sta;
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	if (rtwsta_link)
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

	if (rtwsta_link && link_sta->he_cap.has_he)
		__get_sta_he_pkt_padding(rtwdev, link_sta, pads);

	/* CCK1 only on non-P2P 2 GHz links; otherwise OFDM6 */
	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (rtwsta_link)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  link_sta->he_cap.has_he);
	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl);

/* EHT counterpart of __get_sta_he_pkt_padding(): derive per-bandwidth
 * packet-extension padding from the peer's EHT PPE thresholds.
 */
static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
				      struct ieee80211_link_sta *link_sta,
				      u8 *pads)
{
	u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1;
	u16 ppe_thres_hdr;
	u8 ppe16, ppe8;
	u8 n, idx, sh;
	u8 ru_bitmap;
	bool ppe_th;
	u16 ppe;
	int i;

	ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
			       IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT);
	if (!ppe_th) {
		u8 pad;

		pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5],
				  IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres);
	ru_bitmap = u16_get_bits(ppe_thres_hdr,
				 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK);
	n = hweight8(ru_bitmap);
	n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE +
	    (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n
		  += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx);
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = RTW89_PE_DURATION_16_20;
		else if (ppe8 != 7)
			pads[i] = RTW89_PE_DURATION_8;
		else
			pads[i] = RTW89_PE_DURATION_0;
	}
}

/* G7 variant of the association-time CMAC table download; wN carries
 * values, mN the valid-bit mask firmware applies.
 */
int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_link_sta *link_sta;
	u8 pads[RTW89_PPE_BW_NUM];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	if (rtwsta_link) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

		/* EHT padding takes precedence over HE when both present */
		if (link_sta->eht_cap.has_eht)
			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
		else if (link_sta->he_cap.has_he)
			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
	}

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) |
		  le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB);
	h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB |
			      CCTLINFO_G7_W0_DISDATAFB);

	h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);
	h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE);

	h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);
	h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL);

	h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);
	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL);

	h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID);
	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID);

	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
		h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM);
		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM);
	}

	if (bss_conf->eht_support) {
		u16 punct = bss_conf->chanreq.oper.punctured;

		/* active subchannels are the complement of punctured ones */
		h2c->w4 |= le32_encode_bits(~punct,
					    CCTLINFO_G7_W4_ACT_SUBCH_CBW);
		h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);
	}

	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
				   CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);
	h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 |
			      CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4);

	h2c->w6 = le32_encode_bits(vif->cfg.aid, CCTLINFO_G7_W6_AID12_PAID) |
		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
				   CCTLINFO_G7_W6_ULDL);
	h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_AID12_PAID | CCTLINFO_G7_W6_ULDL);

	if (rtwsta_link) {
		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
					   CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
		h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT);
	}

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7);

/* BE variant of the association-time CMAC table download. */
int rtw89_fw_h2c_assoc_cmac_tbl_be(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	u8 mac_id = rtwsta_link ?
		    rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_link_sta *link_sta;
	u8 pads[RTW89_PPE_BW_NUM];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for assoc cmac be\n");
		return -ENOMEM;
	}

	rcu_read_lock();

	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	if (rtwsta_link) {
		link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

		/* EHT padding takes precedence over HE when both present */
		if (link_sta->eht_cap.has_eht)
			__get_sta_eht_pkt_padding(rtwdev, link_sta, pads);
		else if (link_sta->he_cap.has_he)
			__get_sta_he_pkt_padding(rtwdev, link_sta, pads);
	}

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	h2c->w0 = le32_encode_bits(1, BE_CCTL_INFO_W0_DISRTSFB) |
		  le32_encode_bits(1, BE_CCTL_INFO_W0_DISDATAFB);
	h2c->m0 = cpu_to_le32(BE_CCTL_INFO_W0_DISRTSFB |
			      BE_CCTL_INFO_W0_DISDATAFB);

	h2c->w1 = le32_encode_bits(lowest_rate, BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);
	h2c->m1 = cpu_to_le32(BE_CCTL_INFO_W1_RTS_RTY_LOWEST_RATE);

	h2c->w2 = le32_encode_bits(0, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);
	h2c->m2 = cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL);

	h2c->w3 = le32_encode_bits(0, BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);
	h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_RTS_TXCNT_LMT_SEL);

	h2c->w4 = le32_encode_bits(rtwvif_link->port, BE_CCTL_INFO_W4_MULTI_PORT_ID);
	h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_MULTI_PORT_ID);

	if (bss_conf->eht_support) {
		u16 punct = bss_conf->chanreq.oper.punctured;

		/* active subchannels are the complement of punctured ones */
		h2c->w4 |= le32_encode_bits(~punct,
					    BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
		h2c->m4 |= cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
	}

	h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1) |
		  le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320],
				   BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);
	h2c->m5 = cpu_to_le32(BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING0_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING1_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING2_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING3_V1 |
			      BE_CCTL_INFO_W5_NOMINAL_PKT_PADDING4_V1);

	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
		h2c->w5 |= le32_encode_bits(0, BE_CCTL_INFO_W5_DATA_DCM_V1);
		h2c->m5 |= cpu_to_le32(BE_CCTL_INFO_W5_DATA_DCM_V1);
	}

	h2c->w6 = le32_encode_bits(vif->cfg.aid, BE_CCTL_INFO_W6_AID12_PAID) |
		  le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0,
				   BE_CCTL_INFO_W6_ULDL);
	h2c->m6 = cpu_to_le32(BE_CCTL_INFO_W6_AID12_PAID | BE_CCTL_INFO_W6_ULDL);

	if (rtwsta_link) {
		h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he,
					   BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
		h2c->m8 = cpu_to_le32(BE_CCTL_INFO_W8_BSR_QUEUE_SIZE_FORMAT_V1);
	}

	rcu_read_unlock();

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_be);

int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 agg_num = 0;
	u8 ba_bmap = 0;
	int ret;
	u8 tid;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	/* take the smallest agg_num across all TIDs with A-MPDU enabled */
	for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
		if (agg_num == 0)
			agg_num = rtwsta->ampdu_params[tid].agg_num;
		else
			agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
	}

	if (agg_num <= 0x20)
		ba_bmap = 3;
	else if (agg_num > 0x20 && agg_num <= 0x40)
		ba_bmap = 0;
	else if (agg_num > 0x40 && agg_num <= 0x80)
		ba_bmap = 1;
	else if (agg_num > 0x80 && agg_num <= 0x100)
		ba_bmap = 2;
	else if (agg_num > 0x100 && agg_num <= 0x200)
		ba_bmap = 4;
	else if (agg_num > 0x200 &&
		 agg_num <= 0x400)
		ba_bmap = 5;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP);
	h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7);

/*
 * BE-layout twin of rtw89_fw_h2c_ampdu_cmac_tbl_g7: derive the BA bitmap
 * encoding from the smallest A-MPDU agg_num across active TIDs and push it
 * to firmware via the CMAC control table.
 */
int rtw89_fw_h2c_ampdu_cmac_tbl_be(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_sta *rtwsta = rtwsta_link->rtwsta;
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 agg_num = 0;
	u8 ba_bmap = 0;
	int ret;
	u8 tid;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac be\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	/* minimum agg_num over every TID with an active A-MPDU session */
	for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) {
		if (agg_num == 0)
			agg_num = rtwsta->ampdu_params[tid].agg_num;
		else
			agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num);
	}

	/* firmware-defined (non-monotonic) BA bitmap encoding */
	if (agg_num <= 0x20)
		ba_bmap = 3;
	else if (agg_num > 0x20 && agg_num <= 0x40)
		ba_bmap = 0;
	else if (agg_num > 0x40 && agg_num <= 0x80)
		ba_bmap = 1;
	else if (agg_num > 0x80 && agg_num <= 0x100)
		ba_bmap = 2;
	else if (agg_num > 0x100 && agg_num <= 0x200)
		ba_bmap = 4;
	else if (agg_num > 0x200 && agg_num <= 0x400)
		ba_bmap = 5;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	h2c->w3 = le32_encode_bits(ba_bmap, BE_CCTL_INFO_W3_BA_BMAP);
	h2c->m3 = cpu_to_le32(BE_CCTL_INFO_W3_BA_BMAP);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_be);

/*
 * Legacy-layout CMAC table update for per-station TX time and retry
 * limits; the function id comes from the chip descriptor.
 */
int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (rtwsta_link->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time);
	}
	if (rtwsta_link->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl);

int
rtw89_fw_h2c_txtime_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_g7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	/* optional per-station A-MPDU max TX time */
	if (rtwsta_link->cctl_tx_time) {
		h2c->w3 |= le32_encode_bits(1, CCTLINFO_G7_W3_AMPDU_TIME_SEL);
		h2c->m3 |= cpu_to_le32(CCTLINFO_G7_W3_AMPDU_TIME_SEL);

		h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
					    CCTLINFO_G7_W2_AMPDU_MAX_TIME);
		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_AMPDU_MAX_TIME);
	}
	/* optional per-station data retry limit */
	if (rtwsta_link->cctl_tx_retry_limit) {
		h2c->w2 |= le32_encode_bits(1, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL) |
			   le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
					    CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
		h2c->m2 |= cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL |
				       CCTLINFO_G7_W2_DATA_TX_CNT_LMT);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_g7);

/* BE-layout twin of rtw89_fw_h2c_txtime_cmac_tbl_g7 */
int rtw89_fw_h2c_txtime_cmac_tbl_be(struct rtw89_dev *rtwdev,
				    struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for txtime_cmac_be\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	if (rtwsta_link->cctl_tx_time) {
		h2c->w3 |= le32_encode_bits(1, BE_CCTL_INFO_W3_AMPDU_TIME_SEL);
		h2c->m3 |= cpu_to_le32(BE_CCTL_INFO_W3_AMPDU_TIME_SEL);

		h2c->w2 |= le32_encode_bits(rtwsta_link->ampdu_max_time,
					    BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
		h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_AMPDU_MAX_TIME);
	}
	if (rtwsta_link->cctl_tx_retry_limit) {
		h2c->w2 |= le32_encode_bits(1, BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL) |
			   le32_encode_bits(rtwsta_link->data_tx_cnt_lmt,
					    BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
		h2c->m2 |= cpu_to_le32(BE_CCTL_INFO_W2_DATA_TXCNT_LMT_SEL |
				       BE_CCTL_INFO_W2_DATA_TX_CNT_LMT);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_txtime_cmac_tbl_be);

/*
 * Update the active sub-channel bitmap (G7 layout) after a puncturing
 * change; the firmware takes active channels, so invert the punctured map.
 */
int rtw89_fw_h2c_punctured_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link,
				       u16 punctured)
{
	struct rtw89_h2c_cctlinfo_ud_g7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for punctured cmac g7\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data;

	h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, CCTLINFO_G7_C0_MACID) |
		  le32_encode_bits(1, CCTLINFO_G7_C0_OP);

	h2c->w4 = le32_encode_bits(~punctured, CCTLINFO_G7_W4_ACT_SUBCH_CBW);
	h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_g7);

/* BE-layout twin of rtw89_fw_h2c_punctured_cmac_tbl_g7 */
int rtw89_fw_h2c_punctured_cmac_tbl_be(struct rtw89_dev *rtwdev,
				       struct rtw89_vif_link *rtwvif_link,
				       u16 punctured)
{
	struct rtw89_h2c_cctlinfo_ud_be *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for punctured cmac be\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cctlinfo_ud_be *)skb->data;

	h2c->c0 = le32_encode_bits(rtwvif_link->mac_id, BE_CCTL_INFO_C0_V1_MACID) |
		  le32_encode_bits(1, BE_CCTL_INFO_C0_V1_OP);

	h2c->w4 = le32_encode_bits(~punctured, BE_CCTL_INFO_W4_ACT_SUBCH_CBW);
	h2c->m4 = cpu_to_le32(BE_CCTL_INFO_W4_ACT_SUBCH_CBW);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_punctured_cmac_tbl_be);

/* Program the TX path selection in the legacy CMAC table (AX chips only) */
int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct
rtw89_chip_info *chip = rtwdev->chip; 4600 struct sk_buff *skb; 4601 int ret; 4602 4603 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 4604 return 0; 4605 4606 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 4607 if (!skb) { 4608 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 4609 return -ENOMEM; 4610 } 4611 skb_put(skb, H2C_CMC_TBL_LEN); 4612 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 4613 SET_CTRL_INFO_OPERATION(skb->data, 1); 4614 4615 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 4616 4617 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4618 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 4619 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 4620 H2C_CMC_TBL_LEN); 4621 4622 ret = rtw89_h2c_tx(rtwdev, skb, false); 4623 if (ret) { 4624 rtw89_err(rtwdev, "failed to send h2c\n"); 4625 goto fail; 4626 } 4627 4628 return 0; 4629 fail: 4630 dev_kfree_skb_any(skb); 4631 4632 return ret; 4633 } 4634 4635 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 4636 struct rtw89_vif_link *rtwvif_link) 4637 { 4638 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 4639 rtwvif_link->chanctx_idx); 4640 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4641 struct rtw89_h2c_bcn_upd *h2c; 4642 struct sk_buff *skb_beacon; 4643 struct ieee80211_hdr *hdr; 4644 u32 len = sizeof(*h2c); 4645 struct sk_buff *skb; 4646 int bcn_total_len; 4647 u16 beacon_rate; 4648 u16 tim_offset; 4649 void *noa_data; 4650 u8 noa_len; 4651 int ret; 4652 4653 if (vif->p2p) 4654 beacon_rate = RTW89_HW_RATE_OFDM6; 4655 else if (chan->band_type == RTW89_BAND_2G) 4656 beacon_rate = RTW89_HW_RATE_CCK1; 4657 else 4658 beacon_rate = RTW89_HW_RATE_OFDM6; 4659 4660 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 4661 NULL, 0); 4662 if (!skb_beacon) { 4663 rtw89_err(rtwdev, "failed to get beacon skb\n"); 4664 return -ENOMEM; 4665 } 4666 4667 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 4668 if (noa_len && 4669 (noa_len <= skb_tailroom(skb_beacon) || 4670 
pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 4671 skb_put_data(skb_beacon, noa_data, noa_len); 4672 } 4673 4674 hdr = (struct ieee80211_hdr *)skb_beacon; 4675 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 4676 4677 bcn_total_len = len + skb_beacon->len; 4678 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 4679 if (!skb) { 4680 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 4681 dev_kfree_skb_any(skb_beacon); 4682 return -ENOMEM; 4683 } 4684 skb_put(skb, len); 4685 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 4686 4687 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 4688 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 4689 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 4690 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 4691 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 4692 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 4693 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 4694 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 4695 4696 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 4697 dev_kfree_skb_any(skb_beacon); 4698 4699 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4700 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 4701 H2C_FUNC_MAC_BCN_UPD, 0, 1, 4702 bcn_total_len); 4703 4704 ret = rtw89_h2c_tx(rtwdev, skb, false); 4705 if (ret) { 4706 rtw89_err(rtwdev, "failed to send h2c\n"); 4707 dev_kfree_skb_any(skb); 4708 return ret; 4709 } 4710 4711 return 0; 4712 } 4713 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 4714 4715 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 4716 struct rtw89_vif_link *rtwvif_link) 4717 { 4718 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 4719 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 4720 struct rtw89_h2c_bcn_upd_be *h2c; 4721 struct sk_buff *skb_beacon; 4722 
struct ieee80211_hdr *hdr; 4723 u32 len = sizeof(*h2c); 4724 struct sk_buff *skb; 4725 int bcn_total_len; 4726 u16 beacon_rate; 4727 u16 tim_offset; 4728 void *noa_data; 4729 u8 noa_len; 4730 int ret; 4731 4732 if (vif->p2p) 4733 beacon_rate = RTW89_HW_RATE_OFDM6; 4734 else if (chan->band_type == RTW89_BAND_2G) 4735 beacon_rate = RTW89_HW_RATE_CCK1; 4736 else 4737 beacon_rate = RTW89_HW_RATE_OFDM6; 4738 4739 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 4740 NULL, 0); 4741 if (!skb_beacon) { 4742 rtw89_err(rtwdev, "failed to get beacon skb\n"); 4743 return -ENOMEM; 4744 } 4745 4746 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 4747 if (noa_len && 4748 (noa_len <= skb_tailroom(skb_beacon) || 4749 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 4750 skb_put_data(skb_beacon, noa_data, noa_len); 4751 } 4752 4753 hdr = (struct ieee80211_hdr *)skb_beacon; 4754 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 4755 4756 bcn_total_len = len + skb_beacon->len; 4757 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 4758 if (!skb) { 4759 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 4760 dev_kfree_skb_any(skb_beacon); 4761 return -ENOMEM; 4762 } 4763 skb_put(skb, len); 4764 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 4765 4766 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 4767 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 4768 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 4769 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 4770 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 4771 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 4772 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 4773 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 4774 4775 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 4776 
dev_kfree_skb_any(skb_beacon); 4777 4778 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4779 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 4780 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 4781 bcn_total_len); 4782 4783 ret = rtw89_h2c_tx(rtwdev, skb, false); 4784 if (ret) { 4785 rtw89_err(rtwdev, "failed to send h2c\n"); 4786 goto fail; 4787 } 4788 4789 return 0; 4790 4791 fail: 4792 dev_kfree_skb_any(skb); 4793 4794 return ret; 4795 } 4796 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 4797 4798 int rtw89_fw_h2c_tbtt_tuning(struct rtw89_dev *rtwdev, 4799 struct rtw89_vif_link *rtwvif_link, u32 offset) 4800 { 4801 struct rtw89_h2c_tbtt_tuning *h2c; 4802 u32 len = sizeof(*h2c); 4803 struct sk_buff *skb; 4804 int ret; 4805 4806 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4807 if (!skb) { 4808 rtw89_err(rtwdev, "failed to alloc skb for h2c tbtt tuning\n"); 4809 return -ENOMEM; 4810 } 4811 skb_put(skb, len); 4812 h2c = (struct rtw89_h2c_tbtt_tuning *)skb->data; 4813 4814 h2c->w0 = le32_encode_bits(rtwvif_link->phy_idx, RTW89_H2C_TBTT_TUNING_W0_BAND) | 4815 le32_encode_bits(rtwvif_link->port, RTW89_H2C_TBTT_TUNING_W0_PORT); 4816 h2c->w1 = le32_encode_bits(offset, RTW89_H2C_TBTT_TUNING_W1_SHIFT); 4817 4818 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4819 H2C_CAT_MAC, H2C_CL_MAC_PS, 4820 H2C_FUNC_TBTT_TUNING, 0, 0, 4821 len); 4822 4823 ret = rtw89_h2c_tx(rtwdev, skb, false); 4824 if (ret) { 4825 rtw89_err(rtwdev, "failed to send h2c\n"); 4826 goto fail; 4827 } 4828 4829 return 0; 4830 fail: 4831 dev_kfree_skb_any(skb); 4832 4833 return ret; 4834 } 4835 4836 int rtw89_fw_h2c_pwr_lvl(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4837 { 4838 #define RTW89_BCN_TO_VAL_MIN 4 4839 #define RTW89_BCN_TO_VAL_MAX 64 4840 #define RTW89_DTIM_TO_VAL_MIN 7 4841 #define RTW89_DTIM_TO_VAL_MAX 15 4842 struct rtw89_beacon_track_info *bcn_track = &rtwdev->bcn_track; 4843 struct rtw89_h2c_pwr_lvl *h2c; 4844 u32 len = sizeof(*h2c); 4845 struct sk_buff *skb; 4846 u8 bcn_to_val; 4847 
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c pwr lvl\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_pwr_lvl *)skb->data;

	/* clamp the tracked beacon timeout into the firmware's valid range */
	bcn_to_val = clamp_t(u8, bcn_track->bcn_timeout,
			     RTW89_BCN_TO_VAL_MIN, RTW89_BCN_TO_VAL_MAX);

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_PWR_LVL_W0_MACID) |
		  le32_encode_bits(bcn_to_val, RTW89_H2C_PWR_LVL_W0_BCN_TO_VAL) |
		  le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_PS_LVL) |
		  le32_encode_bits(0, RTW89_H2C_PWR_LVL_W0_TRX_LVL) |
		  le32_encode_bits(RTW89_DTIM_TO_VAL_MIN,
				   RTW89_H2C_PWR_LVL_W0_DTIM_TO_VAL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_PS_POWER_LEVEL, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Inform firmware of a role change (create/remove/type change) for the
 * MACID belonging to @rtwsta_link, or to @rtwvif_link when no station
 * link is given.
 */
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif_link *rtwvif_link,
			       struct rtw89_sta_link *rtwsta_link,
			       enum rtw89_upd_mode upd_mode)
{
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct rtw89_h2c_role_maintain *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 self_role;
	int ret;

	/* in AP mode a station entry represents one of our clients */
	if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) {
		if (rtwsta_link)
			self_role = RTW89_SELF_ROLE_AP_CLIENT;
		else
			self_role = rtwvif_link->self_role;
	} else {
		self_role = rtwvif_link->self_role;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_role_maintain *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_ROLE_MAINTAIN_W0_MACID) |
		  le32_encode_bits(self_role, RTW89_H2C_ROLE_MAINTAIN_W0_SELF_ROLE) |
		  le32_encode_bits(upd_mode, RTW89_H2C_ROLE_MAINTAIN_W0_UPD_MODE) |
		  le32_encode_bits(rtwvif_link->wifi_role,
				   RTW89_H2C_ROLE_MAINTAIN_W0_WIFI_ROLE) |
		  le32_encode_bits(rtwvif_link->mac_idx,
				   RTW89_H2C_ROLE_MAINTAIN_W0_BAND) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_ROLE_MAINTAIN_W0_PORT);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Classify the peer (or, without a station link, the BSS we joined) as an
 * N/AC, AX or BE station for the firmware join command.
 */
static enum rtw89_fw_sta_type
rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		      struct rtw89_sta_link *rtwsta_link)
{
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_link_sta *link_sta;
	enum rtw89_fw_sta_type type;

	rcu_read_lock();

	if (!rtwsta_link)
		goto by_vif;

	/* prefer the peer station's own capabilities when available */
	link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true);

	if (link_sta->eht_cap.has_eht)
		type = RTW89_FW_BE_STA;
	else if (link_sta->he_cap.has_he)
		type = RTW89_FW_AX_STA;
	else
		type = RTW89_FW_N_AC_STA;

	goto out;

by_vif:
	bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true);

	if (bss_conf->eht_support)
		type = RTW89_FW_BE_STA;
	else if (bss_conf->he_support)
		type = RTW89_FW_AX_STA;
	else
		type = RTW89_FW_N_AC_STA;

out:
	rcu_read_unlock();

	return type;
}

/*
 * Send the join-info H2C for a (dis)connect event; BE chips use the
 * extended v1 format carrying MLD/EMLSR parameters.
 */
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			   struct rtw89_sta_link *rtwsta_link, bool dis_conn)
{
	u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id;
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	bool is_mld = ieee80211_vif_is_mld(vif);
	u8 self_role = rtwvif_link->self_role;
	enum rtw89_fw_sta_type sta_type;
	u8 net_type = rtwvif_link->net_type;
	struct rtw89_h2c_join_v1 *h2c_v1;
	struct rtw89_h2c_join *h2c;
	u32 len = sizeof(*h2c);
	bool format_v1 = false;
	struct sk_buff *skb;
	u8 main_mac_id;
	bool init_ps;
	int ret;

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		len = sizeof(*h2c_v1);
		format_v1 = true;
	}

	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ?
			   RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_join *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) |
		  le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) |
		  le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) |
		  le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) |
		  le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) |
		  le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) |
		  le32_encode_bits(rtwvif_link->wifi_role,
				   RTW89_H2C_JOININFO_W0_WIFI_ROLE) |
		  le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE);

	if (!format_v1)
		goto done;

	/* v1 (BE) format: append MLD/EMLSR parameters */
	h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data;

	sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link);
	/* non-designated links start in power-save state */
	init_ps = rtwvif_link != rtw89_get_designated_link(rtwvif_link->rtwvif);

	if (rtwsta_link)
		main_mac_id = rtw89_sta_get_main_macid(rtwsta_link->rtwsta);
	else
		main_mac_id = rtw89_vif_get_main_macid(rtwvif_link->rtwvif);

	h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE) |
		     le32_encode_bits(is_mld, RTW89_H2C_JOININFO_W1_IS_MLD) |
		     le32_encode_bits(main_mac_id, RTW89_H2C_JOININFO_W1_MAIN_MACID) |
		     le32_encode_bits(RTW89_H2C_JOININFO_MLO_MODE_MLSR,
				      RTW89_H2C_JOININFO_W1_MLO_MODE) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_EMLSR_CAB) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W1_NSTR_EN) |
		     le32_encode_bits(init_ps, RTW89_H2C_JOININFO_W1_INIT_PWR_STATE) |
		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US,
				      RTW89_H2C_JOININFO_W1_EMLSR_PADDING) |
		     le32_encode_bits(IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US,
				      RTW89_H2C_JOININFO_W1_EMLSR_TRANS_DELAY) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MACID_EXT) |
		     le32_encode_bits(0, RTW89_H2C_JOININFO_W2_MAIN_MACID_EXT);

	h2c_v1->w2 = 0;

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Notify firmware that DBCC (dual-band concurrency) is enabled/disabled */
int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en)
{
	struct rtw89_h2c_notify_dbcc *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_notify_dbcc *)skb->data;

	h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_NOTIFY_DBCC, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Pause or resume TX for one MACID; newer firmware additionally supports
 * a combined pause+sleep grouping (body continues below this boundary).
 */
int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_sleep_grp *h2c_new;
	struct rtw89_fw_macid_pause_grp *h2c;
	__le32 set = cpu_to_le32(BIT(sh));
	u8
	h2c_macid_pause_id;
	struct sk_buff *skb;
	u32 len;
	int ret;

	/* pick command format by firmware capability */
	if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) {
		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP;
		len = sizeof(*h2c_new);
	} else {
		h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE;
		len = sizeof(*h2c);
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) {
		h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data;

		/* mask marks the MACID bit as valid; pause/sleep set it */
		h2c_new->n[0].pause_mask_grp[grp] = set;
		h2c_new->n[0].sleep_mask_grp[grp] = set;
		if (pause) {
			h2c_new->n[0].pause_grp[grp] = set;
			h2c_new->n[0].sleep_grp[grp] = set;
		}
	} else {
		h2c = (struct rtw89_fw_macid_pause_grp *)skb->data;

		h2c->mask_grp[grp] = set;
		if (pause)
			h2c->pause_grp[grp] = set;
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      h2c_macid_pause_id, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_EDCA_LEN 12
/* Program one EDCA (AC queue) parameter set on the given band */
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_TSF32_TOGL_LEN 4
/* Enable/disable the 32-bit TSF toggle interrupt for a port (P2P use) */
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev,
			      struct rtw89_vif_link *rtwvif_link,
			      bool en)
{
	struct sk_buff *skb;
	/* fire 2000 us early when enabling */
	u16 early_us = en ? 2000 : 0;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_TSF32_TOGL_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx);
	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port);
	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TSF32_TOGL, 0, 0,
			      H2C_TSF32_TOGL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_OFLD_CFG_LEN 8
/* Download the fixed offload configuration blob to firmware */
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Thermal protection: tell firmware to alternate TX pause/active windows.
 * lv == 0 or out-of-range disables the duty cycling.
 */
int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv)
{
	struct rtw89_h2c_tx_duty *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u16 pause, active;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_tx_duty *)skb->data;

	/* guarantee "active" below never underflows */
	static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100);

	if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) {
		h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP);
	} else {
		active = 100 - lv * RTW89_THERMAL_PROT_STEP;
		pause = 100 - active;

		h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) |
			  le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TX_DUTY, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/*
 * Configure the firmware beacon filter / CQM offload for a link
 * (body continues past the end of this chunk).
 */
int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link,
				  bool connect)
{
	struct ieee80211_bss_conf *bss_conf;
	s32 thold = RTW89_DEFAULT_CQM_THOLD;
	u32 hyst = RTW89_DEFAULT_CQM_HYST;
	struct rtw89_h2c_bcnfltr *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8
max_cnt, cnt; 5344 int ret; 5345 5346 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 5347 return -EINVAL; 5348 5349 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 5350 return -EINVAL; 5351 5352 rcu_read_lock(); 5353 5354 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 5355 5356 if (bss_conf->cqm_rssi_hyst) 5357 hyst = bss_conf->cqm_rssi_hyst; 5358 if (bss_conf->cqm_rssi_thold) 5359 thold = bss_conf->cqm_rssi_thold; 5360 5361 rcu_read_unlock(); 5362 5363 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5364 if (!skb) { 5365 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 5366 return -ENOMEM; 5367 } 5368 5369 skb_put(skb, len); 5370 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 5371 5372 if (RTW89_CHK_FW_FEATURE(BEACON_LOSS_COUNT_V1, &rtwdev->fw)) 5373 max_cnt = BIT(7) - 1; 5374 else 5375 max_cnt = BIT(4) - 1; 5376 5377 cnt = min(RTW89_BCN_LOSS_CNT, max_cnt); 5378 5379 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 5380 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 5381 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 5382 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 5383 RTW89_H2C_BCNFLTR_W0_MODE) | 5384 le32_encode_bits(cnt >> 4, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_H3) | 5385 le32_encode_bits(cnt & 0xf, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT_L4) | 5386 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 5387 le32_encode_bits(thold + MAX_RSSI, 5388 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 5389 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 5390 5391 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5392 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5393 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 5394 5395 ret = rtw89_h2c_tx(rtwdev, skb, false); 5396 if (ret) { 5397 rtw89_err(rtwdev, "failed to send h2c\n"); 5398 goto fail; 5399 } 5400 5401 return 0; 5402 fail: 5403 dev_kfree_skb_any(skb); 5404 5405 return ret; 5406 } 5407 5408 int 
rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 5409 struct rtw89_rx_phy_ppdu *phy_ppdu) 5410 { 5411 struct rtw89_h2c_ofld_rssi *h2c; 5412 u32 len = sizeof(*h2c); 5413 struct sk_buff *skb; 5414 s8 rssi; 5415 int ret; 5416 5417 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 5418 return -EINVAL; 5419 5420 if (!phy_ppdu) 5421 return -EINVAL; 5422 5423 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5424 if (!skb) { 5425 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 5426 return -ENOMEM; 5427 } 5428 5429 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 5430 skb_put(skb, len); 5431 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 5432 5433 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 5434 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 5435 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 5436 5437 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5438 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5439 H2C_FUNC_OFLD_RSSI, 0, 1, len); 5440 5441 ret = rtw89_h2c_tx(rtwdev, skb, false); 5442 if (ret) { 5443 rtw89_err(rtwdev, "failed to send h2c\n"); 5444 goto fail; 5445 } 5446 5447 return 0; 5448 fail: 5449 dev_kfree_skb_any(skb); 5450 5451 return ret; 5452 } 5453 5454 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 5455 { 5456 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5457 struct rtw89_traffic_stats *stats = &rtwvif->stats; 5458 struct rtw89_h2c_ofld *h2c; 5459 u32 len = sizeof(*h2c); 5460 struct sk_buff *skb; 5461 int ret; 5462 5463 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 5464 return -EINVAL; 5465 5466 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5467 if (!skb) { 5468 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 5469 return -ENOMEM; 5470 } 5471 5472 skb_put(skb, len); 5473 h2c = (struct rtw89_h2c_ofld *)skb->data; 5474 5475 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 5476 le32_encode_bits(stats->tx_throughput, 
RTW89_H2C_OFLD_W0_TX_TP) | 5477 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 5478 5479 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5480 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5481 H2C_FUNC_OFLD_TP, 0, 1, len); 5482 5483 ret = rtw89_h2c_tx(rtwdev, skb, false); 5484 if (ret) { 5485 rtw89_err(rtwdev, "failed to send h2c\n"); 5486 goto fail; 5487 } 5488 5489 return 0; 5490 fail: 5491 dev_kfree_skb_any(skb); 5492 5493 return ret; 5494 } 5495 5496 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 5497 { 5498 const struct rtw89_chip_info *chip = rtwdev->chip; 5499 struct rtw89_h2c_ra_v1 *h2c_v1; 5500 struct rtw89_h2c_ra *h2c; 5501 u32 len = sizeof(*h2c); 5502 struct sk_buff *skb; 5503 u8 ver = U8_MAX; 5504 int ret; 5505 5506 if (chip->chip_gen == RTW89_CHIP_AX) { 5507 len = sizeof(*h2c); 5508 ver = 0; 5509 } else { 5510 len = sizeof(*h2c_v1); 5511 ver = 1; 5512 } 5513 5514 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5515 if (!skb) { 5516 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 5517 return -ENOMEM; 5518 } 5519 skb_put(skb, len); 5520 h2c = (struct rtw89_h2c_ra *)skb->data; 5521 rtw89_debug(rtwdev, RTW89_DBG_RA, 5522 "ra cmd msk: %llx ", ra->ra_mask); 5523 5524 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 5525 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 5526 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 5527 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 5528 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 5529 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 5530 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 5531 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 5532 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 5533 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 5534 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 5535 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 5536 
le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 5537 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 5538 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 5539 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 5540 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 5541 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 5542 5543 if (!csi || ver >= 1) 5544 goto next_v1; 5545 5546 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 5547 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 5548 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 5549 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 5550 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 5551 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 5552 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 5553 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 5554 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 5555 5556 next_v1: 5557 if (ver < 1) 5558 goto done; 5559 5560 h2c->w3 |= le32_encode_bits(ra->partial_bw_er, 5561 RTW89_H2C_RA_V1_W3_PARTIAL_BW_SU_ER) | 5562 le32_encode_bits(ra->band, RTW89_H2C_RA_V1_W3_BAND); 5563 5564 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 5565 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 5566 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 5567 5568 done: 5569 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5570 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 5571 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 5572 len); 5573 5574 ret = rtw89_h2c_tx(rtwdev, skb, false); 5575 if (ret) { 5576 rtw89_err(rtwdev, "failed to send h2c\n"); 5577 goto fail; 5578 } 5579 5580 return 0; 5581 fail: 5582 dev_kfree_skb_any(skb); 5583 5584 return ret; 5585 } 5586 5587 int rtw89_fw_h2c_cxdrv_init(struct 
rtw89_dev *rtwdev, u8 type) 5588 { 5589 struct rtw89_btc *btc = &rtwdev->btc; 5590 struct rtw89_btc_dm *dm = &btc->dm; 5591 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 5592 struct rtw89_btc_module *module = &init_info->module; 5593 struct rtw89_btc_ant_info *ant = &module->ant; 5594 struct rtw89_h2c_cxinit *h2c; 5595 u32 len = sizeof(*h2c); 5596 struct sk_buff *skb; 5597 int ret; 5598 5599 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5600 if (!skb) { 5601 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 5602 return -ENOMEM; 5603 } 5604 skb_put(skb, len); 5605 h2c = (struct rtw89_h2c_cxinit *)skb->data; 5606 5607 h2c->hdr.type = type; 5608 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 5609 5610 h2c->ant_type = ant->type; 5611 h2c->ant_num = ant->num; 5612 h2c->ant_iso = ant->isolation; 5613 h2c->ant_info = 5614 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 5615 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 5616 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 5617 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 5618 5619 h2c->mod_rfe = module->rfe_type; 5620 h2c->mod_cv = module->cv; 5621 h2c->mod_info = 5622 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 5623 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 5624 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 5625 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 5626 h2c->mod_adie_kt = module->kt_ver_adie; 5627 h2c->wl_gch = init_info->wl_guard_ch; 5628 5629 h2c->info = 5630 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 5631 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 5632 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 5633 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 5634 u8_encode_bits(init_info->bt_only, 
RTW89_H2C_CXINIT_INFO_BT_ONLY); 5635 5636 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5637 H2C_CAT_OUTSRC, BTFC_SET, 5638 SET_DRV_INFO, 0, 0, 5639 len); 5640 5641 ret = rtw89_h2c_tx(rtwdev, skb, false); 5642 if (ret) { 5643 rtw89_err(rtwdev, "failed to send h2c\n"); 5644 goto fail; 5645 } 5646 5647 return 0; 5648 fail: 5649 dev_kfree_skb_any(skb); 5650 5651 return ret; 5652 } 5653 5654 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 5655 { 5656 struct rtw89_btc *btc = &rtwdev->btc; 5657 struct rtw89_btc_dm *dm = &btc->dm; 5658 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 5659 struct rtw89_h2c_cxinit_v7 *h2c; 5660 u32 len = sizeof(*h2c); 5661 struct sk_buff *skb; 5662 int ret; 5663 5664 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5665 if (!skb) { 5666 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 5667 return -ENOMEM; 5668 } 5669 skb_put(skb, len); 5670 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 5671 5672 h2c->hdr.type = type; 5673 h2c->hdr.ver = btc->ver->fcxinit; 5674 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5675 h2c->init = *init_info; 5676 5677 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5678 H2C_CAT_OUTSRC, BTFC_SET, 5679 SET_DRV_INFO, 0, 0, 5680 len); 5681 5682 ret = rtw89_h2c_tx(rtwdev, skb, false); 5683 if (ret) { 5684 rtw89_err(rtwdev, "failed to send h2c\n"); 5685 goto fail; 5686 } 5687 5688 return 0; 5689 fail: 5690 dev_kfree_skb_any(skb); 5691 5692 return ret; 5693 } 5694 5695 #define PORT_DATA_OFFSET 4 5696 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 5697 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 5698 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 5699 5700 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 5701 { 5702 struct rtw89_btc *btc = &rtwdev->btc; 5703 const struct rtw89_btc_ver *ver = btc->ver; 5704 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5705 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 5706 struct 
rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5707 struct rtw89_btc_wl_active_role *active = role_info->active_role; 5708 struct sk_buff *skb; 5709 u32 len; 5710 u8 offset = 0; 5711 u8 *cmd; 5712 int ret; 5713 int i; 5714 5715 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 5716 5717 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5718 if (!skb) { 5719 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5720 return -ENOMEM; 5721 } 5722 skb_put(skb, len); 5723 cmd = skb->data; 5724 5725 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5726 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5727 5728 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5729 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5730 5731 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5732 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5733 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5734 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5735 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5736 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5737 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5738 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5739 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5740 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5741 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5742 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5743 5744 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5745 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 5746 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 5747 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 5748 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 5749 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 5750 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 5751 
RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 5752 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 5753 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 5754 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 5755 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 5756 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 5757 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 5758 } 5759 5760 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5761 H2C_CAT_OUTSRC, BTFC_SET, 5762 SET_DRV_INFO, 0, 0, 5763 len); 5764 5765 ret = rtw89_h2c_tx(rtwdev, skb, false); 5766 if (ret) { 5767 rtw89_err(rtwdev, "failed to send h2c\n"); 5768 goto fail; 5769 } 5770 5771 return 0; 5772 fail: 5773 dev_kfree_skb_any(skb); 5774 5775 return ret; 5776 } 5777 5778 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 5779 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 5780 5781 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 5782 { 5783 struct rtw89_btc *btc = &rtwdev->btc; 5784 const struct rtw89_btc_ver *ver = btc->ver; 5785 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5786 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 5787 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5788 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 5789 struct sk_buff *skb; 5790 u32 len; 5791 u8 *cmd, offset; 5792 int ret; 5793 int i; 5794 5795 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 5796 5797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5798 if (!skb) { 5799 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5800 return -ENOMEM; 5801 } 5802 skb_put(skb, len); 5803 cmd = skb->data; 5804 5805 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5806 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5807 5808 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 
5809 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5810 5811 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5812 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5813 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5814 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5815 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5816 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5817 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5818 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5819 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5820 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5821 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5822 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5823 5824 offset = PORT_DATA_OFFSET; 5825 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5826 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 5827 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 5828 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 5829 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 5830 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 5831 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 5832 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 5833 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 5834 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 5835 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 5836 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 5837 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 5838 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 5839 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 5840 } 5841 5842 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5843 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 
5844 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 5845 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5846 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5847 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5848 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5849 5850 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5851 H2C_CAT_OUTSRC, BTFC_SET, 5852 SET_DRV_INFO, 0, 0, 5853 len); 5854 5855 ret = rtw89_h2c_tx(rtwdev, skb, false); 5856 if (ret) { 5857 rtw89_err(rtwdev, "failed to send h2c\n"); 5858 goto fail; 5859 } 5860 5861 return 0; 5862 fail: 5863 dev_kfree_skb_any(skb); 5864 5865 return ret; 5866 } 5867 5868 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 5869 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 5870 5871 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 5872 { 5873 struct rtw89_btc *btc = &rtwdev->btc; 5874 const struct rtw89_btc_ver *ver = btc->ver; 5875 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5876 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 5877 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 5878 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 5879 struct sk_buff *skb; 5880 u32 len; 5881 u8 *cmd, offset; 5882 int ret; 5883 int i; 5884 5885 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 5886 5887 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5888 if (!skb) { 5889 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 5890 return -ENOMEM; 5891 } 5892 skb_put(skb, len); 5893 cmd = skb->data; 5894 5895 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5896 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 5897 5898 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 5899 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 5900 5901 
RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 5902 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 5903 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 5904 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 5905 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 5906 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 5907 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 5908 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 5909 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 5910 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 5911 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 5912 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 5913 5914 offset = PORT_DATA_OFFSET; 5915 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 5916 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 5917 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 5918 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 5919 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 5920 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 5921 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 5922 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 5923 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 5924 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 5925 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 5926 } 5927 5928 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 5929 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 5930 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 5931 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 5932 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 5933 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 5934 
RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 5935 5936 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5937 H2C_CAT_OUTSRC, BTFC_SET, 5938 SET_DRV_INFO, 0, 0, 5939 len); 5940 5941 ret = rtw89_h2c_tx(rtwdev, skb, false); 5942 if (ret) { 5943 rtw89_err(rtwdev, "failed to send h2c\n"); 5944 goto fail; 5945 } 5946 5947 return 0; 5948 fail: 5949 dev_kfree_skb_any(skb); 5950 5951 return ret; 5952 } 5953 5954 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 5955 { 5956 struct rtw89_btc *btc = &rtwdev->btc; 5957 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 5958 struct rtw89_h2c_cxrole_v7 *h2c; 5959 u32 len = sizeof(*h2c); 5960 struct sk_buff *skb; 5961 int ret; 5962 5963 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5964 if (!skb) { 5965 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5966 return -ENOMEM; 5967 } 5968 skb_put(skb, len); 5969 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 5970 5971 h2c->hdr.type = type; 5972 h2c->hdr.ver = btc->ver->fwlrole; 5973 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 5974 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 5975 h2c->_u32.role_map = cpu_to_le32(role->role_map); 5976 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 5977 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 5978 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 5979 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 5980 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 5981 5982 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5983 H2C_CAT_OUTSRC, BTFC_SET, 5984 SET_DRV_INFO, 0, 0, 5985 len); 5986 5987 ret = rtw89_h2c_tx(rtwdev, skb, false); 5988 if (ret) { 5989 rtw89_err(rtwdev, "failed to send h2c\n"); 5990 goto fail; 5991 } 5992 5993 return 0; 5994 fail: 5995 dev_kfree_skb_any(skb); 5996 5997 return ret; 5998 } 5999 6000 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 6001 { 6002 struct rtw89_btc *btc = &rtwdev->btc; 6003 
struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 6004 struct rtw89_h2c_cxrole_v8 *h2c; 6005 u32 len = sizeof(*h2c); 6006 struct sk_buff *skb; 6007 int ret; 6008 6009 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6010 if (!skb) { 6011 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 6012 return -ENOMEM; 6013 } 6014 skb_put(skb, len); 6015 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 6016 6017 h2c->hdr.type = type; 6018 h2c->hdr.ver = btc->ver->fwlrole; 6019 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 6020 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 6021 h2c->_u32.role_map = cpu_to_le32(role->role_map); 6022 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 6023 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 6024 6025 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6026 H2C_CAT_OUTSRC, BTFC_SET, 6027 SET_DRV_INFO, 0, 0, 6028 len); 6029 6030 ret = rtw89_h2c_tx(rtwdev, skb, false); 6031 if (ret) { 6032 rtw89_err(rtwdev, "failed to send h2c\n"); 6033 goto fail; 6034 } 6035 6036 return 0; 6037 fail: 6038 dev_kfree_skb_any(skb); 6039 6040 return ret; 6041 } 6042 6043 int rtw89_fw_h2c_cxdrv_osi_info(struct rtw89_dev *rtwdev, u8 type) 6044 { 6045 struct rtw89_btc *btc = &rtwdev->btc; 6046 struct rtw89_btc_fbtc_outsrc_set_info *osi = &btc->dm.ost_info; 6047 struct rtw89_h2c_cxosi *h2c; 6048 u32 len = sizeof(*h2c); 6049 struct sk_buff *skb; 6050 int ret; 6051 6052 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6053 if (!skb) { 6054 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_osi\n"); 6055 return -ENOMEM; 6056 } 6057 skb_put(skb, len); 6058 h2c = (struct rtw89_h2c_cxosi *)skb->data; 6059 6060 h2c->hdr.type = type; 6061 h2c->hdr.ver = btc->ver->fcxosi; 6062 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 6063 h2c->osi = *osi; 6064 6065 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6066 H2C_CAT_OUTSRC, BTFC_SET, 6067 SET_DRV_INFO, 0, 0, 6068 len); 6069 6070 ret = rtw89_h2c_tx(rtwdev, skb, false); 
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
/* Send BTC driver control flags (manual/ignore-BT/always-freerun) to firmware. */
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
	/* trace_step only exists in the fcxctrl v0 layout */
	if (ver->fcxctrl == 0)
		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Send BTC driver control info using the v7 fixed-layout structure. */
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7;
	struct rtw89_h2c_cxctrl_v7 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data;

	h2c->hdr.type = type;
	h2c->hdr.ver = btc->ver->fcxctrl;
	h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7;
	h2c->ctrl = *ctrl;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR)
/* Send BTC TRX runtime statistics (rates, RSSI, throughput, ...) to firmware. */
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_trx_info *trx = &btc->dm.trx_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_TRX);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl);
	RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl);
	RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi);
	RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi);
	RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power);
	RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain);
	RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power);
	RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain);
	RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn);
	RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm);
	RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile);
	RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2);
	RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate);
RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 6201 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 6202 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 6203 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 6204 6205 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6206 H2C_CAT_OUTSRC, BTFC_SET, 6207 SET_DRV_INFO, 0, 0, 6208 H2C_LEN_CXDRVINFO_TRX); 6209 6210 ret = rtw89_h2c_tx(rtwdev, skb, false); 6211 if (ret) { 6212 rtw89_err(rtwdev, "failed to send h2c\n"); 6213 goto fail; 6214 } 6215 6216 return 0; 6217 fail: 6218 dev_kfree_skb_any(skb); 6219 6220 return ret; 6221 } 6222 6223 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 6224 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 6225 { 6226 struct rtw89_btc *btc = &rtwdev->btc; 6227 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 6228 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 6229 struct sk_buff *skb; 6230 u8 *cmd; 6231 int ret; 6232 6233 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 6234 if (!skb) { 6235 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 6236 return -ENOMEM; 6237 } 6238 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 6239 cmd = skb->data; 6240 6241 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 6242 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 6243 6244 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 6245 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 6246 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 6247 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 6248 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 6249 6250 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6251 H2C_CAT_OUTSRC, BTFC_SET, 6252 SET_DRV_INFO, 0, 0, 6253 H2C_LEN_CXDRVINFO_RFK); 6254 6255 ret = rtw89_h2c_tx(rtwdev, skb, false); 6256 if (ret) { 6257 rtw89_err(rtwdev, "failed to send h2c\n"); 6258 goto fail; 6259 } 6260 6261 return 0; 6262 fail: 6263 dev_kfree_skb_any(skb); 6264 6265 return ret; 6266 } 6267 6268 #define 
H2C_LEN_PKT_OFLD 4 6269 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 6270 { 6271 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 6272 struct sk_buff *skb; 6273 unsigned int cond; 6274 u8 *cmd; 6275 int ret; 6276 6277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 6278 if (!skb) { 6279 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 6280 return -ENOMEM; 6281 } 6282 skb_put(skb, H2C_LEN_PKT_OFLD); 6283 cmd = skb->data; 6284 6285 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 6286 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 6287 6288 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6289 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 6290 H2C_FUNC_PACKET_OFLD, 1, 1, 6291 H2C_LEN_PKT_OFLD); 6292 6293 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 6294 6295 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6296 if (ret < 0) { 6297 rtw89_debug(rtwdev, RTW89_DBG_FW, 6298 "failed to del pkt ofld: id %d, ret %d\n", 6299 id, ret); 6300 return ret; 6301 } 6302 6303 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 6304 return 0; 6305 } 6306 6307 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 6308 struct sk_buff *skb_ofld) 6309 { 6310 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 6311 struct sk_buff *skb; 6312 unsigned int cond; 6313 u8 *cmd; 6314 u8 alloc_id; 6315 int ret; 6316 6317 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 6318 RTW89_MAX_PKT_OFLD_NUM); 6319 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 6320 return -ENOSPC; 6321 6322 *id = alloc_id; 6323 6324 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 6325 if (!skb) { 6326 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 6327 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 6328 return -ENOMEM; 6329 } 6330 skb_put(skb, H2C_LEN_PKT_OFLD); 6331 cmd = skb->data; 6332 6333 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 6334 
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD);
	RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len);
	skb_put_data(skb, skb_ofld->data, skb_ofld->len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PACKET_OFLD, 1, 1,
			      H2C_LEN_PKT_OFLD + skb_ofld->len);

	cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret < 0) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "failed to add pkt ofld: id %d, ret %d\n",
			    alloc_id, ret);
		/* give back the slot acquired above */
		rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
		return ret;
	}

	return 0;
}

/* Upload the hw-scan channel list (AX chips) to firmware and wait for ack.
 * The command carries one fixed header plus ch_num channel elements.
 */
static
int rtw89_fw_h2c_scan_list_offload_ax(struct rtw89_dev *rtwdev, int ch_num,
				      struct list_head *chan_list)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_h2c_chinfo_elem *elem;
	struct rtw89_mac_chinfo_ax *ch_info;
	struct rtw89_h2c_chinfo *h2c;
	struct sk_buff *skb;
	unsigned int cond;
	int skb_len;
	int ret;

	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE);

	skb_len = struct_size(h2c, elem, ch_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}
	skb_put(skb, sizeof(*h2c));
	h2c = (struct rtw89_h2c_chinfo *)skb->data;

	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */

	list_for_each_entry(ch_info, chan_list, list) {
		elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem));

		elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) |
			   le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) |
			   le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) |
			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH);

		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) |
			   le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) |
			   le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) |
			   le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) |
			   le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) |
			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) |
			   le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) |
			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) |
			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) |
			   le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM);

		/* second-link TX macid only applies when an extra op is set */
		if (scan_info->extra_op.set)
			elem->w1 |= le32_encode_bits(ch_info->macid_tx,
						     RTW89_H2C_CHINFO_W1_MACID_TX);

		elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) |
			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) |
			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) |
			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3);

		elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) |
			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) |
			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) |
			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
		return ret;
	}

	return 0;
}

static
/* Upload the hw-scan channel list (BE chips) to firmware and wait for ack. */
int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num,
				      struct list_head *chan_list,
				      struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_h2c_chinfo_elem_be *elem;
	struct rtw89_mac_chinfo_be *ch_info;
	struct rtw89_h2c_chinfo_be *h2c;
	struct sk_buff *skb;
	unsigned int cond;
	u8 ver = U8_MAX;
	int skb_len;
	int ret;

	static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE);

	skb_len = struct_size(h2c, elem, ch_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n");
		return -ENOMEM;
	}

	/* older firmware uses the v0 channel-info layout */
	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
		ver = 0;

	skb_put(skb, sizeof(*h2c));
	h2c = (struct rtw89_h2c_chinfo_be *)skb->data;

	h2c->ch_num = ch_num;
	h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */
	h2c->arg = u8_encode_bits(rtwvif_link->mac_idx,
				  RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK);

	list_for_each_entry(ch_info, chan_list, list) {
		elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem));

		elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) |
			   le32_encode_bits(ch_info->central_ch,
					    RTW89_H2C_CHINFO_BE_W0_CENTER_CH) |
			   le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH);

		elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) |
			   le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) |
			   le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) |
			   le32_encode_bits(ch_info->pause_data,
					    RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) |
			   le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) |
			   le32_encode_bits(ch_info->rand_seq_num,
					    RTW89_H2C_CHINFO_BE_W1_RANDOM) |
			   le32_encode_bits(ch_info->notify_action,
					    RTW89_H2C_CHINFO_BE_W1_NOTIFY) |
			   le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0,
					    RTW89_H2C_CHINFO_BE_W1_PROBE) |
			   le32_encode_bits(ch_info->leave_crit,
					    RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) |
			   le32_encode_bits(ch_info->chkpt_timer,
					    RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER);

		elem->w2 = le32_encode_bits(ch_info->leave_time,
					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) |
			   le32_encode_bits(ch_info->leave_th,
					    RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) |
			   le32_encode_bits(ch_info->tx_pkt_ctrl,
					    RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL);

		elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) |
			   le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) |
			   le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) |
			   le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3);

		elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) |
			   le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) |
			   le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) |
			   le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7);

		elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) |
			   le32_encode_bits(ch_info->fw_probe0_ssids,
					    RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS);

		elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids,
					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) |
			   le32_encode_bits(ch_info->fw_probe0_bssids,
					    RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS);
		/* v0 firmware keeps the period in w0; later layouts moved it to w7 */
		if (ver == 0)
			elem->w0 |=
				le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD);
		else
			elem->w7 = le32_encode_bits(ch_info->period,
						    RTW89_H2C_CHINFO_BE_W7_PERIOD_V1);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len);

	cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n");
		return ret;
	}

	return 0;
}

/* Start/stop hw-scan offload on AX chips and wait for the firmware ack. */
int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev,
				 struct rtw89_scan_option *option,
				 struct rtw89_vif_link *rtwvif_link,
				 bool wowlan)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE;
	struct rtw89_h2c_scanofld *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;
	u64 tsf = 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_scanofld *)skb->data;

	/* a delayed (NLO) start needs the port TSF; fall back to an
	 * immediate scan when it cannot be read
	 */
	if (option->delay) {
		ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf);
		if (ret) {
			rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret);
			scan_mode = RTW89_SCAN_IMMEDIATE;
		} else {
			scan_mode = RTW89_SCAN_DELAY;
			tsf += (u64)option->delay * 1000;
		}
	}

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) |
		  le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION);

	h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) |
		  le32_encode_bits(option->target_ch_mode,
				   RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) |
		  le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) |
		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE);

h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 6589 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 6590 6591 if (option->target_ch_mode) { 6592 h2c->w1 |= le32_encode_bits(op->band_width, 6593 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 6594 le32_encode_bits(op->primary_channel, 6595 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 6596 le32_encode_bits(op->channel, 6597 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 6598 h2c->w0 |= le32_encode_bits(op->band_type, 6599 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 6600 } 6601 6602 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 6603 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 6604 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 6605 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 6606 6607 if (scan_info->extra_op.set) 6608 h2c->w6 = le32_encode_bits(scan_info->extra_op.macid, 6609 RTW89_H2C_SCANOFLD_W6_SECOND_MACID); 6610 6611 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6612 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 6613 H2C_FUNC_SCANOFLD, 1, 1, 6614 len); 6615 6616 if (option->enable) 6617 cond = RTW89_SCANOFLD_WAIT_COND_START; 6618 else 6619 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 6620 6621 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6622 if (ret) { 6623 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 6624 return ret; 6625 } 6626 6627 return 0; 6628 } 6629 6630 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 6631 struct rtw89_scan_option *option) 6632 { 6633 struct ieee80211_supported_band *sband; 6634 struct ieee80211_channel *chan; 6635 u8 i, idx; 6636 6637 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 6638 if (!sband) { 6639 option->prohib_chan = U64_MAX; 6640 return; 6641 } 6642 6643 for (i = 0; i < sband->n_channels; i++) { 6644 chan = &sband->channels[i]; 6645 if (chan->flags & IEEE80211_CHAN_DISABLED) { 6646 idx = (chan->hw_value - 1) / 4; 6647 option->prohib_chan |= BIT(idx); 6648 } 6649 } 6650 } 6651 6652 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev 
*rtwdev,
				 struct rtw89_scan_option *option,
				 struct rtw89_vif_link *rtwvif_link,
				 bool wowlan)
{
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_h2c_scanofld_be_macc_role *macc_role;
	struct rtw89_hw_scan_extra_op scan_op[2] = {};
	struct rtw89_chan *op = &scan_info->op_chan;
	struct rtw89_h2c_scanofld_be_opch *opch;
	struct rtw89_pktofld_info *pkt_info;
	struct rtw89_h2c_scanofld_be *h2c;
	struct ieee80211_vif *vif;
	struct sk_buff *skb;
	u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
	u8 opch_size = sizeof(*opch) * option->num_opch;
	enum rtw89_scan_be_opmode opmode;
	u8 probe_id[NUM_NL80211_BANDS];
	u8 scan_offload_ver = U8_MAX;
	u8 cfg_len = sizeof(*h2c);
	unsigned int cond;
	u8 ver = U8_MAX;
	u8 policy_val;
	void *ptr;
	u8 txnull;
	u8 txbcn;
	int ret;
	u32 len;
	u8 i;

	if (option->num_opch > RTW89_MAX_OP_NUM_BE) {
		rtw89_err(rtwdev, "num of scan OP chan %d over limit\n", option->num_opch);
		return -ENOENT;
	}

	rtw89_scan_get_6g_disabled_chan(rtwdev, option);

	/* v0 firmware only understands the layout up to w8 */
	if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) {
		cfg_len = offsetofend(typeof(*h2c), w8);
		scan_offload_ver = 0;
	}

	/* fixed config block + variable macc-role and op-channel arrays */
	len = cfg_len + macc_role_size + opch_size;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
	ptr = skb->data;

	memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));

	if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw))
		ver = 0;

	if (!wowlan) {
		list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
			if (pkt_info->wildcard_6ghz) {
				/* Provide wildcard as template */
				probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
				break;
			}
		}
	}

	h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
		  le32_encode_bits(option->scan_mode,
				   RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
		  le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) |
		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) |
		  le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) |
		  le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) |
		  le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND);

	h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) |
		  le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) |
		  le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD);

	h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) |
		  le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) |
		  le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END);

	h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) |
		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) |
		  le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) |
		  le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID);

	h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ],
				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) |
		  le32_encode_bits(probe_id[NL80211_BAND_6GHZ],
				   RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) |
		  le32_encode_bits(option->delay / 1000, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START);

	h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE);

	/* 64-bit prohibited-channel bitmap split across two words */
	h2c->w6 = le32_encode_bits(option->prohib_chan,
				   RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
	h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
				   RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
	/* honour the scan request's no-CCK flag by probing at OFDM 6M */
	if (!wowlan && req->no_cck) {
		h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
		h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
			  le32_encode_bits(RTW89_HW_RATE_OFDM6,
					   RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
	}

	if (scan_offload_ver == 0)
		goto flex_member;

	/* newer firmware is told the element sizes so it can parse the
	 * flexible arrays that follow the config block
	 */
	h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) |
		  le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) |
		  le32_encode_bits(sizeof(*opch) / sizeof(opch->w0),
				   RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP);

flex_member:
	ptr += cfg_len;

	for (i = 0; i < option->num_macc_role; i++) {
		macc_role = ptr;
		macc_role->w0 =
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) |
			le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END);
		ptr += sizeof(*macc_role);
	}

	/* op chan 0 is this link's own channel; op chan 1 (if any) is the
	 * extra-op link recorded in scan_info
	 */
	for (i = 0; i < option->num_opch; i++) {
		struct rtw89_vif_link *rtwvif_link_op;
		bool is_ap;

		switch (i) {
		case 0:
			scan_op[0].macid = rtwvif_link->mac_id;
			scan_op[0].port = rtwvif_link->port;
			scan_op[0].chan = *op;
			rtwvif_link_op = rtwvif_link;
			break;
		case 1:
			scan_op[1] = *ext;
			rtwvif_link_op = ext->rtwvif_link;
			break;
		}

		vif = rtwvif_to_vif(rtwvif_link_op->rtwvif);
		is_ap = vif->type == NL80211_IFTYPE_AP;
		/* only STA-like links with a known BSSID send NULL frames */
		txnull = !is_zero_ether_addr(rtwvif_link_op->bssid) &&
			 vif->type != NL80211_IFTYPE_AP;
		opmode = is_ap ? RTW89_SCAN_OPMODE_TBTT : RTW89_SCAN_OPMODE_INTV;
		policy_val = is_ap ? 2 : RTW89_OFF_CHAN_TIME / 10;
		txbcn = is_ap ? 1 : 0;

		opch = ptr;
		opch->w0 = le32_encode_bits(scan_op[i].macid,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) |
			   le32_encode_bits(option->band,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) |
			   le32_encode_bits(scan_op[i].port,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) |
			   le32_encode_bits(opmode,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) |
			   le32_encode_bits(txnull,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) |
			   le32_encode_bits(policy_val,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL);

		opch->w1 = le32_encode_bits(scan_op[i].chan.band_type,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) |
			   le32_encode_bits(scan_op[i].chan.band_width,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) |
			   le32_encode_bits(0x3,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) |
			   le32_encode_bits(scan_op[i].chan.primary_channel,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) |
			   le32_encode_bits(scan_op[i].chan.channel,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH);

		opch->w2 = le32_encode_bits(0,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) |
			   le32_encode_bits(0,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) |
			   le32_encode_bits(rtw89_is_mlo_1_1(rtwdev) ? 1 : 2,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS) |
			   le32_encode_bits(txbcn,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W2_TXBCN);

		opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) |
			   le32_encode_bits(RTW89_SCANOFLD_PKT_NONE,
					    RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3);

		/* dwell duration lives in w1 for v0 firmware, w4 otherwise */
		if (ver == 0)
			opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME,
						     RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION);
		else
			opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME,
						    RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1);
		ptr += sizeof(*opch);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_SCANOFLD_BE, 1, 1,
			      len);

	if (option->enable)
		cond = RTW89_SCANOFLD_BE_WAIT_COND_START;
	else
		cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP;

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n");
		return ret;
	}

	return 0;
}

/* Enable/disable TRX protection for the given PHY. No-op on non-BE chips. */
int rtw89_fw_h2c_trx_protect(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy_idx, bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_trx_protect *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c trx protect\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_trx_protect *)skb->data;

	h2c->c0 = le32_encode_bits(BIT(phy_idx), RTW89_H2C_TRX_PROTECT_C0_BAND_BITMAP) |
		  le32_encode_bits(0, RTW89_H2C_TRX_PROTECT_C0_OP_MODE);
	h2c->c1 = le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_C1_RX_IN) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_C1_PPDU_STS) |
		  le32_encode_bits(1, RTW89_H2C_TRX_PROTECT_C1_MSK_RX_IN) |
		  le32_encode_bits(1, RTW89_H2C_TRX_PROTECT_C1_MSK_PPDU_STS);
	/* mirror 'enable' into every TX-enable field; the m0/m1 masks below
	 * tell firmware which fields of w0/w1 are valid
	 */
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_BE0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_BK0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_VI0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_VO0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_BE1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_BK1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_VI1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_VO1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_MG0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_MG1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_MG2) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_HI) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_BCN) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_UL) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT1) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT2) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT3) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_SPEQ0) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W0_TXEN_SPEQ1);
	h2c->m0 = cpu_to_le32(RTW89_H2C_TRX_PROTECT_W0_TXEN_BE0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_BK0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_VI0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_VO0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_BE1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_BK1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_VI1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_VO1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_MG0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_MG1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_MG2 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_HI |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_BCN |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_UL |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT1 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT2 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_TWT3 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_SPEQ0 |
			      RTW89_H2C_TRX_PROTECT_W0_TXEN_SPEQ1);
	h2c->w1 = le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W1_CHINFO_EN) |
		  le32_encode_bits(enable, RTW89_H2C_TRX_PROTECT_W1_DFS_EN);
	h2c->m1 = cpu_to_le32(RTW89_H2C_TRX_PROTECT_W1_CHINFO_EN |
			      RTW89_H2C_TRX_PROTECT_W1_DFS_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TRX_PROTECT, 0, 1, len);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait,
				    RTW89_FW_OFLD_WAIT_COND_TRX_PROTECT);
	if (ret) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to trx protect\n");
		return ret;
	}

	return 0;
}

/* Download one page of pre-built RF register config to firmware; the H2C
 * class selects the RF path (A or B).
 */
int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
			struct rtw89_fw_h2c_rf_reg_info *info,
			u16 len, u8 page)
{
	struct sk_buff *skb;
	u8 class = info->rf_path == RF_PATH_A ?
6981 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 6982 int ret; 6983 6984 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6985 if (!skb) { 6986 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 6987 return -ENOMEM; 6988 } 6989 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 6990 6991 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6992 H2C_CAT_OUTSRC, class, page, 0, 0, 6993 len); 6994 6995 ret = rtw89_h2c_tx(rtwdev, skb, false); 6996 if (ret) { 6997 rtw89_err(rtwdev, "failed to send h2c\n"); 6998 goto fail; 6999 } 7000 7001 return 0; 7002 fail: 7003 dev_kfree_skb_any(skb); 7004 7005 return ret; 7006 } 7007 7008 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 7009 { 7010 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 7011 struct rtw89_fw_h2c_rf_get_mccch_v0 *mccch_v0; 7012 struct rtw89_fw_h2c_rf_get_mccch *mccch; 7013 u32 len = sizeof(*mccch); 7014 struct sk_buff *skb; 7015 u8 ver = U8_MAX; 7016 int ret; 7017 u8 idx; 7018 7019 if (RTW89_CHK_FW_FEATURE(RFK_NTFY_MCC_V0, &rtwdev->fw)) { 7020 len = sizeof(*mccch_v0); 7021 ver = 0; 7022 } 7023 7024 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7025 if (!skb) { 7026 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 7027 return -ENOMEM; 7028 } 7029 skb_put(skb, len); 7030 7031 idx = rfk_mcc->table_idx; 7032 if (ver == 0) { 7033 mccch_v0 = (struct rtw89_fw_h2c_rf_get_mccch_v0 *)skb->data; 7034 mccch_v0->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 7035 mccch_v0->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 7036 mccch_v0->band_0 = cpu_to_le32(rfk_mcc->band[0]); 7037 mccch_v0->band_1 = cpu_to_le32(rfk_mcc->band[1]); 7038 mccch_v0->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 7039 mccch_v0->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 7040 } else { 7041 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 7042 mccch->ch_0_0 = cpu_to_le32(rfk_mcc->ch[0]); 7043 mccch->ch_0_1 = cpu_to_le32(rfk_mcc->ch[0]); 7044 mccch->ch_1_0 = cpu_to_le32(rfk_mcc->ch[1]); 
7045 mccch->ch_1_1 = cpu_to_le32(rfk_mcc->ch[1]); 7046 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 7047 } 7048 7049 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7050 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 7051 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 7052 len); 7053 7054 ret = rtw89_h2c_tx(rtwdev, skb, false); 7055 if (ret) { 7056 rtw89_err(rtwdev, "failed to send h2c\n"); 7057 goto fail; 7058 } 7059 7060 return 0; 7061 fail: 7062 dev_kfree_skb_any(skb); 7063 7064 return ret; 7065 } 7066 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 7067 7068 int rtw89_fw_h2c_mcc_dig(struct rtw89_dev *rtwdev, 7069 enum rtw89_chanctx_idx chanctx_idx, 7070 u8 mcc_role_idx, u8 pd_val, bool en) 7071 { 7072 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, chanctx_idx); 7073 const struct rtw89_dig_regs *dig_regs = rtwdev->chip->dig_regs; 7074 struct rtw89_h2c_mcc_dig *h2c; 7075 u32 len = sizeof(*h2c); 7076 struct sk_buff *skb; 7077 int ret; 7078 7079 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7080 if (!skb) { 7081 rtw89_err(rtwdev, "failed to alloc skb for h2c mcc_dig\n"); 7082 return -ENOMEM; 7083 } 7084 skb_put(skb, len); 7085 h2c = (struct rtw89_h2c_mcc_dig *)skb->data; 7086 7087 h2c->w0 = le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_REG_CNT) | 7088 le32_encode_bits(en, RTW89_H2C_MCC_DIG_W0_DM_EN) | 7089 le32_encode_bits(mcc_role_idx, RTW89_H2C_MCC_DIG_W0_IDX) | 7090 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_SET) | 7091 le32_encode_bits(1, RTW89_H2C_MCC_DIG_W0_PHY0_EN) | 7092 le32_encode_bits(chan->channel, RTW89_H2C_MCC_DIG_W0_CENTER_CH) | 7093 le32_encode_bits(chan->band_type, RTW89_H2C_MCC_DIG_W0_BAND_TYPE); 7094 h2c->w1 = le32_encode_bits(dig_regs->seg0_pd_reg, 7095 RTW89_H2C_MCC_DIG_W1_ADDR_LSB) | 7096 le32_encode_bits(dig_regs->seg0_pd_reg >> 8, 7097 RTW89_H2C_MCC_DIG_W1_ADDR_MSB) | 7098 le32_encode_bits(dig_regs->pd_lower_bound_mask, 7099 RTW89_H2C_MCC_DIG_W1_BMASK_LSB) | 7100 le32_encode_bits(dig_regs->pd_lower_bound_mask >> 8, 7101 
RTW89_H2C_MCC_DIG_W1_BMASK_MSB); 7102 h2c->w2 = le32_encode_bits(pd_val, RTW89_H2C_MCC_DIG_W2_VAL_LSB); 7103 7104 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7105 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 7106 H2C_FUNC_FW_MCC_DIG, 0, 0, len); 7107 7108 ret = rtw89_h2c_tx(rtwdev, skb, false); 7109 if (ret) { 7110 rtw89_err(rtwdev, "failed to send h2c\n"); 7111 goto fail; 7112 } 7113 7114 return 0; 7115 fail: 7116 dev_kfree_skb_any(skb); 7117 7118 return ret; 7119 } 7120 7121 int rtw89_fw_h2c_rf_ps_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 7122 { 7123 const struct rtw89_chip_info *chip = rtwdev->chip; 7124 struct rtw89_vif_link *rtwvif_link; 7125 struct rtw89_h2c_rf_ps_info *h2c; 7126 const struct rtw89_chan *chan; 7127 u32 len = sizeof(*h2c); 7128 unsigned int link_id; 7129 struct sk_buff *skb; 7130 int ret; 7131 u8 path; 7132 u32 val; 7133 7134 if (chip->chip_gen != RTW89_CHIP_BE) 7135 return 0; 7136 7137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7138 if (!skb) { 7139 rtw89_err(rtwdev, "failed to alloc skb for h2c rf ps info\n"); 7140 return -ENOMEM; 7141 } 7142 skb_put(skb, len); 7143 h2c = (struct rtw89_h2c_rf_ps_info *)skb->data; 7144 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 7145 7146 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 7147 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 7148 path = rtw89_phy_get_syn_sel(rtwdev, rtwvif_link->phy_idx); 7149 val = rtw89_chip_chan_to_rf18_val(rtwdev, chan); 7150 7151 if (path >= chip->rf_path_num || path >= NUM_OF_RTW89_FW_RFK_PATH) { 7152 rtw89_err(rtwdev, "unsupported rf path (%d)\n", path); 7153 ret = -ENOENT; 7154 goto fail; 7155 } 7156 7157 h2c->rf18[path] = cpu_to_le32(val); 7158 h2c->pri_ch[path] = chan->primary_channel; 7159 } 7160 7161 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7162 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 7163 H2C_FUNC_OUTSRC_RF_PS_INFO, 0, 0, 7164 sizeof(*h2c)); 7165 7166 ret = rtw89_h2c_tx(rtwdev, skb, false); 7167 if (ret) { 
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ps_info);

/* Send the pre-RFK notification describing current channels/bands per RF
 * path so firmware can prepare calibration. Four wire formats exist; the
 * newest (V3, ver stays U8_MAX) is a slim header-only layout, while v0-v2
 * share a common "old format" body and differ in trailing fields.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the TX error.
 */
int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rfk_pre_info_common *common;
	struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
	struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
	struct rtw89_fw_h2c_rfk_pre_info_v2 *h2c_v2;
	struct rtw89_fw_h2c_rfk_pre_info *h2c;
	u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	u8 tbl, path;
	u32 val32;
	int ret;

	/* newest feature wins; V3 keeps the defaults (ver = U8_MAX) */
	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V3, &rtwdev->fw)) {
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V2, &rtwdev->fw)) {
		len = sizeof(*h2c_v2);
		ver = 2;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
		len = sizeof(*h2c_v1);
		ver = 1;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver <= 2)
		goto old_format;

	/* V3: slim layout, header fields only */
	h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;

	h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c->phy_idx = cpu_to_le32(phy_idx);
	h2c->mlo_1_1 = cpu_to_le32(rtw89_is_mlo_1_1(rtwdev));

	goto done;

old_format:
	/* v2 embeds v1 which embeds the common part, so the v2 view is
	 * safe to use for all of v0/v1/v2 common fields
	 */
	h2c_v2 = (struct rtw89_fw_h2c_rfk_pre_info_v2 *)skb->data;
	common = &h2c_v2->base_v1.common;

	common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
	BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);

	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
			common->dbcc.ch[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
			common->dbcc.band[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].band[tbl]);
		}
	}

	for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
		/* table_idx selects the active entry for this path */
		tbl_sel[path] = rfk_mcc->data[path].table_idx;

		common->tbl.cur_ch[path] =
			cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]);
		common->tbl.cur_band[path] =
			cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]);

		/* cur_bandwidth only exists from v2 onward */
		if (ver <= 1)
			continue;

		h2c_v2->cur_bandwidth[path] =
			cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]);
	}

	common->phy_idx = cpu_to_le32(phy_idx);

	if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */
		h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data;

		/* v0 additionally snapshots path-0 channel state plus live
		 * IQC table selection and RF CFGCH registers
		 */
		h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]);
		h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]);
		h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]);

		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1);
		h2c_v0->ktbl_sel0 = cpu_to_le32(val32);
		val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1);
		h2c_v0->ktbl_sel1 = cpu_to_le32(val32);
		val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK);
		h2c_v0->rfmod0 = cpu_to_le32(val32);
		val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK);
		h2c_v0->rfmod1 = cpu_to_le32(val32);

		if (rtw89_is_mlo_1_1(rtwdev))
			h2c_v0->mlo_1_1 = cpu_to_le32(1);

		h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type);

		goto done;
	}

	if (rtw89_is_mlo_1_1(rtwdev)) {
		h2c_v1 = &h2c_v2->base_v1;
		h2c_v1->mlo_1_1 = cpu_to_le32(1);
	}
done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_PRE_NOTIFY, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* MCC flavor of the pre-RFK notification: send per-table RF18 values and
 * the currently-selected entry per path. v0 uses the per-path data layout;
 * v1/v2 use a shared table with an independent table_idx per path.
 */
int rtw89_fw_h2c_rf_pre_ntfy_mcc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_rfk_mcc_info *rfk_mcc_v0 = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *h2c_v0;
	struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *h2c_v1;
	struct rtw89_fw_h2c_rfk_pre_info_mcc *h2c;
	struct rtw89_hal *hal = &rtwdev->hal;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	u8 tbl, path;
	u8 tbl_sel;
	int ret;

	/* newest feature wins; V2 keeps the defaults (ver = U8_MAX) */
	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V2, &rtwdev->fw)) {
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V1, &rtwdev->fw)) {
		len = sizeof(*h2c_v1);
		ver = 1;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_MCC_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy_mcc\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver != 0)
		goto v1;

	h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v0 *)skb->data;
	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
			h2c_v0->tbl_18[tbl][path] =
				cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl]);
			tbl_sel = rfk_mcc_v0->data[path].table_idx;
			h2c_v0->cur_18[path] =
				cpu_to_le32(rfk_mcc_v0->data[path].rf18[tbl_sel]);
		}
	}

	h2c_v0->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	goto done;

v1:
	h2c_v1 = (struct rtw89_fw_h2c_rfk_pre_info_mcc_v1 *)skb->data;

	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);

	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++)
		h2c_v1->tbl_18[tbl] = cpu_to_le32(rfk_mcc->rf18[tbl]);

	BUILD_BUG_ON(ARRAY_SIZE(rtwdev->rfk_mcc.data) < NUM_OF_RTW89_FW_RFK_PATH);

	/* shared table array, but tbl_sel can be independent by path */
	for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
		tbl = rfk_mcc[path].table_idx;
		h2c_v1->cur_18[path] = cpu_to_le32(rfk_mcc->rf18[tbl]);

		if (path == phy_idx)
			h2c_v1->tbl_idx = tbl;
	}

	h2c_v1->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);
	h2c_v1->phy_idx = phy_idx;

	if (rtw89_is_mlo_1_1(rtwdev))
		h2c_v1->mlo_1_1 = cpu_to_le32(1);

	if (ver == 1)
		goto done;

	/* latest layout appends association id and color/acv info */
	h2c = (struct rtw89_fw_h2c_rfk_pre_info_mcc *)skb->data;

	h2c->aid = cpu_to_le32(hal->aid);
	h2c->acv = hal->acv;

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_MCC_INFO, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the TSSI (transmit signal strength indicator) calibration offload.
 * The command carries channel info plus efuse-derived compensation tables
 * filled in by the phy helpers below.
 */
int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_h2c_rf_tssi *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_tssi *)skb->data;

	h2c->len = cpu_to_le16(len);
	h2c->phy = phy_idx;
	h2c->ch = chan->channel;
	h2c->bw = chan->band_width;
	h2c->band = chan->band_type;
	h2c->cv = hal->cv;
	h2c->tssi_mode = tssi_mode;
	h2c->rfe_type = efuse->rfe_type;

	/* only 8922A firmware drives hardware TX during TSSI calibration */
	if (chip->chip_id == RTL8922A)
		h2c->hwtx_en = true;
	else
		h2c->hwtx_en = false;

	rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c);
	rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the IQ calibration (IQK) offload. v0 firmware takes only
 * phy/dbcc; newer firmware takes full channel context.
 */
int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			const struct rtw89_chan *chan)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_h2c_rf_iqk_v0 *h2c_v0;
	struct rtw89_h2c_rf_iqk *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	int ret;

	if (RTW89_CHK_FW_FEATURE(RFK_IQK_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);

	if (ver == 0) {
		h2c_v0 = (struct rtw89_h2c_rf_iqk_v0 *)skb->data;

		h2c_v0->phy_idx = cpu_to_le32(phy_idx);
		h2c_v0->dbcc = cpu_to_le32(rtwdev->dbcc_en);

		goto done;
	}

	h2c = (struct rtw89_h2c_rf_iqk *)skb->data;

	h2c->len = sizeof(*h2c);
	h2c->ktype = 0;
	h2c->phy = phy_idx;
	h2c->kpath = rtw89_phy_get_kpath(rtwdev, phy_idx);
	h2c->band = chan->band_type;
	h2c->bw = chan->band_width;
	h2c->ch = chan->channel;
	h2c->cv = hal->cv;

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the DPK (digital pre-distortion calibration) offload on both
 * RF paths for the given channel.
 */
int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			const struct rtw89_chan *chan)
{
	struct rtw89_h2c_rf_dpk *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_dpk *)skb->data;

	h2c->len = len;
	h2c->phy = phy_idx;
	h2c->dpk_enable = true;
	h2c->kpath = RF_AB;
	h2c->cur_band = chan->band_type;
	h2c->cur_bw = chan->band_width;
	h2c->cur_ch = chan->channel;
	/* let firmware emit extra calibration logs when RFK debug is on */
	h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the TX gap calibration (TXGAPK) offload on both RF paths. */
int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			   const struct rtw89_chan *chan)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_h2c_rf_txgapk *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev,
					      len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_txgapk *)skb->data;

	h2c->len = len;
	h2c->ktype = 2;
	h2c->phy = phy_idx;
	h2c->kpath = RF_AB;
	h2c->band = chan->band_type;
	h2c->bw = chan->band_width;
	h2c->ch = chan->channel;
	h2c->cv = hal->cv;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the DAC calibration (DACK) offload. */
int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			 const struct rtw89_chan *chan)
{
	struct rtw89_h2c_rf_dack *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_dack *)skb->data;

	h2c->len = len;
	h2c->phy = phy_idx;
	h2c->type = 0;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the RX DC offset calibration (RX DCK) offload. The v0 layout is
 * a strict prefix of the current one, so the v0 fields are always filled
 * through the v0 view and only is_chl_k is appended for newer firmware.
 */
int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  const struct rtw89_chan *chan, bool is_chl_k)
{
	struct rtw89_h2c_rf_rxdck_v0 *v0;
	struct rtw89_h2c_rf_rxdck *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ver = -1;
	int ret;

	if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) {
		len = sizeof(*v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data;

	v0->len = len;
	v0->phy = phy_idx;
	v0->is_afe = false;
	v0->kpath = RF_AB;
	v0->cur_band = chan->band_type;
	v0->cur_bw = chan->band_width;
	v0->cur_ch = chan->channel;
	v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);

	if (ver == 0)
		goto hdr;

	h2c = (struct rtw89_h2c_rf_rxdck *)skb->data;
	h2c->is_chl_k = is_chl_k;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Enable/disable the TAS (time-averaged SAR) tracking in firmware. */
int rtw89_fw_h2c_rf_tas_trigger(struct rtw89_dev *rtwdev, bool enable)
{
	struct rtw89_h2c_rf_tas *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TAS\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_tas *)skb->data;

	h2c->enable = cpu_to_le32(enable);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_TAS_OFFLOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the wide-band TX IQ calibration (TXIQK) offload on both paths. */
int rtw89_fw_h2c_rf_txiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  const struct rtw89_chan *chan)
{
	struct rtw89_h2c_rf_txiqk *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXIQK\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_txiqk *)skb->data;

	h2c->len = len;
	h2c->phy = phy_idx;
	h2c->txiqk_enable = true;
	h2c->is_wb_txiqk = true;
	h2c->kpath = RF_AB;
	h2c->cur_band = chan->band_type;
	h2c->cur_bw = chan->band_width;
	h2c->cur_ch = chan->channel;
	h2c->txiqk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_TXIQK_OFFOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Trigger the CIM3K calibration offload on both RF paths. */
int rtw89_fw_h2c_rf_cim3k(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
			  const struct rtw89_chan *chan)
{
	struct rtw89_h2c_rf_cim3k *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c RF CIM3K\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_rf_cim3k *)skb->data;

	h2c->len = len;
	h2c->phy = phy_idx;
	h2c->kpath = RF_AB;
	h2c->cur_band = chan->band_type;
	h2c->cur_bw = chan->band_width;
	h2c->cur_ch = chan->channel;
	h2c->cim3k_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK,
			      H2C_FUNC_RFK_CIM3K_OFFOAD, 0, 0, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Send an arbitrary OUTSRC-category H2C payload with a standard header.
 * @rack/@dack are passed straight through to the header as ack flags.
 */
int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
			      u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
			      bool rack, bool dack)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Send a fully pre-built H2C buffer as-is (no header is prepended). */
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
		return -ENOMEM;
	}
	skb_put_data(skb, buf, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Replay all H2C commands that were queued before firmware was ready.
 * The list entries are kept; see rtw89_fw_free_all_early_h2c() for release.
 */
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
		rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
	}
}

/* Free the early-H2C list; lock-free variant for callers that already
 * hold (or do not need) the wiphy lock.
 */
void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	struct rtw89_early_h2c *early_h2c, *tmp;

	list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
		list_del(&early_h2c->list);
		kfree(early_h2c->h2c);
		kfree(early_h2c);
	}
}

void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
{
	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__rtw89_fw_free_all_early_h2c(rtwdev);
}

/* Fallback C2H handler: log the event identity and drop it. */
void rtw89_fw_c2h_dummy_handler(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;

	rtw89_debug(rtwdev, RTW89_DBG_FW,
		    "C2H cate=%u cls=%u func=%u is dummy\n", category, class, func);
}

/* Decode the C2H header words into the skb control block for later use. */
static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h)
{
	const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data;
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);

	attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY);
	attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS);
	attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC);
	attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN);
}

/* Ask the per-category handler whether this C2H must be handled in atomic
 * (interrupt) context rather than deferred to the C2H work.
 */
static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev,
				    struct sk_buff *c2h)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;

	switch (category) {
	default:
		return false;
	case RTW89_C2H_CAT_MAC:
		return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func);
	case RTW89_C2H_CAT_OUTSRC:
		return rtw89_phy_c2h_chk_atomic(rtwdev, class, func);
	}
}

/* Entry point for C2H events arriving in IRQ context: atomic events are
 * handled immediately, everything else is queued for rtw89_fw_c2h_work().
 */
void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
{
	rtw89_fw_c2h_parse_attr(c2h);
	if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h))
		goto enqueue;

	rtw89_fw_c2h_cmd_handle(rtwdev, c2h);
	dev_kfree_skb_any(c2h);
	return;

enqueue:
	skb_queue_tail(&rtwdev->c2h_queue, c2h);
	wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work);
}

/* Dispatch one parsed C2H event to the MAC/BTC/PHY handler for its
 * category/class, optionally hex-dumping it for FW debugging.
 */
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb)
{
	struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);
	u8 category = attr->category;
	u8 class = attr->class;
	u8 func = attr->func;
	u16 len = attr->len;
	bool dump = true;

	if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
		return;

	switch (category) {
	case RTW89_C2H_CAT_TEST:
		break;
	case RTW89_C2H_CAT_MAC:
		rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
		/* firmware log events are too chatty to hex-dump */
		if (class == RTW89_MAC_C2H_CLASS_INFO &&
		    func == RTW89_MAC_C2H_FUNC_C2H_LOG)
			dump = false;
		break;
	case RTW89_C2H_CAT_OUTSRC:
		if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
		    class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
			rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
		else
			rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
		break;
	}

	if (dump)
		rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
}

/* Deferred C2H processing: drain the queue under its lock into a local
 * list, then handle events without holding the queue lock.
 */
void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work)
{
	struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
						c2h_work);
	struct sk_buff *skb, *tmp;
	struct sk_buff_head c2hq;
	unsigned long flags;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__skb_queue_head_init(&c2hq);

	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);

	skb_queue_walk_safe(&c2hq, skb, tmp) {
		rtw89_fw_c2h_cmd_handle(rtwdev, skb);
		dev_kfree_skb_any(skb);
	}
}

void
rtw89_fw_c2h_purge_obsoleted_scan_events(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head c2hq;
	unsigned long flags;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	__skb_queue_head_init(&c2hq);

	/* drain the queue so it can be walked without holding its lock */
	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice_init(&rtwdev->c2h_queue, &c2hq);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);

	skb_queue_walk_safe(&c2hq, skb, tmp) {
		struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb);

		/* keep non-scan events and events for the current scan */
		if (!attr->is_scan_event || attr->scan_seq == scan_info->seq)
			continue;

		rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN,
			    "purge obsoleted scan event with seq=%d (cur=%d)\n",
			    attr->scan_seq, scan_info->seq);

		__skb_unlink(skb, &c2hq);
		dev_kfree_skb_any(skb);
	}

	/* put the survivors back for normal processing */
	spin_lock_irqsave(&rtwdev->c2h_queue.lock, flags);
	skb_queue_splice(&c2hq, &rtwdev->c2h_queue);
	spin_unlock_irqrestore(&rtwdev->c2h_queue.lock, flags);
}

/* Write an H2C message through the fallback H2C register interface (used
 * before/without the normal TX path). Waits for firmware to consume the
 * previous message, fills the register set, then rings the trigger.
 */
static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
				  struct rtw89_mac_h2c_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *h2c_reg = chip->h2c_regs;
	u8 i, val, len;
	int ret;

	/* ctrl reg reads back non-zero while firmware still owns the regs */
	ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
				rtwdev, chip->h2c_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "FW does not process h2c registers\n");
		return ret;
	}

	/* length is expressed in 32-bit register words, header included */
	len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
			   sizeof(info->u.h2creg[0]));

	u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK);
	u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK);

	for (i = 0; i < RTW89_H2CREG_MAX; i++)
		rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]);

	/* bump the sequence counter firmware uses to detect new messages */
	fw_info->h2c_counter++;
	rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr,
			  chip->h2c_counter_reg.mask, fw_info->h2c_counter);
	rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER);

	return 0;
}

/* Read a C2H message from the register interface, polling until firmware
 * signals one is available. On success info->id/content_len describe the
 * message; on timeout info->id stays RTW89_FWCMD_C2HREG_FUNC_NULL.
 */
static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
				 struct rtw89_mac_c2h_info *info)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u32 *c2h_reg = chip->c2h_regs;
	u32 timeout;
	u8 i, val;
	int ret;

	info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;

	/* USB transport needs a longer default; callers may override */
	if (rtwdev->hci.type == RTW89_HCI_TYPE_USB)
		timeout = RTW89_C2H_TIMEOUT_USB;
	else
		timeout = RTW89_C2H_TIMEOUT;

	if (info->timeout)
		timeout = info->timeout;

	ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
				       timeout, false, rtwdev,
				       chip->c2h_ctrl_reg);
	if (ret) {
		rtw89_warn(rtwdev, "c2h reg timeout\n");
		return ret;
	}

	for (i = 0; i < RTW89_C2HREG_MAX; i++)
		info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);

	/* hand the register set back to firmware */
	rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0);

	info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK);
	/* header length field counts 32-bit words; convert and strip header */
	info->content_len =
		(u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) -
		RTW89_C2HREG_HDR_LEN;

	fw_info->c2h_counter++;
	rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr,
			  chip->c2h_counter_reg.mask, fw_info->c2h_counter);

	return 0;
}

/* Register-based firmware message exchange: optionally send @h2c_info,
 * then optionally wait for @c2h_info. Either argument may be NULL, but
 * not both.
 */
int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
		     struct rtw89_mac_h2c_info *h2c_info,
		     struct rtw89_mac_c2h_info *c2h_info)
{
	int ret;

	/* GET_FEATURE runs during early probe, before the wiphy is locked */
	if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
		lockdep_assert_wiphy(rtwdev->hw->wiphy);

	if (!h2c_info && !c2h_info)
		return -EINVAL;

	if (!h2c_info)
		goto recv_c2h;

	ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
	if (ret)
		return ret;

recv_c2h:
	if (!c2h_info)
		return 0;

	ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
	if (ret)
		return ret;

	return 0;
}

/* Dump firmware status/debug registers to the kernel log for diagnosis.
 * Requires the device to be powered on.
 */
void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
{
	if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
		rtw89_err(rtwdev, "[ERR]pwr is off\n");
		return;
	}

	rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
	rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
	rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
	rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
	rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_HALT_C2H));
	rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
		   rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));

	rtw89_fw_prog_cnt_dump(rtwdev);
}

/* Release every offloaded scan packet on every supported band: de-register
 * it from firmware packet offload if still registered, then free it.
 */
static void rtw89_hw_scan_release_pkt_list(struct rtw89_dev *rtwdev)
{
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct rtw89_pktofld_info *info, *tmp;
	u8 idx;

	for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
		if (!(rtwdev->chip->support_bands & BIT(idx)))
			continue;

		list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) {
			if (test_bit(info->id, rtwdev->pkt_offload))
				rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
			list_del(&info->list);
			kfree(info);
		}
	}
}

/* Tear down hardware-scan state: free the channel list, release offloaded
 * packets, and clear the vif's scan bookkeeping.
 */
static void rtw89_hw_scan_cleanup(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;

	mac->free_chan_list(rtwdev);
	rtw89_hw_scan_release_pkt_list(rtwdev);

	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	scan_info->scanning_vif = NULL;
	scan_info->abort = false;
	scan_info->connected = false;
	scan_info->delay = 0;
}

/* On 6 GHz, decide between a wildcard probe request and a directed one.
 * Copies the SSID into @info for directed probes; returns true only for
 * the 6 GHz wildcard case (and marks @info accordingly).
 */
static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
					     struct cfg80211_scan_request *req,
					     struct rtw89_pktofld_info *info,
					     enum nl80211_band band, u8 ssid_idx)
{
	if (band != NL80211_BAND_6GHZ)
		return false;

	if (req->ssids[ssid_idx].ssid_len) {
		memcpy(info->ssid, req->ssids[ssid_idx].ssid,
		       req->ssids[ssid_idx].ssid_len);
		info->ssid_len = req->ssids[ssid_idx].ssid_len;
		return false;
	} else {
		info->wildcard_6ghz = true;
		return true;
	}
}

/* For each supported band, build a probe request (template @skb plus the
 * band-specific and common scan IEs), offload it to FW and track it on
 * scan_info->pkt_list. FW keeps its own copy, so @new is freed either way.
 */
static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
				     struct rtw89_vif_link *rtwvif_link,
				     struct sk_buff *skb, u8 ssid_idx)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_pktofld_info *info;
	struct sk_buff *new;
	int ret = 0;
	u8 band;

	for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
		if (!(rtwdev->chip->support_bands & BIT(band)))
			continue;

		new = skb_copy(skb, GFP_KERNEL);
		if (!new) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(new, ies->ies[band], ies->len[band]);
		skb_put_data(new, ies->common_ies, ies->common_ie_len);

		info = kzalloc_obj(*info);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(new);
			goto out;
		}

		rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx);

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
		if (ret) {
			kfree_skb(new);
			kfree(info);
			goto out;
		}

		list_add_tail(&info->list, &scan_info->pkt_list[band]);
		kfree_skb(new);
	}
out:
	return ret;
}

/* Build and offload one probe request per requested SSID, using @mac_addr
 * as the transmitter address.
 */
static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev,
					  struct rtw89_vif_link *rtwvif_link,
					  const u8 *mac_addr)
{
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct sk_buff *skb;
	u8 num = req->n_ssids, i;
	int ret;

	for (i = 0; i < num; i++) {
		/* Only reserve headroom for the IEs here; they are appended
		 * per band by rtw89_append_probe_req_ie().
		 */
		skb = ieee80211_probereq_get(rtwdev->hw, mac_addr,
					     req->ssids[i].ssid,
					     req->ssids[i].ssid_len,
					     req->ie_len);
		if (!skb)
			return -ENOMEM;

		ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i);
		kfree_skb(skb);

		if (ret)
			return ret;
	}

	return 0;
}

/* For 6 GHz channels discovered via RNR (reduced neighbor report), offload
 * a directed probe request per not-yet-known BSSID on this channel and
 * extend the dwell so the directed probes can be answered.
 */
static int rtw89_update_6ghz_rnr_chan_ax(struct rtw89_dev *rtwdev,
					 struct ieee80211_scan_ies *ies,
					 struct cfg80211_scan_request *req,
					 struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	struct list_head *pkt_list = rtwdev->scan_info.pkt_list;
	struct cfg80211_scan_6ghz_params *params;
	struct rtw89_pktofld_info *info, *tmp;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	bool found;
	int ret = 0;
	u8 i;

	if (!req->n_6ghz_params)
		return 0;

	for (i = 0; i < req->n_6ghz_params; i++) {
		params = &req->scan_6ghz_params[i];

		/* Only handle entries that target the channel being added. */
		if (req->channels[params->channel_idx]->hw_value !=
		    ch_info->pri_ch)
			continue;

		/* Skip BSSIDs that already have an offloaded probe. */
		found = false;
		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
			if (ether_addr_equal(tmp->bssid, params->bssid)) {
				found = true;
				break;
			}
		}
		if (found)
			continue;

		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr,
					     NULL, 0, req->ie_len);
		if (!skb)
			return -ENOMEM;

		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
		/* Direct the probe at the RNR-reported BSSID. */
		hdr = (struct ieee80211_hdr *)skb->data;
		ether_addr_copy(hdr->addr3, params->bssid);

		info = kzalloc_obj(*info);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(skb);
			goto out;
		}

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
		if (ret) {
			kfree_skb(skb);
			kfree(info);
			goto out;
		}

		ether_addr_copy(info->bssid, params->bssid);
		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);

		ch_info->tx_pkt = true;
		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;

		kfree_skb(skb);
	}

out:
	return ret;
}

/* Fill one PNO (net-detect/WoWLAN) channel entry for AX chips, attaching
 * the offloaded probe packets that apply to this channel.
 */
static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev,
				       int chan_type, int ssid_num,
				       struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_pktofld_info *info;
	u8 probe_count = 0;

	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
			/* 6 GHz probes are per channel; skip mismatches and
			 * widen the dwell when stacking extra probes.
			 */
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			else if (info->channel_6ghz && probe_count != 0)
				ch_info->period += RTW89_CHANNEL_TIME_6G;

			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}

/* Fill one HW-scan channel entry for AX chips: default flags, 6 GHz
 * dwell adjustments, RNR-directed probes, probe-packet ids and the
 * per-channel-type overrides (operating channel, DFS, extra op).
 */
static void rtw89_hw_scan_add_chan_ax(struct rtw89_dev *rtwdev, int chan_type,
				      int ssid_num,
				      struct rtw89_mac_chinfo_ax *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct ieee80211_scan_ies *ies = rtwvif->scan_ies;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	struct rtw89_pktofld_info *info;
	struct ieee80211_vif *vif;
	u8 band, probe_count = 0;
	int ret;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ch_info->ch_band == RTW89_BAND_6G) {
		/* Passive on 6 GHz for wildcard-only scans or non-PSC
		 * channels: no probes, and a shorter dwell unless the
		 * requested duration is mandatory.
		 */
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->tx_pkt = false;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	ret = rtw89_update_6ghz_rnr_chan_ax(rtwdev, ies, req, ch_info);
	if (ret)
		rtw89_warn(rtwdev, "RNR fails: %d\n", ret);

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			else if (info->channel_6ghz && probe_count != 0)
				ch_info->period += RTW89_CHANNEL_TIME_6G;

			if
			   (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		/* Return to the cloned operating channel; send a NULL frame
		 * (when associated as non-AP) instead of probes.
		 */
		ch_info->central_ch = op->channel;
		ch_info->pri_ch = op->primary_channel;
		ch_info->ch_band = op->band_type;
		ch_info->bw = op->band_width;
		vif = rtwvif_link_to_vif(rtwvif_link);
		ch_info->tx_null = !is_zero_ether_addr(rtwvif_link->bssid) &&
				   vif->type != NL80211_IFTYPE_AP;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_ACTIVE:
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_EXTRA_OP:
		/* Service a second connected vif's operating channel. */
		ch_info->central_ch = ext->chan.channel;
		ch_info->pri_ch = ext->chan.primary_channel;
		ch_info->ch_band = ext->chan.band_type;
		ch_info->bw = ext->chan.band_width;
		vif = rtwvif_link_to_vif(ext->rtwvif_link);
		ch_info->tx_null = !is_zero_ether_addr(ext->rtwvif_link->bssid) &&
				   vif->type != NL80211_IFTYPE_AP;
		ch_info->num_pkt = 0;
		ch_info->macid_tx = true;
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bound\n");
	}
}

/* Fill one PNO channel entry for BE chips; unused pkt_id slots are padded
 * with RTW89_SCANOFLD_PKT_NONE.
 */
static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
				       int ssid_num,
				       struct rtw89_mac_chinfo_be *ch_info)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_pktofld_info *info;
	u8 probe_count = 0, i;

	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) {
			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
	}

	for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
		ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;

	switch (chan_type) {
	case RTW89_CHAN_DFS:
		ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_warn(rtwdev, "Channel type out of bound\n");
		break;
	}
}

/* Fill one HW-scan channel entry for BE chips (counterpart of
 * rtw89_hw_scan_add_chan_ax, without the RNR and extra-op handling).
 */
static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
				      int ssid_num,
				      struct rtw89_mac_chinfo_be *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0, i;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;

			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
	}

	if (ch_info->ch_band == RTW89_BAND_6G) {
		/* Passive 6 GHz cases: wildcard-only scan or non-PSC channel. */
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++)
		ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE;

	switch (chan_type) {
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period =
				max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		ch_info->pause_data = true;
		break;
	case RTW89_CHAN_ACTIVE:
		ch_info->pause_data = true;
		break;
	default:
		rtw89_warn(rtwdev, "Channel type out of bound\n");
		break;
	}
}

/* Build the PNO channel list from nd_config and offload it to FW (AX).
 * The local list is always freed before returning; FW keeps its own copy.
 */
int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	int list_len;
	enum rtw89_chan_type type;
	int ret = 0;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);
	for (idx = 0, list_len = 0;
	     idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX;
	     idx++, list_len++) {
		channel = nd_config->channels[idx];
		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		ch_info->period = RTW89_CHANNEL_TIME;
		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		/* Radar/no-IR channels must be scanned passively (DFS). */
		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;

		rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info);
		list_add_tail(&ch_info->list, &chan_list);
	}
	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &chan_list);

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Append an operating-channel entry (normal or extra-op) to @chan_list,
 * maintaining the off-channel time accounting used to decide when the
 * scan must come back to the operating channel.
 */
static int rtw89_hw_scan_add_op_types_ax(struct rtw89_dev *rtwdev,
					 enum rtw89_chan_type type,
					 struct list_head *chan_list,
					 struct cfg80211_scan_request *req,
					 int *off_chan_time)
{
	struct rtw89_mac_chinfo_ax *tmp;

	tmp = kzalloc_obj(*tmp);
	if (!tmp)
		return -ENOMEM;

	switch (type) {
	case RTW89_CHAN_OPERATE:
		tmp->period = req->duration_mandatory ?
			      req->duration : RTW89_CHANNEL_TIME;
		*off_chan_time = 0;
		break;
	case RTW89_CHAN_EXTRA_OP:
		tmp->period = RTW89_CHANNEL_TIME_EXTRA_OP;
		/* still calc @off_chan_time for scan op */
		*off_chan_time += tmp->period;
		break;
	default:
		kfree(tmp);
		return -EINVAL;
	}

	rtw89_hw_scan_add_chan_ax(rtwdev, type, 0, tmp);
	list_add_tail(&tmp->list, chan_list);

	return 0;
}

/* Build the full HW-scan channel list for AX chips from the cfg80211
 * request, interleaving returns to the operating channel(s) whenever the
 * accumulated off-channel time would exceed RTW89_OFF_CHAN_TIME. The
 * result is spliced onto scan_info->chan_list.
 */
int rtw89_hw_scan_prep_chan_list_ax(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	enum rtw89_chan_type type;
	int off_chan_time = 0;
	int ret;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);

	for (idx = 0; idx < req->n_channels; idx++) {
		channel = req->channels[idx];
		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		if (req->duration)
			ch_info->period = req->duration;
		else if (channel->band
			 == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G +
					  RTW89_DWELL_TIME_6G;
		else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
			ch_info->period = RTW89_P2P_CHAN_TIME;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		/* Radar/no-IR channels must be scanned passively (DFS). */
		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan_ax(rtwdev, type, req->n_ssids, ch_info);

		/* Only when connected: come back to the operating channel(s)
		 * before too much off-channel time has accumulated.
		 */
		if (!(scan_info->connected &&
		      off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME))
			goto next;

		ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_OPERATE,
						    &chan_list, req, &off_chan_time);
		if (ret) {
			kfree(ch_info);
			goto out;
		}

		if (!ext->set)
			goto next;

		ret = rtw89_hw_scan_add_op_types_ax(rtwdev, RTW89_CHAN_EXTRA_OP,
						    &chan_list, req, &off_chan_time);
		if (ret) {
			kfree(ch_info);
			goto out;
		}

next:
		list_add_tail(&ch_info->list, &chan_list);
		off_chan_time += ch_info->period;
	}

	list_splice_tail(&chan_list, &scan_info->chan_list);
	return 0;

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Free any channel entries still pending on scan_info->chan_list (AX). */
void rtw89_hw_scan_free_chan_list_ax(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;

	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}
}

/* Offload the next batch (up to RTW89_SCAN_LIST_LIMIT_AX entries) of the
 * prepared channel list to FW, freeing the entries that were sent.
 */
int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_mac_chinfo_ax *ch_info, *tmp;
	unsigned int list_len = 0;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);

	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
		/* The operating channel (tx_null == true) should
		 * not be last in the list, to avoid breaking
		 * RTL8851BU and RTL8832BU.
		 */
		if (list_len + 1 == RTW89_SCAN_LIST_LIMIT_AX && ch_info->tx_null)
			break;

		list_move_tail(&ch_info->list, &list);

		list_len++;
		if (list_len == RTW89_SCAN_LIST_LIMIT_AX)
			break;
	}

	ret = rtw89_fw_h2c_scan_list_offload_ax(rtwdev, list_len, &list);

	list_for_each_entry_safe(ch_info, tmp, &list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Build the PNO channel list from nd_config and offload it to FW (BE). */
int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
	struct rtw89_mac_chinfo_be *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	enum rtw89_chan_type type;
	int list_len, ret;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);

	for (idx = 0, list_len = 0;
	     idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE;
	     idx++, list_len++) {
		channel = nd_config->channels[idx];
		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		ch_info->period = RTW89_CHANNEL_TIME;
		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;

		rtw89_pno_scan_add_chan_be(rtwdev, type,
					   nd_config->n_match_sets, ch_info);
		list_add_tail(&ch_info->list, &chan_list);
	}

	ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list,
						rtwvif_link);

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Build the HW-scan channel list for BE chips. When RNR colocated-AP
 * discovery is in use, non-PSC 6 GHz channels are skipped entirely; the
 * result is spliced onto scan_info->chan_list.
 */
int rtw89_hw_scan_prep_chan_list_be(struct rtw89_dev *rtwdev,
				    struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo_be *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	enum rtw89_chan_type type;
	bool chan_by_rnr;
	bool random_seq;
	int ret;
	u32 idx;

	random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN);
	chan_by_rnr = rtwdev->chip->support_rnr &&
		      (req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ);
	INIT_LIST_HEAD(&chan_list);

	for (idx = 0; idx < req->n_channels; idx++) {
		channel = req->channels[idx];

		if (channel->band == NL80211_BAND_6GHZ &&
		    !cfg80211_channel_is_psc(channel) && chan_by_rnr)
			continue;

		ch_info = kzalloc_obj(*ch_info);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		if (req->duration)
			ch_info->period = req->duration;
		else if (channel->band == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;
		else if (rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT)
			ch_info->period = RTW89_P2P_CHAN_TIME;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info);

		list_add_tail(&ch_info->list, &chan_list);
	}

	list_splice_tail(&chan_list, &scan_info->chan_list);
	return 0;

out:
	list_for_each_entry_safe(ch_info, tmp, &chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Free any channel entries still pending on scan_info->chan_list (BE). */
void rtw89_hw_scan_free_chan_list_be(struct rtw89_dev *rtwdev)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_mac_chinfo_be *ch_info, *tmp;

	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}
}

/* Offload the next batch (up to RTW89_SCAN_LIST_LIMIT_BE entries) of the
 * prepared channel list to FW, freeing the entries that were sent.
 */
int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_mac_chinfo_be *ch_info, *tmp;
	unsigned int list_len = 0;
	struct list_head list;
	int ret;

	INIT_LIST_HEAD(&list);

	list_for_each_entry_safe(ch_info, tmp, &scan_info->chan_list, list) {
		list_move_tail(&ch_info->list, &list);

		list_len++;
		if (list_len == RTW89_SCAN_LIST_LIMIT_BE)
			break;
	}

	ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &list,
						rtwvif_link);

	list_for_each_entry_safe(ch_info, tmp, &list, list) {
		list_del(&ch_info->list);
		kfree(ch_info);
	}

	return ret;
}

/* Pre-scan setup: offload probe requests, then build the generation-
 * specific channel list.
 */
static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link,
				   const u8 *mac_addr)
{
	const struct
rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 9014 int ret; 9015 9016 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link, mac_addr); 9017 if (ret) { 9018 rtw89_err(rtwdev, "Update probe request failed\n"); 9019 goto out; 9020 } 9021 ret = mac->prep_chan_list(rtwdev, rtwvif_link); 9022 out: 9023 return ret; 9024 } 9025 9026 static void rtw89_hw_scan_update_link_beacon_noa(struct rtw89_dev *rtwdev, 9027 struct rtw89_vif_link *rtwvif_link, 9028 u16 tu, bool scan) 9029 { 9030 struct ieee80211_p2p_noa_desc noa_desc = {}; 9031 struct ieee80211_bss_conf *bss_conf; 9032 u16 beacon_int; 9033 u64 tsf; 9034 int ret; 9035 9036 rcu_read_lock(); 9037 9038 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 9039 beacon_int = bss_conf->beacon_int; 9040 9041 rcu_read_unlock(); 9042 9043 tu += beacon_int * 3; 9044 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) 9045 rtwdev->scan_info.delay = ieee80211_tu_to_usec(beacon_int * 3) / 1000; 9046 9047 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 9048 if (ret) { 9049 rtw89_warn(rtwdev, "%s: failed to get tsf\n", __func__); 9050 return; 9051 } 9052 9053 noa_desc.start_time = cpu_to_le32(tsf); 9054 if (rtwdev->chip->chip_gen == RTW89_CHIP_AX) { 9055 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(tu)); 9056 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(tu)); 9057 noa_desc.count = 1; 9058 } else { 9059 noa_desc.duration = cpu_to_le32(ieee80211_tu_to_usec(20000)); 9060 noa_desc.interval = cpu_to_le32(ieee80211_tu_to_usec(20000)); 9061 noa_desc.count = 255; 9062 } 9063 9064 rtw89_p2p_noa_renew(rtwvif_link); 9065 if (scan) 9066 rtw89_p2p_noa_append(rtwvif_link, &noa_desc); 9067 9068 rtw89_chip_h2c_update_beacon(rtwdev, rtwvif_link); 9069 } 9070 9071 static void rtw89_hw_scan_update_beacon_noa(struct rtw89_dev *rtwdev, bool scan) 9072 { 9073 const struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 9074 const struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 9075 const struct rtw89_chip_info 
*chip = rtwdev->chip; 9076 struct rtw89_mac_chinfo_ax *chinfo_ax; 9077 struct rtw89_mac_chinfo_be *chinfo_be; 9078 struct rtw89_vif_link *rtwvif_link; 9079 struct list_head *pos, *tmp; 9080 struct ieee80211_vif *vif; 9081 struct rtw89_vif *rtwvif; 9082 u16 tu = 0; 9083 9084 lockdep_assert_wiphy(rtwdev->hw->wiphy); 9085 9086 if (!scan) 9087 goto update; 9088 9089 list_for_each_safe(pos, tmp, &scan_info->chan_list) { 9090 switch (chip->chip_gen) { 9091 case RTW89_CHIP_AX: 9092 chinfo_ax = list_entry(pos, typeof(*chinfo_ax), list); 9093 tu += chinfo_ax->period; 9094 break; 9095 case RTW89_CHIP_BE: 9096 chinfo_be = list_entry(pos, typeof(*chinfo_be), list); 9097 tu += chinfo_be->period; 9098 break; 9099 default: 9100 rtw89_warn(rtwdev, "%s: invalid chip gen %d\n", 9101 __func__, chip->chip_gen); 9102 return; 9103 } 9104 } 9105 9106 if (unlikely(tu == 0)) { 9107 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 9108 "%s: cannot estimate needed TU\n", __func__); 9109 return; 9110 } 9111 9112 update: 9113 list_for_each_entry(rtwvif, &mgnt->active_list, mgnt_entry) { 9114 unsigned int link_id; 9115 9116 vif = rtwvif_to_vif(rtwvif); 9117 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 9118 continue; 9119 9120 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 9121 rtw89_hw_scan_update_link_beacon_noa(rtwdev, rtwvif_link, 9122 tu, scan); 9123 } 9124 } 9125 9126 static void rtw89_hw_scan_set_extra_op_info(struct rtw89_dev *rtwdev, 9127 struct rtw89_vif *scan_rtwvif, 9128 const struct rtw89_chan *scan_op) 9129 { 9130 struct rtw89_entity_mgnt *mgnt = &rtwdev->hal.entity_mgnt; 9131 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 9132 struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op; 9133 struct rtw89_vif *tmp; 9134 9135 ext->set = false; 9136 if (!RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_EXTRA_OP, &rtwdev->fw)) 9137 return; 9138 9139 list_for_each_entry(tmp, &mgnt->active_list, mgnt_entry) { 9140 const struct rtw89_chan *tmp_chan; 9141 struct rtw89_vif_link *tmp_link; 9142 
9143 if (tmp == scan_rtwvif) 9144 continue; 9145 9146 tmp_link = rtw89_get_designated_link(tmp); 9147 if (unlikely(!tmp_link)) 9148 continue; 9149 9150 tmp_chan = rtw89_chan_get(rtwdev, tmp_link->chanctx_idx); 9151 *ext = (struct rtw89_hw_scan_extra_op){ 9152 .set = true, 9153 .macid = tmp_link->mac_id, 9154 .port = tmp_link->port, 9155 .chan = *tmp_chan, 9156 .rtwvif_link = tmp_link, 9157 }; 9158 9159 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 9160 "hw scan: extra op: center %d primary %d\n", 9161 ext->chan.channel, ext->chan.primary_channel); 9162 break; 9163 } 9164 } 9165 9166 int rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 9167 struct rtw89_vif_link *rtwvif_link, 9168 struct ieee80211_scan_request *scan_req) 9169 { 9170 enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev); 9171 struct cfg80211_scan_request *req = &scan_req->req; 9172 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 9173 rtwvif_link->chanctx_idx); 9174 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 9175 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 9176 struct rtw89_chanctx_pause_parm pause_parm = { 9177 .rsn = RTW89_CHANCTX_PAUSE_REASON_HW_SCAN, 9178 .trigger = rtwvif_link, 9179 }; 9180 u32 rx_fltr = rtwdev->hal.rx_fltr; 9181 u8 mac_addr[ETH_ALEN]; 9182 int ret; 9183 9184 /* clone op and keep it during scan */ 9185 rtwdev->scan_info.op_chan = *chan; 9186 9187 rtw89_debug(rtwdev, RTW89_DBG_HW_SCAN, 9188 "hw scan: op: center %d primary %d\n", 9189 chan->channel, chan->primary_channel); 9190 9191 rtw89_hw_scan_set_extra_op_info(rtwdev, rtwvif, chan); 9192 9193 rtwdev->scan_info.connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 9194 rtwdev->scan_info.scanning_vif = rtwvif_link; 9195 rtwdev->scan_info.abort = false; 9196 rtwdev->scan_info.delay = 0; 9197 rtwvif->scan_ies = &scan_req->ies; 9198 rtwvif->scan_req = req; 9199 9200 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 9201 get_random_mask_addr(mac_addr, req->mac_addr, 9202 req->mac_addr_mask); 9203 else 
if (ieee80211_vif_is_mld(vif))
		ether_addr_copy(mac_addr, vif->addr);
	else
		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);

	ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, mac_addr);
	if (ret) {
		rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);
		return ret;
	}

	ieee80211_stop_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false);

	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);

	/* Accept frames that would normally be filtered while scanning. */
	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;

	rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rx_fltr);

	rtw89_chanctx_pause(rtwdev, &pause_parm);
	rtw89_phy_dig_suspend(rtwdev);

	if (mode == RTW89_ENTITY_MODE_MCC)
		rtw89_hw_scan_update_beacon_noa(rtwdev, true);

	return 0;
}

/* Context passed from rtw89_hw_scan_complete() to its chanctx callback. */
struct rtw89_hw_scan_complete_cb_data {
	struct rtw89_vif_link *rtwvif_link;
	bool aborted;
};

/* Undo rtw89_hw_scan_start(): restore the RX filter, report completion to
 * mac80211, wake TX queues and resume normal operation. Runs as a chanctx
 * callback so it executes in the right sequence relative to channel setup.
 */
static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
	enum rtw89_entity_mode mode = rtw89_get_entity_mode(rtwdev);
	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
	struct cfg80211_scan_info info = {
		.aborted = cb_data->aborted,
	};

	if (!rtwvif_link)
		return -EINVAL;

	rtw89_mac_set_rx_fltr(rtwdev, rtwvif_link->mac_idx, rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);
	rtw89_phy_dig_resume(rtwdev, true);

	rtw89_hw_scan_cleanup(rtwdev, rtwvif_link);

	if (mode == RTW89_ENTITY_MODE_MCC)
		rtw89_hw_scan_update_beacon_noa(rtwdev, false);

	return 0;
}

void
rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
		       struct rtw89_vif_link *rtwvif_link,
		       bool aborted)
{
	struct rtw89_hw_scan_complete_cb_data cb_data = {
		.rtwvif_link = rtwvif_link,
		.aborted = aborted,
	};
	const struct rtw89_chanctx_cb_parm cb_parm = {
		.cb = rtw89_hw_scan_complete_cb,
		.data = &cb_data,
		.caller = __func__,
	};

	/* The things here need to be done after setting channel (for coex)
	 * and before proceeding entity mode (for MCC). So, pass a callback
	 * of them for the right sequence rather than doing them directly.
	 */
	rtw89_chanctx_proceed(rtwdev, &cb_parm);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning, which is safe
	 * because scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
	 * work properly.
	 */
	rtw89_hw_scan_complete(rtwdev, rtwvif_link, true);
}

/* A non-zero BSSID on any link implies that link is connected or in the
 * middle of connecting.
 */
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif_link *rtwvif_link;
	struct rtw89_vif *rtwvif;
	unsigned int link_id;

	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
		rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) {
			/* This variable implies connected or during attempt to connect */
			if (!is_zero_ether_addr(rtwvif_link->bssid))
				return true;
		}
	}

	return false;
}

/* Send the scan-offload command to firmware. On enable, the channel list is
 * programmed first; BE-generation chips additionally take extended scan
 * options (operation, band, MLO mode and op-channel bookkeeping).
 */
int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev,
			  struct rtw89_vif_link *rtwvif_link,
			  bool enable)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	const struct rtw89_hw_scan_extra_op *ext = &scan_info->extra_op;
	struct rtw89_scan_option opt = {0};
	bool connected;
	int ret = 0;

	if (!rtwvif_link)
		return -EINVAL;

	connected = rtwdev->scan_info.connected;
	opt.enable = enable;
	opt.target_ch_mode = connected;
	opt.delay = rtwdev->scan_info.delay;
	if (enable) {
		ret = mac->add_chan_list(rtwdev, rtwvif_link);
		if (ret)
			goto out;
	}

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
		opt.scan_mode = RTW89_SCAN_MODE_SA;
		opt.band = rtwvif_link->mac_idx;
		opt.num_macc_role = 0;
		opt.mlo_mode = rtwdev->mlo_dbcc_mode;
		/* One op channel when connected, plus one if an extra op
		 * link was recorded by rtw89_hw_scan_set_extra_op_info().
		 */
		opt.num_opch = connected ? 1 : 0;
		if (connected && ext->set)
			opt.num_opch++;

		opt.opch_end = connected ?
			       0 : RTW89_CHAN_INVALID;
	}

	ret = rtw89_mac_scan_offload(rtwdev, &opt, rtwvif_link, false);

out:
	return ret;
}

#define H2C_FW_CPU_EXCEPTION_TYPE_0 0x5566
#define H2C_FW_CPU_EXCEPTION_TYPE_1 0x0
/* Deliberately crash the firmware CPU (for debug/testing). The exception
 * type magic depends on which CRASH_TRIGGER feature the firmware reports;
 * returns -EOPNOTSUPP when neither is supported.
 */
int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev)
{
	struct rtw89_h2c_trig_cpu_except *h2c;
	u32 cpu_exception_type_def;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_1, &rtwdev->fw))
		cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_1;
	else if (RTW89_CHK_FW_FEATURE(CRASH_TRIGGER_TYPE_0, &rtwdev->fw))
		cpu_exception_type_def = H2C_FW_CPU_EXCEPTION_TYPE_0;
	else
		return -EOPNOTSUPP;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for fw cpu exception\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_trig_cpu_except *)skb->data;

	h2c->w0 = le32_encode_bits(cpu_exception_type_def,
				   RTW89_H2C_CPU_EXCEPTION_TYPE);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_TEST,
			      H2C_CL_FW_STATUS_TEST,
			      H2C_FUNC_CPU_EXCEPTION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

#define H2C_PKT_DROP_LEN 24
/* Ask firmware to drop queued TX packets selected by @params->sel
 * (per-macid AC queues or a whole band).
 */
int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev,
			  const struct rtw89_pkt_drop_params *params)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for packet drop\n");
		return -ENOMEM;
	}

	/* Unknown selectors are still sent; just log that support is unsure. */
	switch (params->sel) {
	case RTW89_PKT_DROP_SEL_MACID_BE_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_BK_ONCE:
	case
	     RTW89_PKT_DROP_SEL_MACID_VI_ONCE:
	case RTW89_PKT_DROP_SEL_MACID_VO_ONCE:
	case RTW89_PKT_DROP_SEL_BAND_ONCE:
		break;
	default:
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "H2C of pkt drop might not fully support sel: %d yet\n",
			    params->sel);
		break;
	}

	skb_put(skb, H2C_PKT_DROP_LEN);
	RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel);
	RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid);
	RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band);
	RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port);
	RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid);
	RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data,
						  params->macid_band_sel[0]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data,
						  params->macid_band_sel[1]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data,
						  params->macid_band_sel[2]);
	RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data,
						  params->macid_band_sel[3]);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_PKT_DROP, 0, 0,
			      H2C_PKT_DROP_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}

#define H2C_KEEP_ALIVE_LEN 4
/* Enable/disable FW keep-alive: firmware periodically transmits the
 * pre-offloaded NULL-data frame (period hard-coded to 5 below) for this
 * link's macid while the host sleeps.
 */
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			    bool enable)
{
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
						   &pkt_id);
		if (ret)
			/* NOTE(review): original error code from
			 * rtw89_fw_h2c_add_general_pkt() is folded to -EPERM
			 * here — confirm callers do not rely on the precise
			 * code before propagating ret instead.
			 */
			return -EPERM;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_KEEP_ALIVE_LEN);

	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_KEEP_ALIVE, 0, 1,
			      H2C_KEEP_ALIVE_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Enable/disable FW ARP offload: firmware answers ARP requests with the
 * pre-offloaded ARP response while the host sleeps.
 */
int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			     bool enable)
{
	struct rtw89_h2c_arp_offload *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_ARP_RSP,
						   &pkt_id);
		if (ret)
			return ret;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_arp_offload *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
		  le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
		  le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_ARP_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
dev_kfree_skb_any(skb); 9574 9575 return ret; 9576 } 9577 9578 #define H2C_DISCONNECT_DETECT_LEN 8 9579 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 9580 struct rtw89_vif_link *rtwvif_link, bool enable) 9581 { 9582 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 9583 struct sk_buff *skb; 9584 u8 macid = rtwvif_link->mac_id; 9585 int ret; 9586 9587 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 9588 if (!skb) { 9589 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 9590 return -ENOMEM; 9591 } 9592 9593 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 9594 9595 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 9596 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 9597 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 9598 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 9599 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 9600 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 9601 } 9602 9603 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 9604 H2C_CAT_MAC, 9605 H2C_CL_MAC_WOW, 9606 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 9607 H2C_DISCONNECT_DETECT_LEN); 9608 9609 ret = rtw89_h2c_tx(rtwdev, skb, false); 9610 if (ret) { 9611 rtw89_err(rtwdev, "failed to send h2c\n"); 9612 goto fail; 9613 } 9614 9615 return 0; 9616 9617 fail: 9618 dev_kfree_skb_any(skb); 9619 9620 return ret; 9621 } 9622 9623 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 9624 bool enable) 9625 { 9626 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 9627 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 9628 struct rtw89_h2c_cfg_nlo *h2c; 9629 u32 len = sizeof(*h2c); 9630 struct sk_buff *skb; 9631 int ret, i; 9632 9633 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 9634 if (!skb) { 9635 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 9636 return -ENOMEM; 9637 } 9638 9639 skb_put(skb, len); 9640 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 9641 9642 
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
		  le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);

	if (enable) {
		/* Copy each sched-scan match-set SSID into the command. */
		h2c->nlo_cnt = nd_config->n_match_sets;
		for (i = 0 ; i < nd_config->n_match_sets; i++) {
			h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
			memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
			       nd_config->match_sets[i].ssid.ssid_len);
		}
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_NLO, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}

/* Master WoWLAN enable/disable: tells firmware the macid plus pairwise/group
 * cipher algorithms and the key material snapshot held in rtw_wow->key_info.
 */
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			    bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_h2c_wow_global *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_global *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
		  le32_encode_bits(rtw_wow->ptk_alg,
				   RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
		  le32_encode_bits(rtw_wow->gtk_alg,
				   RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
	h2c->key_info = rtw_wow->key_info;

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_GLOBAL, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if
(ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Select which WoWLAN wake sources are armed for @rtwvif_link: pattern
 * match (if patterns were registered), magic packet (plus MLD magic on
 * multi-link vifs) and deauth/disconnect, according to rtw_wow flags.
 */
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif_link *rtwvif_link,
				 bool enable)
{
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_h2c_wow_wakeup_ctrl *h2c;
	struct sk_buff *skb;
	u32 len = sizeof(*h2c);
	u8 macid = rtwvif_link->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_wakeup_ctrl *)skb->data;

	if (rtw_wow->pattern_cnt)
		h2c->w0 |= le32_encode_bits(enable,
					    RTW89_H2C_WOW_WAKEUP_CTRL_W0_PATTERN_MATCH_ENABLE);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) {
		h2c->w0 |= le32_encode_bits(enable,
					    RTW89_H2C_WOW_WAKEUP_CTRL_W0_MAGIC_ENABLE);
		if (ieee80211_vif_is_mld(vif))
			h2c->w0 |= le32_encode_bits(enable,
						    RTW89_H2C_WOW_WAKEUP_CTRL_W0_MAGIC_MLD_ENABLE);
	}

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		h2c->w0 |= le32_encode_bits(enable,
					    RTW89_H2C_WOW_WAKEUP_CTRL_W0_DEAUTH_ENABLE);

	h2c->w0 |= le32_encode_bits(macid, RTW89_H2C_WOW_WAKEUP_CTRL_W0_MAC_ID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Read or write one WoW pattern-CAM entry described by @cam_info. */
int rtw89_fw_h2c_wow_cam_update(struct rtw89_dev *rtwdev,
				struct rtw89_wow_cam_info *cam_info)
{
	struct rtw89_h2c_wow_cam_update *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_cam_update *)skb->data;

	h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_CAM_UPD_W0_R_W) |
		  le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_CAM_UPD_W0_IDX);

	/* Invalid entries only carry the valid bit; skip mask/match fields. */
	if (!cam_info->valid)
		goto fill_valid;

	h2c->wkfm0 = cam_info->mask[0];
	h2c->wkfm1 = cam_info->mask[1];
	h2c->wkfm2 = cam_info->mask[2];
	h2c->wkfm3 = cam_info->mask[3];
	h2c->w5 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_CAM_UPD_W5_CRC) |
		  le32_encode_bits(cam_info->negative_pattern_match,
				   RTW89_H2C_WOW_CAM_UPD_W5_NEGATIVE_PATTERN_MATCH) |
		  le32_encode_bits(cam_info->skip_mac_hdr,
				   RTW89_H2C_WOW_CAM_UPD_W5_SKIP_MAC_HDR) |
		  le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_CAM_UPD_W5_UC) |
		  le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_CAM_UPD_W5_MC) |
		  le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_CAM_UPD_W5_BC);
fill_valid:
	h2c->w5 |= le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_CAM_UPD_W5_VALID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update);

/* v1 variant of the WoW pattern-CAM update using the payload-CAM command
 * layout (separate words for crc/negative-match and an extra w8 wow-ptr bit).
 */
int rtw89_fw_h2c_wow_cam_update_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_wow_cam_info *cam_info)
{
	struct rtw89_h2c_wow_payload_cam_update *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb =
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow payload cam update\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_payload_cam_update *)skb->data;

	h2c->w0 = le32_encode_bits(cam_info->r_w, RTW89_H2C_WOW_PLD_CAM_UPD_W0_R_W) |
		  le32_encode_bits(cam_info->idx, RTW89_H2C_WOW_PLD_CAM_UPD_W0_IDX);
	h2c->w8 = le32_encode_bits(cam_info->valid, RTW89_H2C_WOW_PLD_CAM_UPD_W8_VALID) |
		  le32_encode_bits(1, RTW89_H2C_WOW_PLD_CAM_UPD_W8_WOW_PTR);

	/* Invalid entries only carry w0/w8; skip mask/match fields. */
	if (!cam_info->valid)
		goto done;

	h2c->wkfm0 = cam_info->mask[0];
	h2c->wkfm1 = cam_info->mask[1];
	h2c->wkfm2 = cam_info->mask[2];
	h2c->wkfm3 = cam_info->mask[3];
	h2c->w5 = le32_encode_bits(cam_info->uc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_UC) |
		  le32_encode_bits(cam_info->mc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_MC) |
		  le32_encode_bits(cam_info->bc, RTW89_H2C_WOW_PLD_CAM_UPD_W5_BC) |
		  le32_encode_bits(cam_info->skip_mac_hdr,
				   RTW89_H2C_WOW_PLD_CAM_UPD_W5_SKIP_MAC_HDR);
	h2c->w6 = le32_encode_bits(cam_info->crc, RTW89_H2C_WOW_PLD_CAM_UPD_W6_CRC);
	h2c->w7 = le32_encode_bits(cam_info->negative_pattern_match,
				   RTW89_H2C_WOW_PLD_CAM_UPD_W7_NEGATIVE_PATTERN_MATCH);

done:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_PLD_CAM_UPD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_wow_cam_update_v1);

/* Configure FW GTK rekey offload for WoWLAN: offloads the EAPOL 2/2 reply
 * (and SA Query reply when PMF/IGTK is in use) so firmware can complete
 * group-key handshakes while the host sleeps. No-op when no group cipher
 * is configured.
 */
int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
			      struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
	struct
rtw89_h2c_wow_gtk_ofld *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	u8 pkt_id_sa_query = 0;
	struct sk_buff *skb;
	u8 pkt_id_eapol = 0;
	int ret;

	if (!rtw_wow->gtk_alg)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;

	/* On disable, send the (zeroed) command without offloading packets. */
	if (!enable)
		goto hdr;

	ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
					   RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
					   &pkt_id_eapol);
	if (ret)
		goto fail;

	/* A non-zero IGTK key id indicates PMF: also offload SA Query reply. */
	if (gtk_info->igtk_keyid) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link,
						   RTW89_PKT_OFLD_TYPE_SA_QUERY,
						   &pkt_id_sa_query);
		if (ret)
			goto fail;
	}

	/* TKIP handling is enabled iff a TX MIC key is present (non-zero). */
	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
		  le32_encode_bits(!!memchr_inv(gtk_info->txmickey, 0,
						sizeof(gtk_info->txmickey)),
				   RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
		  le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
		  le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
	h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ?
				   pkt_id_sa_query : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
		  le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
	h2c->gtk_info = rtw_wow->gtk_info;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_GTK_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Enable/disable firmware IPS (inactive power save) for this link's macid
 * and wait for the firmware acknowledgment.
 */
int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		       bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
	struct rtw89_h2c_fwips *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_fwips *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_IPS_CFG, 0, 1,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
}

/* Request the AOAC (after-wake) report from firmware and wait for the
 * corresponding C2H to arrive.
 */
int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
	struct rtw89_wait_info *wait = &rtwdev->wow.wait;
	struct rtw89_h2c_wow_aoac *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
		return -ENOMEM;
	}

	skb_put(skb, len);

	/* This H2C only notifies firmware to generate AOAC report C2H,
	 * no need any parameter.
	 */
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}

/* Return < 0, if failures happen during waiting for the condition.
 * Return 0, when waiting for the condition succeeds.
 * Return > 0, if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
 */
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond)
{
	struct rtw89_wait_response *prep;
	int ret = 0;

	lockdep_assert_wiphy(rtwdev->hw->wiphy);

	prep = rtw89_wait_for_cond_prep(wait, cond);
	if (IS_ERR(prep))
		goto out;

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		ret = -EBUSY;
		goto out;
	}

	/* During SER the answer will never come; report "unreachable". */
	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) {
		ret = 1;
		goto out;
	}

out:
	return rtw89_wait_for_cond_eval(wait, prep, ret);
}

#define H2C_ADD_MCC_LEN 16
/* Add one MCC (multi-channel concurrency) role to firmware and wait for
 * the firmware acknowledgment for this group.
 */
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mcc_add_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for add mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_ADD_MCC_LEN);
	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_ADD_MCC, 0, 0,
			      H2C_ADD_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_START_MCC_LEN 12
/* Start an MCC group at the given TSF time and wait for the firmware
 * acknowledgment for this group.
 */
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mcc_start_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for start mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_START_MCC_LEN);
	RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action);
	RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt);
	RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en);
	RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low);
	RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_START_MCC, 0, 0,
			      H2C_START_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_STOP_MCC_LEN 4
/* Stop an MCC group (optionally including previous groups) and wait for
 * the firmware acknowledgment.
 */
int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid,
			  bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for stop mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_STOP_MCC_LEN);
	RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid);
	RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_STOP_MCC, 0, 0,
			      H2C_STOP_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_DEL_MCC_GROUP_LEN 4
/* Delete an MCC group (optionally including previous groups) and wait for
 * the firmware acknowledgment.
 */
int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group,
			       bool prev_groups)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for del mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DEL_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_DEL_MCC_GROUP, 0, 0,
			      H2C_DEL_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_RESET_MCC_GROUP_LEN 4
/* Reset an MCC group and wait for the firmware acknowledgment. */
int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for reset mcc group\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_RESET_MCC_GROUP_LEN);
	RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_RESET_MCC_GROUP, 0, 0,
			      H2C_RESET_MCC_GROUP_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_REQ_TSF_LEN 4
/* Query firmware for the TSF of two macids in an MCC group; on success
 * the report is copied from the wait buffer into @rpt.
 */
int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mcc_tsf_req *req,
			     struct rtw89_mac_mcc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_mac_mcc_tsf_rpt *tmp;
10234 struct sk_buff *skb; 10235 unsigned int cond; 10236 int ret; 10237 10238 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 10239 if (!skb) { 10240 rtw89_err(rtwdev, 10241 "failed to alloc skb for mcc req tsf\n"); 10242 return -ENOMEM; 10243 } 10244 10245 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 10246 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 10247 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 10248 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 10249 10250 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 10251 H2C_CAT_MAC, 10252 H2C_CL_MCC, 10253 H2C_FUNC_MCC_REQ_TSF, 0, 0, 10254 H2C_MCC_REQ_TSF_LEN); 10255 10256 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 10257 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 10258 if (ret) 10259 return ret; 10260 10261 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 10262 *rpt = *tmp; 10263 10264 return 0; 10265 } 10266 10267 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 10268 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 10269 u8 *bitmap) 10270 { 10271 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 10272 struct sk_buff *skb; 10273 unsigned int cond; 10274 u8 map_len; 10275 u8 h2c_len; 10276 10277 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 10278 map_len = RTW89_MAX_MAC_ID_NUM / 8; 10279 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 10280 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 10281 if (!skb) { 10282 rtw89_err(rtwdev, 10283 "failed to alloc skb for mcc macid bitmap\n"); 10284 return -ENOMEM; 10285 } 10286 10287 skb_put(skb, h2c_len); 10288 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 10289 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 10290 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 10291 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 10292 10293 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 10294 H2C_CAT_MAC, 
10295 H2C_CL_MCC, 10296 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 10297 h2c_len); 10298 10299 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 10300 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 10301 } 10302 10303 #define H2C_MCC_SYNC_LEN 4 10304 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 10305 u8 target, u8 offset) 10306 { 10307 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 10308 struct sk_buff *skb; 10309 unsigned int cond; 10310 10311 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 10312 if (!skb) { 10313 rtw89_err(rtwdev, 10314 "failed to alloc skb for mcc sync\n"); 10315 return -ENOMEM; 10316 } 10317 10318 skb_put(skb, H2C_MCC_SYNC_LEN); 10319 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 10320 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 10321 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 10322 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 10323 10324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 10325 H2C_CAT_MAC, 10326 H2C_CL_MCC, 10327 H2C_FUNC_MCC_SYNC, 0, 0, 10328 H2C_MCC_SYNC_LEN); 10329 10330 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 10331 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 10332 } 10333 10334 #define H2C_MCC_SET_DURATION_LEN 20 10335 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 10336 const struct rtw89_fw_mcc_duration *p) 10337 { 10338 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 10339 struct sk_buff *skb; 10340 unsigned int cond; 10341 10342 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 10343 if (!skb) { 10344 rtw89_err(rtwdev, 10345 "failed to alloc skb for mcc set duration\n"); 10346 return -ENOMEM; 10347 } 10348 10349 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 10350 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 10351 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 10352 
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	/* Block until the firmware acks this MCC function for the group. */
	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

/* Encode one MRC slot descriptor into @slot_h2c and return the encoded
 * size in bytes. Dual purpose: when @slot_h2c is NULL, nothing is written
 * and only the size (fixed part + role_num trailing role entries) is
 * computed, so callers can pre-size the H2C buffer.
 */
static
u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
			      struct rtw89_h2c_mrc_add_slot *slot_h2c)
{
	bool fill_h2c = !!slot_h2c;
	unsigned int i;

	if (!fill_h2c)
		goto calc_len;

	slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
					RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
		       le32_encode_bits(slot_arg->courtesy_en,
					RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
		       le32_encode_bits(slot_arg->role_num,
					RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
	slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
		       le32_encode_bits(slot_arg->courtesy_target,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);

	for (i = 0; i < slot_arg->role_num; i++) {
		slot_h2c->roles[i].w0 =
			le32_encode_bits(slot_arg->roles[i].macid,
					 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
			le32_encode_bits(slot_arg->roles[i].role_type,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
			le32_encode_bits(slot_arg->roles[i].is_master,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
			le32_encode_bits(slot_arg->roles[i].en_tx_null,
					 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
			/* alt-role handling is not used by the driver */
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
		slot_h2c->roles[i].w1 =
			le32_encode_bits(slot_arg->roles[i].central_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
			le32_encode_bits(slot_arg->roles[i].primary_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
			le32_encode_bits(slot_arg->roles[i].bw,
					 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
			le32_encode_bits(slot_arg->roles[i].band,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
			le32_encode_bits(slot_arg->roles[i].null_early,
					 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
			le32_encode_bits(true,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
		slot_h2c->roles[i].macid_main_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
		slot_h2c->roles[i].macid_paired_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
	}

calc_len:
	return struct_size(slot_h2c, roles, slot_arg->role_num);
}

/* Add an MRC schedule: a header word followed by slot_num variable-sized
 * slot descriptors. First pass (NULL) sizes the command, second pass fills
 * it. Fire-and-forget: no wait for a firmware ack.
 */
int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mrc_add_arg *arg)
{
	struct rtw89_h2c_mrc_add *h2c_head;
	struct sk_buff *skb;
	unsigned int i;
	void *tmp;
	u32 len;
	int ret;

	/* Sizing pass: slots are variable length, so sum them up first. */
	len = sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	tmp = skb->data;

	h2c_head = tmp;
	h2c_head->w0 = le32_encode_bits(arg->sch_idx,
					RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
		       le32_encode_bits(arg->sch_type,
					RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
		       le32_encode_bits(arg->slot_num,
					RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
		       le32_encode_bits(arg->btc_in_sch,
					RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);

	/* Fill pass: append each slot right after the header. */
	tmp += sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_ADD_MRC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

/* Start (or switch to) an MRC schedule at the given TSF and wait for the
 * firmware ack keyed by @arg->sch_idx.
 */
int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mrc_start_arg *arg)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_start *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_start *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_START_W0_SCH_IDX) |
		  le32_encode_bits(arg->old_sch_idx,
				   RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
		  le32_encode_bits(arg->action,
				   RTW89_H2C_MRC_START_W0_ACTION);

	/* 64-bit start TSF is carried as two little-endian 32-bit words. */
	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_START_MRC, 0, 0,
			      len);

	cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

/* Delete (stop) an MRC schedule, optionally at a specific slot, and wait
 * for the firmware ack keyed by @sch_idx.
 */
int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_del *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_del *)skb->data;

	h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
		  le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_DEL_MRC, 0, 0,
			      len);

	cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

/* Query firmware for the TSF of each (band, port) pair in @arg.
 * On success the report is copied out of the wait buffer into @rpt.
 */
int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mrc_req_tsf_arg *arg,
			     struct rtw89_mac_mrc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_req_tsf *h2c;
	struct rtw89_mac_mrc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	/* Command carries arg->num trailing per-request info bytes. */
	len = struct_size(h2c, infos, arg->num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;

	h2c->req_tsf_num = arg->num;
	for (i = 0; i < arg->num; i++)
		h2c->infos[i] =
			u8_encode_bits(arg->infos[i].band,
				       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
			u8_encode_bits(arg->infos[i].port,
				       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_REQ_TSF, 0, 0,
			      len);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
	if (ret)
		return ret;

	/* The C2H completion handler left the report in the wait buffer. */
	tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

/* Update the macid bitmap of an MRC schedule (add/delete a client macid).
 * Fire-and-forget: no wait for a firmware ack.
 */
int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
				const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
{
	struct rtw89_h2c_mrc_upd_bitmap *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
		  le32_encode_bits(arg->action,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
		  le32_encode_bits(arg->macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
	h2c->w1 = le32_encode_bits(arg->client_macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

/* Request firmware to sync the destination (band, port) to the source
 * (band, port) with the given offset. Fire-and-forget: no wait for ack.
 */
int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
			  const struct rtw89_fw_mrc_sync_arg *arg)
{
	struct rtw89_h2c_mrc_sync *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_sync *)skb->data;

	h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
		  le32_encode_bits(arg->src.port,
				   RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
		  le32_encode_bits(arg->src.band,
				   RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
		  le32_encode_bits(arg->dest.port,
				   RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
		  le32_encode_bits(arg->dest.band,
				   RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
	h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_SYNC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

/* Update the durations of selected slots of an MRC schedule starting at
 * the given TSF. Fire-and-forget: no wait for a firmware ack.
 */
int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
{
	struct rtw89_h2c_mrc_upd_duration *h2c;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	/* Command carries arg->slot_num trailing per-slot words. */
	len = struct_size(h2c, slots, arg->slot_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
		  le32_encode_bits(arg->slot_num,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
		  le32_encode_bits(false,
				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);

	/* 64-bit start TSF is carried as two little-endian 32-bit words. */
	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	for (i = 0; i < arg->slot_num; i++) {
		h2c->slots[i] =
			le32_encode_bits(arg->slots[i].slot_idx,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
			le32_encode_bits(arg->slots[i].duration,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

/* Enable/disable the firmware AP-info power interrupt via H2C.
 * Fire-and-forget: no wait for a firmware ack.
 */
static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en)
{
	struct rtw89_h2c_ap_info *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for ap info\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ap_info *)skb->data;

	h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_AP,
			      H2C_FUNC_AP_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

/* Reference-counted wrapper around rtw89_fw_h2c_ap_info(): only the first
 * enable and the last disable actually reach the firmware.
 */
int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en)
{
	int ret;

	if (en) {
		/* Already enabled by someone else: just bump the count. */
		if (refcount_inc_not_zero(&rtwdev->refcount_ap_info))
			return 0;
	} else {
		/* Still in use by someone else: just drop the count. */
		if (!refcount_dec_and_test(&rtwdev->refcount_ap_info))
			return 0;
	}

	ret = rtw89_fw_h2c_ap_info(rtwdev, en);
	if (ret) {
		if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
			return ret;

		/* During recovery, neither driver nor stack has full error
		 * handling, so show a warning, but return 0 with refcount
		 * increased normally. It can avoid underflow when calling
		 * with @en == false later.
		 */
		rtw89_warn(rtwdev, "h2c ap_info failed during SER\n");
	}

	if (en)
		refcount_set(&rtwdev->refcount_ap_info, 1);

	return 0;
}

/* Enable/disable an MLO link in firmware for @rtwvif_link and wait for the
 * firmware ack keyed by the link's mac_id.
 */
int rtw89_fw_h2c_mlo_link_cfg(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			      bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mlo.wait;
	struct rtw89_h2c_mlo_link_cfg *h2c;
	u8 mac_id = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mlo link cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mlo_link_cfg *)skb->data;

	h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_MLO_LINK_CFG_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_MLO_LINK_CFG_W0_OPTION);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MLO,
			      H2C_FUNC_MLO_LINK_CFG, 0, 0,
			      len);

	cond = RTW89_MLO_WAIT_COND(mac_id, H2C_FUNC_MLO_LINK_CFG);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret) {
		rtw89_err(rtwdev, "mlo link cfg (%s link id %u) failed: %d\n",
			  str_enable_disable(enable), rtwvif_link->link_id, ret);
		return ret;
	}

	return 0;
}

/* Return true iff the @ext_len bytes at @ext_ptr are all zero.
 * zeros[] is sized U8_MAX, the maximum value of @ext_len.
 */
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
	static const u8 zeros[U8_MAX] = {};

	return memcmp(ext_ptr, zeros, ext_len) == 0;
}

/* Accept a TX-power table entry when the parsed struct covers the whole
 * on-file entry size, or when the extra (unknown) trailing bytes are all
 * zero - i.e. a newer table layout carrying nothing we would miss.
 */
#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
({							\
	u8 __var_sz = sizeof(*(e));			\
	bool __accept;					\
	if (__var_sz >= (ent_sz))			\
		__accept = true;			\
	else						\
		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
						     (ent_sz) - __var_sz);\
	__accept; \
})

/* Validate one byrate entry: bounds-check every field that is later used
 * as an array index or rate descriptor before the entry is consumed.
 */
static bool
fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
		return false;

	/* Each rate section has its own index range for shf + len. */
	switch (e->rs) {
	case RTW89_RS_CCK:
		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
			return false;
		break;
	case RTW89_RS_OFDM:
		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
			return false;
		break;
	case RTW89_RS_MCS:
		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
		    e->nss >= RTW89_NSS_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_HEDCM:
		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
		    e->nss >= RTW89_NSS_HEDCM_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_OFFSET:
		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

/* Load firmware-provided TX power by-rate entries into rtwdev->byr[][],
 * skipping entries that fail validation.
 */
static
void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_conf *conf = tbl->data;
	struct rtw89_fw_txpwr_byrate_entry entry = {};
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	const void *cursor;
	u32 data;
	s8 *byr;
	int i;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
			continue;

		byr_head = &rtwdev->byr[entry.band][entry.bw];
		data = le32_to_cpu(entry.data);
		desc.ofdma = entry.ofdma;
		desc.nss = entry.nss;
		desc.rs = entry.rs;

		/* entry.data packs up to 4 per-rate bytes, LSB first. */
		for (i = 0; i < entry.len; i++, data >>= 8) {
			desc.idx = entry.shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}

/* Validate one 2 GHz TX power limit entry (all fields are array indices). */
static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_2G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

/* Load valid 2 GHz TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

/* Validate one 5 GHz TX power limit entry (all fields are array indices). */
static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_5G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

/* Load valid 5 GHz TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

/* Validate one 6 GHz TX power limit entry; 6 GHz adds a regulatory power
 * category (reg_6ghz_power) dimension on top of the 2/5 GHz checks.
 */
static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_6G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

/* Load valid 6 GHz TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

/* Validate one 2 GHz RU TX power limit entry. */
static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >=
	    RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

/* Load valid 2 GHz RU TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

/* Validate one 5 GHz RU TX power limit entry. */
static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

/* Load valid 5 GHz RU TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

/* Validate one 6 GHz RU TX power limit entry; includes the 6 GHz
 * regulatory power category dimension.
 */
static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

/* Load valid 6 GHz RU TX power limit entries into the data table. */
static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

/* Validate one TX shape limit entry. */
static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

/* Load valid TX shape limit entries into the data table. */
static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

/* Validate one RU TX shape limit entry. */
static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

/* Load valid RU TX shape limit entries into the data table. */
static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

/* Return true only if every band the chip supports has both a DA limit
 * table and a DA RU limit table loaded.
 */
static bool rtw89_fw_has_da_txpwr_table(struct rtw89_dev *rtwdev,
					const struct rtw89_rfe_parms *parms)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	if (chip->support_bands & BIT(NL80211_BAND_2GHZ) &&
	    !(parms->rule_da_2ghz.lmt && parms->rule_da_2ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_5GHZ) &&
	    !(parms->rule_da_5ghz.lmt && parms->rule_da_5ghz.lmt_ru))
		return false;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ) &&
	    !(parms->rule_da_6ghz.lmt && parms->rule_da_6ghz.lmt_ru))
		return false;

	return true;
}

/* Build the effective RFE parameters: start from @init and override each
 * table for which the loaded firmware file provided a valid TX power
 * configuration. Returns @init unchanged when no FW RFE data exists.
 */
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	/* DA (device-adaptive) limit tables reuse the per-band loaders. */
	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->da_lmt_2ghz);
		parms->rule_da_2ghz.lmt = &rfe_data->da_lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->da_lmt_5ghz);
		parms->rule_da_5ghz.lmt = &rfe_data->da_lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->da_lmt_6ghz);
		parms->rule_da_6ghz.lmt = &rfe_data->da_lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->da_lmt_ru_2ghz);
		parms->rule_da_2ghz.lmt_ru = &rfe_data->da_lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->da_lmt_ru_5ghz);
		parms->rule_da_5ghz.lmt_ru = &rfe_data->da_lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->da_lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->da_lmt_ru_6ghz);
		parms->rule_da_6ghz.lmt_ru = &rfe_data->da_lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	/* DA rules are usable only when complete for all supported bands. */
	parms->has_da = rtw89_fw_has_da_txpwr_table(rtwdev, parms);

	return parms;
}