1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 struct rtw89_eapol_2_of_2 { 19 u8 gtkbody[14]; 20 u8 key_des_ver; 21 u8 rsvd[92]; 22 } __packed; 23 24 struct rtw89_sa_query { 25 u8 category; 26 u8 action; 27 } __packed; 28 29 struct rtw89_arp_rsp { 30 u8 llc_hdr[sizeof(rfc1042_header)]; 31 __be16 llc_type; 32 struct arphdr arp_hdr; 33 u8 sender_hw[ETH_ALEN]; 34 __be32 sender_ip; 35 u8 target_hw[ETH_ALEN]; 36 __be32 target_ip; 37 } __packed; 38 39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 40 41 union rtw89_fw_element_arg { 42 size_t offset; 43 enum rtw89_rf_path rf_path; 44 enum rtw89_fw_type fw_type; 45 }; 46 47 struct rtw89_fw_element_handler { 48 int (*fn)(struct rtw89_dev *rtwdev, 49 const struct rtw89_fw_element_hdr *elm, 50 const union rtw89_fw_element_arg arg); 51 const union rtw89_fw_element_arg arg; 52 const char *name; 53 }; 54 55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 56 struct sk_buff *skb); 57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 58 struct rtw89_wait_info *wait, unsigned int cond); 59 static int __parse_security_section(struct rtw89_dev *rtwdev, 60 struct rtw89_fw_bin_info *info, 61 struct rtw89_fw_hdr_section_info *section_info, 62 const void *content, 63 u32 *mssc_len); 64 65 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 66 bool header) 67 { 68 struct sk_buff *skb; 69 u32 header_len = 0; 70 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 71 72 if (header) 73 header_len = H2C_HEADER_LEN; 74 75 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 76 if (!skb) 77 return NULL; 78 skb_reserve(skb, header_len + h2c_desc_size); 79 memset(skb->data, 0, len); 80 81 return skb; 82 } 83 84 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 85 { 86 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 87 } 88 89 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 90 { 91 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 92 } 93 94 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 95 { 96 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 97 u8 val; 98 int ret; 99 100 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 101 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 102 1, FWDL_WAIT_CNT, false, rtwdev, type); 103 if (ret) { 104 switch (val) { 105 case RTW89_FWDL_CHECKSUM_FAIL: 106 rtw89_err(rtwdev, "fw checksum fail\n"); 107 return -EINVAL; 108 109 case RTW89_FWDL_SECURITY_FAIL: 110 rtw89_err(rtwdev, "fw security fail\n"); 111 return -EINVAL; 112 113 case RTW89_FWDL_CV_NOT_MATCH: 114 rtw89_err(rtwdev, "fw cv not match\n"); 115 return -EINVAL; 116 117 default: 118 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 119 return -EBUSY; 120 } 121 } 122 123 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 124 125 return 0; 126 } 127 128 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 129 struct rtw89_fw_bin_info *info) 130 { 131 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 132 const struct rtw89_chip_info *chip = rtwdev->chip; 133 struct rtw89_fw_hdr_section_info *section_info; 134 struct 
rtw89_fw_secure *sec = &rtwdev->fw.sec; 135 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 136 const struct rtw89_fw_hdr_section *section; 137 const u8 *fw_end = fw + len; 138 const u8 *bin; 139 u32 base_hdr_len; 140 u32 mssc_len; 141 int ret; 142 u32 i; 143 144 if (!info) 145 return -EINVAL; 146 147 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 148 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 149 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 150 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 151 152 if (info->dynamic_hdr_en) { 153 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 154 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 155 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 156 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 157 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 158 return -EINVAL; 159 } 160 } else { 161 info->hdr_len = base_hdr_len; 162 info->dynamic_hdr_len = 0; 163 } 164 165 bin = fw + info->hdr_len; 166 167 /* jump to section header */ 168 section_info = info->section_info; 169 for (i = 0; i < info->section_num; i++) { 170 section = &fw_hdr->sections[i]; 171 section_info->type = 172 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 173 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 174 175 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 176 section_info->len += FWDL_SECTION_CHKSUM_LEN; 177 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 178 section_info->dladdr = 179 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 180 section_info->addr = bin; 181 182 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 183 section_info->mssc = 184 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 185 186 ret = __parse_security_section(rtwdev, info, section_info, 187 bin, &mssc_len); 188 if (ret) 189 return ret; 190 191 if (sec->secure_boot && chip->chip_id == RTL8852B) 192 section_info->len_override = 960; 193 } else { 194 section_info->mssc = 0; 195 mssc_len = 0; 196 } 197 198 rtw89_debug(rtwdev, RTW89_DBG_FW, 199 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 200 i, section_info->type, section_info->len, 201 section_info->mssc, mssc_len, bin - fw); 202 rtw89_debug(rtwdev, RTW89_DBG_FW, 203 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 204 section_info->ignore, section_info->key_addr, 205 section_info->key_addr ? 
206 section_info->key_addr - section_info->addr : 0, 207 section_info->key_len, section_info->key_idx); 208 209 bin += section_info->len + mssc_len; 210 section_info++; 211 } 212 213 if (fw_end != bin) { 214 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 215 return -EINVAL; 216 } 217 218 return 0; 219 } 220 221 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 222 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 223 u32 rmp_tbl_size, u32 *key_idx) 224 { 225 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 226 u32 sel_byte_idx; 227 u32 mss_sel_idx; 228 u8 sel_bit_idx; 229 int i; 230 231 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 232 if (!mss_hdr->defen) 233 return -ENOENT; 234 235 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 236 sec->mss_key_num; 237 } else { 238 if (mss_hdr->defen) 239 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 240 else 241 mss_sel_idx = 0; 242 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 243 le16_to_cpu(mss_hdr->msscust_max) + 244 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 245 sec->mss_key_num; 246 } 247 248 sel_byte_idx = mss_sel_idx >> 3; 249 sel_bit_idx = mss_sel_idx & 0x7; 250 251 if (sel_byte_idx >= rmp_tbl_size) 252 return -EFAULT; 253 254 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 255 return -ENOENT; 256 257 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 258 259 for (i = 0; i < sel_byte_idx; i++) 260 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 261 262 return 0; 263 } 264 265 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 266 struct rtw89_fw_bin_info *info, 267 struct rtw89_fw_hdr_section_info *section_info, 268 const void *content, 269 u32 *mssc_len) 270 { 271 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 272 const union rtw89_fw_section_mssc_content *section_content = content; 273 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 274 u32 rmp_tbl_size; 275 u32 key_sign_len; 276 u32 real_key_idx; 277 u32 sb_sel_ver; 278 int ret; 279 280 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 281 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 282 return -ENOENT; 283 } 284 285 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 286 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 287 le16_to_cpu(mss_hdr->msscust_max) * 288 mss_hdr->mssdev_max) >> 3; 289 if (mss_hdr->defen) 290 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 291 } else { 292 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 293 mss_hdr->rmpfmt); 294 return -EINVAL; 295 } 296 297 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 298 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 299 rmp_tbl_size, (int)sizeof(*mss_hdr), 300 le32_to_cpu(mss_hdr->key_raw_offset)); 301 return -EINVAL; 302 } 303 304 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 305 if (!key_sign_len) 306 key_sign_len = 512; 307 308 if (info->dsp_checksum) 309 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 310 311 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 312 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 313 314 if (!sec->secure_boot) 315 goto out; 316 317 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 318 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 319 goto ignore; 320 321 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 322 if (ret) 323 goto ignore; 324 325 section_info->key_addr = content + 
section_info->len + 326 le32_to_cpu(mss_hdr->key_raw_offset) + 327 key_sign_len * real_key_idx; 328 section_info->key_len = key_sign_len; 329 section_info->key_idx = real_key_idx; 330 331 out: 332 if (info->secure_section_exist) { 333 section_info->ignore = true; 334 return 0; 335 } 336 337 info->secure_section_exist = true; 338 339 return 0; 340 341 ignore: 342 section_info->ignore = true; 343 344 return 0; 345 } 346 347 static int __parse_security_section(struct rtw89_dev *rtwdev, 348 struct rtw89_fw_bin_info *info, 349 struct rtw89_fw_hdr_section_info *section_info, 350 const void *content, 351 u32 *mssc_len) 352 { 353 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 354 int ret; 355 356 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 357 ret = __parse_formatted_mssc(rtwdev, info, section_info, 358 content, mssc_len); 359 if (ret) 360 return -EINVAL; 361 } else { 362 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 363 if (info->dsp_checksum) 364 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 365 366 if (sec->secure_boot) { 367 if (sec->mss_idx >= section_info->mssc) 368 return -EFAULT; 369 section_info->key_addr = content + section_info->len + 370 sec->mss_idx * FWDL_SECURITY_SIGLEN; 371 section_info->key_len = FWDL_SECURITY_SIGLEN; 372 } 373 374 info->secure_section_exist = true; 375 } 376 377 return 0; 378 } 379 380 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 381 struct rtw89_fw_bin_info *info) 382 { 383 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 384 struct rtw89_fw_hdr_section_info *section_info; 385 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 386 const struct rtw89_fw_hdr_section_v1 *section; 387 const u8 *fw_end = fw + len; 388 const u8 *bin; 389 u32 base_hdr_len; 390 u32 mssc_len; 391 int ret; 392 u32 i; 393 394 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 395 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 396 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 397 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 398 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 399 400 if (info->dynamic_hdr_en) { 401 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 402 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 403 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 404 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 405 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 406 return -EINVAL; 407 } 408 } else { 409 info->hdr_len = base_hdr_len; 410 info->dynamic_hdr_len = 0; 411 } 412 413 bin = fw + info->hdr_len; 414 415 /* jump to section header */ 416 section_info = info->section_info; 417 for (i = 0; i < info->section_num; i++) { 418 section = &fw_hdr->sections[i]; 419 420 section_info->type = 421 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 422 section_info->len = 423 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 424 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 425 section_info->len += FWDL_SECTION_CHKSUM_LEN; 426 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 427 section_info->dladdr = 428 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 429 section_info->addr = bin; 430 431 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 432 section_info->mssc = 433 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 434 435 
ret = __parse_security_section(rtwdev, info, section_info, 436 bin, &mssc_len); 437 if (ret) 438 return ret; 439 } else { 440 section_info->mssc = 0; 441 mssc_len = 0; 442 } 443 444 rtw89_debug(rtwdev, RTW89_DBG_FW, 445 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 446 i, section_info->type, section_info->len, 447 section_info->mssc, mssc_len, bin - fw); 448 rtw89_debug(rtwdev, RTW89_DBG_FW, 449 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 450 section_info->ignore, section_info->key_addr, 451 section_info->key_addr ? 452 section_info->key_addr - section_info->addr : 0, 453 section_info->key_len, section_info->key_idx); 454 455 bin += section_info->len + mssc_len; 456 section_info++; 457 } 458 459 if (fw_end != bin) { 460 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 461 return -EINVAL; 462 } 463 464 if (!info->secure_section_exist) 465 rtw89_warn(rtwdev, "no firmware secure section\n"); 466 467 return 0; 468 } 469 470 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 471 const struct rtw89_fw_suit *fw_suit, 472 struct rtw89_fw_bin_info *info) 473 { 474 const u8 *fw = fw_suit->data; 475 u32 len = fw_suit->size; 476 477 if (!fw || !len) { 478 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 479 return -ENOENT; 480 } 481 482 switch (fw_suit->hdr_ver) { 483 case 0: 484 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 485 case 1: 486 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 487 default: 488 return -ENOENT; 489 } 490 } 491 492 static 493 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 494 struct rtw89_fw_suit *fw_suit, bool nowarn) 495 { 496 struct rtw89_fw_info *fw_info = &rtwdev->fw; 497 const struct firmware *firmware = fw_info->req.firmware; 498 const u8 *mfw = firmware->data; 499 u32 mfw_len = firmware->size; 500 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 501 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 502 int i; 503 504 if (mfw_hdr->sig != RTW89_MFW_SIG) { 505 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 506 /* legacy firmware support normal type only */ 507 if (type != RTW89_FW_NORMAL) 508 return -EINVAL; 509 fw_suit->data = mfw; 510 fw_suit->size = mfw_len; 511 return 0; 512 } 513 514 for (i = 0; i < mfw_hdr->fw_nr; i++) { 515 tmp = &mfw_hdr->info[i]; 516 if (tmp->type != type) 517 continue; 518 519 if (type == RTW89_FW_LOGFMT) { 520 mfw_info = tmp; 521 goto found; 522 } 523 524 /* Version order of WiFi firmware in firmware file are not in order, 525 * pass all firmware to find the equal or less but closest version. 
526 */ 527 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 528 if (!mfw_info || mfw_info->cv < tmp->cv) 529 mfw_info = tmp; 530 } 531 } 532 533 if (mfw_info) 534 goto found; 535 536 if (!nowarn) 537 rtw89_err(rtwdev, "no suitable firmware found\n"); 538 return -ENOENT; 539 540 found: 541 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 542 fw_suit->size = le32_to_cpu(mfw_info->size); 543 return 0; 544 } 545 546 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 547 { 548 struct rtw89_fw_info *fw_info = &rtwdev->fw; 549 const struct firmware *firmware = fw_info->req.firmware; 550 const struct rtw89_mfw_hdr *mfw_hdr = 551 (const struct rtw89_mfw_hdr *)firmware->data; 552 const struct rtw89_mfw_info *mfw_info; 553 u32 size; 554 555 if (mfw_hdr->sig != RTW89_MFW_SIG) { 556 rtw89_warn(rtwdev, "not mfw format\n"); 557 return 0; 558 } 559 560 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 561 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 562 563 return size; 564 } 565 566 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 567 struct rtw89_fw_suit *fw_suit, 568 const struct rtw89_fw_hdr *hdr) 569 { 570 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 571 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 572 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 573 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 574 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 575 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 576 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 577 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 578 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 579 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 580 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 581 } 582 583 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 584 struct rtw89_fw_suit *fw_suit, 585 const struct rtw89_fw_hdr_v1 *hdr) 586 { 587 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 588 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 589 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 590 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 591 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 592 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 593 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 594 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 595 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 596 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 597 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 598 } 599 600 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 601 enum rtw89_fw_type type, 602 struct rtw89_fw_suit *fw_suit) 603 { 604 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 605 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 606 607 if (type == RTW89_FW_LOGFMT) 608 return 0; 609 610 fw_suit->type = type; 611 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 612 613 switch (fw_suit->hdr_ver) { 614 case 0: 615 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 616 break; 617 case 1: 618 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 619 break; 620 default: 621 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 622 
fw_suit->hdr_ver); 623 return -ENOENT; 624 } 625 626 rtw89_info(rtwdev, 627 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 628 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 629 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 630 631 return 0; 632 } 633 634 static 635 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 636 bool nowarn) 637 { 638 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 639 int ret; 640 641 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 642 if (ret) 643 return ret; 644 645 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 646 } 647 648 static 649 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 650 const struct rtw89_fw_element_hdr *elm, 651 const union rtw89_fw_element_arg arg) 652 { 653 enum rtw89_fw_type type = arg.fw_type; 654 struct rtw89_hal *hal = &rtwdev->hal; 655 struct rtw89_fw_suit *fw_suit; 656 657 /* Version of BB MCU is in decreasing order in firmware file, so take 658 * first equal or less version, which is equal or less but closest version. 659 */ 660 if (hal->cv < elm->u.bbmcu.cv) 661 return 1; /* ignore this element */ 662 663 fw_suit = rtw89_fw_suit_get(rtwdev, type); 664 if (fw_suit->data) 665 return 1; /* ignore this element (a firmware is taken already) */ 666 667 fw_suit->data = elm->u.bbmcu.contents; 668 fw_suit->size = le32_to_cpu(elm->size); 669 670 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 671 } 672 673 #define __DEF_FW_FEAT_COND(__cond, __op) \ 674 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 675 { \ 676 return suit_ver_code __op comp_ver_code; \ 677 } 678 679 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 680 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 681 __DEF_FW_FEAT_COND(lt, <); /* less than */ 682 683 struct __fw_feat_cfg { 684 enum rtw89_core_chip_id chip_id; 685 enum rtw89_fw_feature feature; 686 u32 ver_code; 687 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 688 }; 689 690 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 691 { \ 692 .chip_id = _chip, \ 693 .feature = RTW89_FW_FEATURE_ ## _feat, \ 694 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 695 .cond = __fw_feat_cond_ ## _cond, \ 696 } 697 698 static const struct __fw_feat_cfg fw_feat_tbl[] = { 699 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 700 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 701 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 702 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 703 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 704 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 705 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 706 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 707 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 708 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 709 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 710 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 711 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 712 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 713 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 714 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 715 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 716 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 717 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 718 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, 
BEACON_FILTER), 719 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 720 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 721 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 722 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 723 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 724 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 725 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 726 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 727 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 728 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 729 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 730 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 731 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 732 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 733 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 734 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 735 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 736 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 737 }; 738 739 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 740 const struct rtw89_chip_info *chip, 741 u32 ver_code) 742 { 743 int i; 744 745 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 746 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 747 748 if (chip->chip_id != ent->chip_id) 749 continue; 750 751 if (ent->cond(ver_code, ent->ver_code)) 752 RTW89_SET_FW_FEATURE(ent->feature, fw); 753 } 754 } 755 756 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 757 { 758 const struct rtw89_chip_info *chip = rtwdev->chip; 759 const struct rtw89_fw_suit *fw_suit; 760 u32 suit_ver_code; 761 762 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 763 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 764 765 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 766 } 767 768 const struct firmware * 769 rtw89_early_fw_feature_recognize(struct device *device, 770 const struct rtw89_chip_info *chip, 771 struct rtw89_fw_info *early_fw, 772 int *used_fw_format) 773 { 774 const struct firmware *firmware; 775 char fw_name[64]; 776 int fw_format; 777 u32 ver_code; 778 int ret; 779 780 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 781 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 782 chip->fw_basename, fw_format); 783 784 ret = request_firmware(&firmware, fw_name, device); 785 if (!ret) { 786 dev_info(device, "loaded firmware %s\n", fw_name); 787 *used_fw_format = fw_format; 788 break; 789 } 790 } 791 792 if (ret) { 793 dev_err(device, "failed to early request firmware: %d\n", ret); 794 return NULL; 795 } 796 797 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 798 799 if (!ver_code) 800 goto out; 801 802 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 803 804 out: 805 return firmware; 806 } 807 808 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 809 { 810 const struct rtw89_chip_info *chip = rtwdev->chip; 811 int ret; 812 813 if (chip->try_ce_fw) { 814 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 815 if (!ret) 816 goto normal_done; 817 } 818 819 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 820 if (ret) 821 return ret; 822 823 normal_done: 824 /* It still works if wowlan firmware isn't existing. */ 825 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 826 827 /* It still works if log format file isn't existing. 
*/ 828 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 829 830 rtw89_fw_recognize_features(rtwdev); 831 832 rtw89_coex_recognize_ver(rtwdev); 833 834 return 0; 835 } 836 837 static 838 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 839 const struct rtw89_fw_element_hdr *elm, 840 const union rtw89_fw_element_arg arg) 841 { 842 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 843 struct rtw89_phy_table *tbl; 844 struct rtw89_reg2_def *regs; 845 enum rtw89_rf_path rf_path; 846 u32 n_regs, i; 847 u8 idx; 848 849 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 850 if (!tbl) 851 return -ENOMEM; 852 853 switch (le32_to_cpu(elm->id)) { 854 case RTW89_FW_ELEMENT_ID_BB_REG: 855 elm_info->bb_tbl = tbl; 856 break; 857 case RTW89_FW_ELEMENT_ID_BB_GAIN: 858 elm_info->bb_gain = tbl; 859 break; 860 case RTW89_FW_ELEMENT_ID_RADIO_A: 861 case RTW89_FW_ELEMENT_ID_RADIO_B: 862 case RTW89_FW_ELEMENT_ID_RADIO_C: 863 case RTW89_FW_ELEMENT_ID_RADIO_D: 864 rf_path = arg.rf_path; 865 idx = elm->u.reg2.idx; 866 867 elm_info->rf_radio[idx] = tbl; 868 tbl->rf_path = rf_path; 869 tbl->config = rtw89_phy_config_rf_reg_v1; 870 break; 871 case RTW89_FW_ELEMENT_ID_RF_NCTL: 872 elm_info->rf_nctl = tbl; 873 break; 874 default: 875 kfree(tbl); 876 return -ENOENT; 877 } 878 879 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 880 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); 881 if (!regs) 882 goto out; 883 884 for (i = 0; i < n_regs; i++) { 885 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 886 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 887 } 888 889 tbl->n_regs = n_regs; 890 tbl->regs = regs; 891 892 return 0; 893 894 out: 895 kfree(tbl); 896 return -ENOMEM; 897 } 898 899 static 900 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 901 const struct rtw89_fw_element_hdr *elm, 902 const union rtw89_fw_element_arg arg) 903 { 904 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 905 const unsigned long offset = arg.offset; 906 struct rtw89_efuse *efuse = &rtwdev->efuse; 907 struct rtw89_txpwr_conf *conf; 908 909 if (!rtwdev->rfe_data) { 910 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 911 if (!rtwdev->rfe_data) 912 return -ENOMEM; 913 } 914 915 conf = (void *)rtwdev->rfe_data + offset; 916 917 /* if multiple matched, take the last eventually */ 918 if (txpwr_elm->rfe_type == efuse->rfe_type) 919 goto setup; 920 921 /* without one is matched, accept default */ 922 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 923 (!rtw89_txpwr_conf_valid(conf) || 924 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 925 goto setup; 926 927 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 928 elm->id, txpwr_elm->rfe_type); 929 return 0; 930 931 setup: 932 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 933 elm->id, txpwr_elm->rfe_type); 934 935 conf->rfe_type = txpwr_elm->rfe_type; 936 conf->ent_sz = txpwr_elm->ent_sz; 937 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 938 conf->data = txpwr_elm->content; 939 return 0; 940 } 941 942 static 943 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 944 const struct rtw89_fw_element_hdr *elm, 945 const union rtw89_fw_element_arg arg) 946 { 947 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 948 const struct rtw89_chip_info *chip = rtwdev->chip; 949 u32 needed_bitmap = 0; 950 u32 offset = 0; 951 int subband; 952 u32 bitmap; 953 int type; 954 955 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 956 
needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 957 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 958 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 959 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 960 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 961 962 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 963 964 if ((bitmap & needed_bitmap) != needed_bitmap) { 965 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n", 966 needed_bitmap, bitmap); 967 return -ENOENT; 968 } 969 970 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 971 if (!elm_info->txpwr_trk) 972 return -ENOMEM; 973 974 for (type = 0; bitmap; type++, bitmap >>= 1) { 975 if (!(bitmap & BIT(0))) 976 continue; 977 978 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 979 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 980 subband = 4; 981 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 982 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 983 subband = 3; 984 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 985 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 986 subband = 1; 987 else 988 break; 989 990 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 991 992 offset += subband; 993 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 994 goto err; 995 } 996 997 return 0; 998 999 err: 1000 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1001 offset, le32_to_cpu(elm->size)); 1002 kfree(elm_info->txpwr_trk); 1003 elm_info->txpwr_trk = NULL; 1004 1005 return -EFAULT; 1006 } 1007 1008 static 1009 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1010 const struct rtw89_fw_element_hdr *elm, 1011 const union rtw89_fw_element_arg arg) 1012 { 1013 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1014 u8 rfk_id; 1015 1016 if (elm_info->rfk_log_fmt) 1017 goto allocated; 1018 1019 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1020 if (!elm_info->rfk_log_fmt) 1021 return 1; /* this is an optional element, so just ignore this */ 1022 1023 allocated: 1024 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1025 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1026 return 1; 1027 1028 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1029 1030 return 0; 1031 } 1032 1033 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 1034 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 1035 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, 1036 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, 1037 { .fw_type = RTW89_FW_BBMCU1 }, NULL}, 1038 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 1039 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 1040 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 1041 { .rf_path = RF_PATH_A }, "radio A"}, 1042 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1043 { .rf_path = RF_PATH_B }, NULL}, 1044 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1045 { .rf_path = RF_PATH_C }, NULL}, 1046 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1047 { .rf_path = RF_PATH_D }, NULL}, 1048 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1049 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1050 rtw89_fw_recognize_txpwr_from_elm, 1051 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1052 }, 1053 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1054 rtw89_fw_recognize_txpwr_from_elm, 1055 { .offset = offsetof(struct 
rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1056 }, 1057 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1058 rtw89_fw_recognize_txpwr_from_elm, 1059 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1060 }, 1061 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1062 rtw89_fw_recognize_txpwr_from_elm, 1063 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1064 }, 1065 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1066 rtw89_fw_recognize_txpwr_from_elm, 1067 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1068 }, 1069 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1070 rtw89_fw_recognize_txpwr_from_elm, 1071 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1072 }, 1073 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1074 rtw89_fw_recognize_txpwr_from_elm, 1075 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1076 }, 1077 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1078 rtw89_fw_recognize_txpwr_from_elm, 1079 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1080 }, 1081 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1082 rtw89_fw_recognize_txpwr_from_elm, 1083 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1084 }, 1085 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1086 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1087 }, 1088 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1089 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1090 }, 1091 }; 1092 1093 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1094 { 1095 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1096 const struct firmware *firmware = fw_info->req.firmware; 1097 const struct rtw89_chip_info *chip = rtwdev->chip; 1098 u32 unrecognized_elements = chip->needed_fw_elms; 1099 const struct rtw89_fw_element_handler *handler; 1100 const struct rtw89_fw_element_hdr *hdr; 1101 u32 elm_size; 1102 u32 elem_id; 1103 u32 offset; 1104 int ret; 1105 1106 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1107 1108 offset = rtw89_mfw_get_size(rtwdev); 1109 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1110 if (offset == 0) 1111 return -EINVAL; 1112 1113 while (offset + sizeof(*hdr) < firmware->size) { 1114 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1115 1116 elm_size = le32_to_cpu(hdr->size); 1117 if (offset + elm_size >= firmware->size) { 1118 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1119 break; 1120 } 1121 1122 elem_id = le32_to_cpu(hdr->id); 1123 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1124 goto next; 1125 1126 handler = &__fw_element_handlers[elem_id]; 1127 if (!handler->fn) 1128 goto next; 1129 1130 ret = handler->fn(rtwdev, hdr, handler->arg); 1131 if (ret == 1) /* ignore this element */ 1132 goto next; 1133 if (ret) 1134 return ret; 1135 1136 if (handler->name) 1137 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1138 handler->name, hdr->ver); 1139 1140 unrecognized_elements &= ~BIT(elem_id); 1141 next: 1142 offset += sizeof(*hdr) + elm_size; 1143 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1144 } 1145 1146 if (unrecognized_elements) { 1147 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1148 unrecognized_elements); 1149 return -ENOENT; 1150 } 1151 1152 return 0; 1153 } 1154 1155 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1156 u8 type, u8 cat, u8 class, u8 func, 1157 bool rack, bool dack, u32 len) 1158 { 1159 struct fwcmd_hdr *hdr; 1160 1161 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1162 1163 if 
(!(rtwdev->fw.h2c_seq % 4)) 1164 rack = true; 1165 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1166 FIELD_PREP(H2C_HDR_CAT, cat) | 1167 FIELD_PREP(H2C_HDR_CLASS, class) | 1168 FIELD_PREP(H2C_HDR_FUNC, func) | 1169 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1170 1171 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1172 len + H2C_HEADER_LEN) | 1173 (rack ? H2C_HDR_REC_ACK : 0) | 1174 (dack ? H2C_HDR_DONE_ACK : 0)); 1175 1176 rtwdev->fw.h2c_seq++; 1177 } 1178 1179 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1180 struct sk_buff *skb, 1181 u8 type, u8 cat, u8 class, u8 func, 1182 u32 len) 1183 { 1184 struct fwcmd_hdr *hdr; 1185 1186 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1187 1188 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1189 FIELD_PREP(H2C_HDR_CAT, cat) | 1190 FIELD_PREP(H2C_HDR_CLASS, class) | 1191 FIELD_PREP(H2C_HDR_FUNC, func) | 1192 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1193 1194 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1195 len + H2C_HEADER_LEN)); 1196 } 1197 1198 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1199 struct rtw89_fw_bin_info *info, 1200 struct rtw89_fw_hdr *fw_hdr) 1201 { 1202 struct rtw89_fw_hdr_section_info *section_info; 1203 struct rtw89_fw_hdr_section *section; 1204 int i; 1205 1206 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1207 FW_HDR_W7_PART_SIZE); 1208 1209 for (i = 0; i < info->section_num; i++) { 1210 section_info = &info->section_info[i]; 1211 1212 if (!section_info->len_override) 1213 continue; 1214 1215 section = &fw_hdr->sections[i]; 1216 le32p_replace_bits(§ion->w1, section_info->len_override, 1217 FWSECTION_HDR_W1_SEC_SIZE); 1218 } 1219 1220 return 0; 1221 } 1222 1223 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1224 struct rtw89_fw_bin_info *info, 1225 struct rtw89_fw_hdr_v1 *fw_hdr) 1226 { 1227 struct rtw89_fw_hdr_section_info *section_info; 1228 struct rtw89_fw_hdr_section_v1 *section; 1229 u8 dst_sec_idx = 0; 1230 u8 sec_idx; 1231 1232 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1233 FW_HDR_V1_W7_PART_SIZE); 1234 1235 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1236 section_info = &info->section_info[sec_idx]; 1237 section = &fw_hdr->sections[sec_idx]; 1238 1239 if (section_info->ignore) 1240 continue; 1241 1242 if (dst_sec_idx != sec_idx) 1243 fw_hdr->sections[dst_sec_idx] = *section; 1244 1245 dst_sec_idx++; 1246 } 1247 1248 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1249 1250 return (info->section_num - dst_sec_idx) * sizeof(*section); 1251 } 1252 1253 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1254 const struct rtw89_fw_suit *fw_suit, 1255 struct rtw89_fw_bin_info *info) 1256 { 1257 u32 len = info->hdr_len - info->dynamic_hdr_len; 1258 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1259 const u8 *fw = fw_suit->data; 1260 struct rtw89_fw_hdr *fw_hdr; 1261 struct sk_buff *skb; 1262 u32 truncated; 1263 u32 ret = 0; 1264 1265 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1266 if (!skb) { 1267 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1268 return -ENOMEM; 1269 } 1270 1271 skb_put_data(skb, fw, len); 1272 1273 switch (fw_suit->hdr_ver) { 1274 case 0: 1275 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1276 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1277 break; 1278 case 1: 1279 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1280 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, 
fw_hdr_v1); 1281 break; 1282 default: 1283 ret = -EOPNOTSUPP; 1284 goto fail; 1285 } 1286 1287 if (truncated) { 1288 len -= truncated; 1289 skb_trim(skb, len); 1290 } 1291 1292 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1293 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1294 H2C_FUNC_MAC_FWHDR_DL, len); 1295 1296 ret = rtw89_h2c_tx(rtwdev, skb, false); 1297 if (ret) { 1298 rtw89_err(rtwdev, "failed to send h2c\n"); 1299 ret = -1; 1300 goto fail; 1301 } 1302 1303 return 0; 1304 fail: 1305 dev_kfree_skb_any(skb); 1306 1307 return ret; 1308 } 1309 1310 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1311 const struct rtw89_fw_suit *fw_suit, 1312 struct rtw89_fw_bin_info *info) 1313 { 1314 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1315 int ret; 1316 1317 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1318 if (ret) { 1319 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1320 return ret; 1321 } 1322 1323 ret = mac->fwdl_check_path_ready(rtwdev, false); 1324 if (ret) { 1325 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1326 return ret; 1327 } 1328 1329 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1330 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1331 1332 return 0; 1333 } 1334 1335 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1336 struct rtw89_fw_hdr_section_info *info) 1337 { 1338 struct sk_buff *skb; 1339 const u8 *section = info->addr; 1340 u32 residue_len = info->len; 1341 bool copy_key = false; 1342 u32 pkt_len; 1343 int ret; 1344 1345 if (info->ignore) 1346 return 0; 1347 1348 if (info->len_override) { 1349 if (info->len_override > info->len) 1350 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1351 info->len_override, info->len); 1352 else 1353 residue_len = info->len_override; 1354 } 1355 1356 if (info->key_addr && info->key_len) { 1357 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1358 rtw89_warn(rtwdev, 1359 "ignore to copy key data because of len %d, %d, %d, %d\n", 1360 info->len, FWDL_SECTION_PER_PKT_LEN, 1361 info->key_len, residue_len); 1362 else 1363 copy_key = true; 1364 } 1365 1366 while (residue_len) { 1367 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1368 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1369 else 1370 pkt_len = residue_len; 1371 1372 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1373 if (!skb) { 1374 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1375 return -ENOMEM; 1376 } 1377 skb_put_data(skb, section, pkt_len); 1378 1379 if (copy_key) 1380 memcpy(skb->data + pkt_len - info->key_len, 1381 info->key_addr, info->key_len); 1382 1383 ret = rtw89_h2c_tx(rtwdev, skb, true); 1384 if (ret) { 1385 rtw89_err(rtwdev, "failed to send h2c\n"); 1386 ret = -1; 1387 goto fail; 1388 } 1389 1390 section += pkt_len; 1391 residue_len -= pkt_len; 1392 } 1393 1394 return 0; 1395 fail: 1396 dev_kfree_skb_any(skb); 1397 1398 return ret; 1399 } 1400 1401 static enum rtw89_fwdl_check_type 1402 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1403 const struct rtw89_fw_suit *fw_suit) 1404 { 1405 switch (fw_suit->type) { 1406 case RTW89_FW_BBMCU0: 1407 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1408 case RTW89_FW_BBMCU1: 1409 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1410 default: 1411 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1412 } 1413 } 1414 1415 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1416 const struct rtw89_fw_suit *fw_suit, 1417 struct rtw89_fw_bin_info *info) 1418 { 1419 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1420 const struct 
rtw89_chip_info *chip = rtwdev->chip; 1421 enum rtw89_fwdl_check_type chk_type; 1422 u8 section_num = info->section_num; 1423 int ret; 1424 1425 while (section_num--) { 1426 ret = __rtw89_fw_download_main(rtwdev, section_info); 1427 if (ret) 1428 return ret; 1429 section_info++; 1430 } 1431 1432 if (chip->chip_gen == RTW89_CHIP_AX) 1433 return 0; 1434 1435 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1436 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1437 if (ret) { 1438 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1439 fw_suit->type); 1440 return ret; 1441 } 1442 1443 return 0; 1444 } 1445 1446 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1447 { 1448 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1449 u32 addr = R_AX_DBG_PORT_SEL; 1450 u32 val32; 1451 u16 index; 1452 1453 if (chip_gen == RTW89_CHIP_BE) { 1454 addr = R_BE_WLCPU_PORT_PC; 1455 goto dump; 1456 } 1457 1458 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1459 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1460 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1461 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1462 1463 dump: 1464 for (index = 0; index < 15; index++) { 1465 val32 = rtw89_read32(rtwdev, addr); 1466 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1467 fsleep(10); 1468 } 1469 } 1470 1471 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1472 { 1473 u32 val32; 1474 1475 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1476 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1477 1478 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1479 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1480 1481 rtw89_fw_prog_cnt_dump(rtwdev); 1482 } 1483 1484 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1485 struct rtw89_fw_suit *fw_suit) 1486 { 1487 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1488 struct rtw89_fw_bin_info info = {}; 1489 int ret; 1490 1491 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1492 if (ret) { 1493 rtw89_err(rtwdev, "parse fw header fail\n"); 1494 return ret; 1495 } 1496 1497 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1498 1499 if (rtwdev->chip->chip_id == RTL8922A && 1500 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1501 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1502 1503 ret = mac->fwdl_check_path_ready(rtwdev, true); 1504 if (ret) { 1505 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1506 return ret; 1507 } 1508 1509 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1510 if (ret) 1511 return ret; 1512 1513 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1514 if (ret) 1515 return ret; 1516 1517 return 0; 1518 } 1519 1520 static 1521 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1522 bool include_bb) 1523 { 1524 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1525 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1526 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1527 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1528 int ret; 1529 int i; 1530 1531 mac->disable_cpu(rtwdev); 1532 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1533 if (ret) 1534 return ret; 1535 1536 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1537 if (ret) 1538 goto fwdl_err; 1539 1540 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1541 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1542 1543 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1544 if (ret) 1545 
goto fwdl_err; 1546 } 1547 1548 fw_info->h2c_seq = 0; 1549 fw_info->rec_seq = 0; 1550 fw_info->h2c_counter = 0; 1551 fw_info->c2h_counter = 0; 1552 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1553 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1554 1555 mdelay(5); 1556 1557 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1558 if (ret) { 1559 rtw89_warn(rtwdev, "download firmware fail\n"); 1560 goto fwdl_err; 1561 } 1562 1563 return ret; 1564 1565 fwdl_err: 1566 rtw89_fw_dl_fail_dump(rtwdev); 1567 return ret; 1568 } 1569 1570 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1571 bool include_bb) 1572 { 1573 int retry; 1574 int ret; 1575 1576 for (retry = 0; retry < 5; retry++) { 1577 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1578 if (!ret) 1579 return 0; 1580 } 1581 1582 return ret; 1583 } 1584 1585 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1586 { 1587 struct rtw89_fw_info *fw = &rtwdev->fw; 1588 1589 wait_for_completion(&fw->req.completion); 1590 if (!fw->req.firmware) 1591 return -EINVAL; 1592 1593 return 0; 1594 } 1595 1596 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1597 struct rtw89_fw_req_info *req, 1598 const char *fw_name, bool nowarn) 1599 { 1600 int ret; 1601 1602 if (req->firmware) { 1603 rtw89_debug(rtwdev, RTW89_DBG_FW, 1604 "full firmware has been early requested\n"); 1605 complete_all(&req->completion); 1606 return 0; 1607 } 1608 1609 if (nowarn) 1610 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1611 else 1612 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1613 1614 complete_all(&req->completion); 1615 1616 return ret; 1617 } 1618 1619 void rtw89_load_firmware_work(struct work_struct *work) 1620 { 1621 struct rtw89_dev *rtwdev = 1622 container_of(work, struct rtw89_dev, load_firmware_work); 1623 const struct rtw89_chip_info *chip = rtwdev->chip; 1624 char fw_name[64]; 1625 1626 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1627 chip->fw_basename, rtwdev->fw.fw_format); 1628 1629 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1630 } 1631 1632 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1633 { 1634 if (!tbl) 1635 return; 1636 1637 kfree(tbl->regs); 1638 kfree(tbl); 1639 } 1640 1641 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1642 { 1643 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1644 int i; 1645 1646 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1647 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1648 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1649 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1650 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1651 1652 kfree(elm_info->txpwr_trk); 1653 kfree(elm_info->rfk_log_fmt); 1654 } 1655 1656 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1657 { 1658 struct rtw89_fw_info *fw = &rtwdev->fw; 1659 1660 cancel_work_sync(&rtwdev->load_firmware_work); 1661 1662 if (fw->req.firmware) { 1663 release_firmware(fw->req.firmware); 1664 1665 /* assign NULL back in case rtw89_free_ieee80211_hw() 1666 * try to release the same one again. 
1667 */ 1668 fw->req.firmware = NULL; 1669 } 1670 1671 kfree(fw->log.fmts); 1672 rtw89_unload_firmware_elements(rtwdev); 1673 } 1674 1675 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1676 { 1677 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1678 u32 i; 1679 1680 if (fmt_id > fw_log->last_fmt_id) 1681 return 0; 1682 1683 for (i = 0; i < fw_log->fmt_count; i++) { 1684 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1685 return i; 1686 } 1687 return 0; 1688 } 1689 1690 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1691 { 1692 struct rtw89_fw_log *log = &rtwdev->fw.log; 1693 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1694 struct rtw89_fw_suit *suit = &log->suit; 1695 const void *fmts_ptr, *fmts_end_ptr; 1696 u32 fmt_count; 1697 int i; 1698 1699 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1700 fmt_count = le32_to_cpu(suit_hdr->count); 1701 log->fmt_ids = suit_hdr->ids; 1702 fmts_ptr = &suit_hdr->ids[fmt_count]; 1703 fmts_end_ptr = suit->data + suit->size; 1704 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1705 if (!log->fmts) 1706 return -ENOMEM; 1707 1708 for (i = 0; i < fmt_count; i++) { 1709 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1710 if (!fmts_ptr) 1711 break; 1712 1713 (*log->fmts)[i] = fmts_ptr; 1714 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1715 log->fmt_count++; 1716 fmts_ptr += strlen(fmts_ptr); 1717 } 1718 1719 return 0; 1720 } 1721 1722 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1723 { 1724 struct rtw89_fw_log *log = &rtwdev->fw.log; 1725 struct rtw89_fw_suit *suit = &log->suit; 1726 1727 if (!suit || !suit->data) { 1728 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1729 return -EINVAL; 1730 } 1731 if (log->fmts) 1732 return 0; 1733 1734 return rtw89_fw_log_create_fmts_dict(rtwdev); 1735 } 1736 1737 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1738 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1739 u32 fmt_idx, u8 para_int, bool raw_data) 1740 { 1741 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1742 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1743 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1744 int i; 1745 1746 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1747 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1748 log_fmt->argc); 1749 return; 1750 } 1751 1752 if (para_int) 1753 for (i = 0 ; i < log_fmt->argc; i++) 1754 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1755 1756 if (raw_data) { 1757 if (para_int) 1758 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1759 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1760 para_int, log_fmt->argc, (int)sizeof(args), args); 1761 else 1762 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1763 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1764 para_int, log_fmt->argc, log_fmt->u.raw); 1765 } else { 1766 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1767 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1768 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1769 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1770 args[0xf]); 1771 } 1772 1773 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1774 } 1775 1776 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1777 { 1778 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1779 u8 para_int; 1780 u32 fmt_idx; 1781 1782 if (len < RTW89_C2H_HEADER_LEN) { 1783 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1784 return; 1785 } 1786 1787 buf += RTW89_C2H_HEADER_LEN; 1788 len -= 
RTW89_C2H_HEADER_LEN; 1789 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1790 1791 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1792 goto plain_log; 1793 1794 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1795 goto plain_log; 1796 1797 if (!rtwdev->fw.log.fmts) 1798 return; 1799 1800 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1801 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1802 1803 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1804 rtw89_info(rtwdev, "C2H log: %s%s", 1805 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1806 else if (fmt_idx != 0 && para_int) 1807 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1808 else 1809 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1810 return; 1811 1812 plain_log: 1813 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1814 1815 } 1816 1817 #define H2C_CAM_LEN 60 1818 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 1819 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 1820 { 1821 struct sk_buff *skb; 1822 int ret; 1823 1824 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1825 if (!skb) { 1826 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1827 return -ENOMEM; 1828 } 1829 skb_put(skb, H2C_CAM_LEN); 1830 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 1831 skb->data); 1832 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 1833 1834 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1835 H2C_CAT_MAC, 1836 H2C_CL_MAC_ADDR_CAM_UPDATE, 1837 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1838 H2C_CAM_LEN); 1839 1840 ret = rtw89_h2c_tx(rtwdev, skb, false); 1841 if (ret) { 1842 rtw89_err(rtwdev, "failed to send h2c\n"); 1843 goto fail; 1844 } 1845 1846 return 0; 1847 fail: 1848 dev_kfree_skb_any(skb); 1849 1850 return ret; 1851 } 1852 1853 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1854 struct rtw89_vif_link *rtwvif_link, 1855 struct rtw89_sta_link *rtwsta_link) 1856 { 1857 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 1858 u32 len = sizeof(*h2c); 1859 struct sk_buff *skb; 1860 int ret; 1861 1862 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1863 if (!skb) { 1864 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1865 return -ENOMEM; 1866 } 1867 skb_put(skb, len); 1868 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 1869 1870 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 1871 1872 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1873 H2C_CAT_MAC, 1874 H2C_CL_MAC_FR_EXCHG, 1875 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1876 len); 1877 1878 ret = rtw89_h2c_tx(rtwdev, skb, false); 1879 if (ret) { 1880 rtw89_err(rtwdev, "failed to send h2c\n"); 1881 goto fail; 1882 } 1883 1884 return 0; 1885 fail: 1886 dev_kfree_skb_any(skb); 1887 1888 return ret; 1889 } 1890 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1891 1892 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1893 struct rtw89_vif_link *rtwvif_link, 1894 struct rtw89_sta_link *rtwsta_link) 1895 { 1896 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1897 u32 len = sizeof(*h2c); 1898 struct sk_buff *skb; 1899 int ret; 1900 1901 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1902 if (!skb) { 1903 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1904 return -ENOMEM; 1905 } 1906 skb_put(skb, len); 1907 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1908 1909 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, 
rtwvif_link, rtwsta_link, h2c); 1910 1911 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1912 H2C_CAT_MAC, 1913 H2C_CL_MAC_FR_EXCHG, 1914 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1915 len); 1916 1917 ret = rtw89_h2c_tx(rtwdev, skb, false); 1918 if (ret) { 1919 rtw89_err(rtwdev, "failed to send h2c\n"); 1920 goto fail; 1921 } 1922 1923 return 0; 1924 fail: 1925 dev_kfree_skb_any(skb); 1926 1927 return ret; 1928 } 1929 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1930 1931 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1932 struct rtw89_vif_link *rtwvif_link, 1933 struct rtw89_sta_link *rtwsta_link) 1934 { 1935 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 1936 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1937 u32 len = sizeof(*h2c); 1938 struct sk_buff *skb; 1939 int ret; 1940 1941 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1942 if (!skb) { 1943 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 1944 return -ENOMEM; 1945 } 1946 skb_put(skb, len); 1947 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1948 1949 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 1950 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 1951 1952 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 1953 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 1954 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 1955 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 1956 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 1957 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 1958 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 1959 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 1960 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 1961 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 1962 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 1963 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 1964 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 1965 1966 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1967 H2C_CAT_MAC, 1968 H2C_CL_MAC_FR_EXCHG, 1969 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1970 len); 1971 1972 ret = rtw89_h2c_tx(rtwdev, skb, false); 1973 if (ret) { 1974 rtw89_err(rtwdev, "failed to send h2c\n"); 1975 goto fail; 1976 } 1977 1978 return 0; 1979 fail: 1980 dev_kfree_skb_any(skb); 1981 1982 return ret; 1983 } 1984 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 1985 1986 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 1987 struct rtw89_vif_link *rtwvif_link, 1988 struct rtw89_sta_link *rtwsta_link, 1989 bool valid, struct ieee80211_ampdu_params *params) 1990 { 1991 const struct rtw89_chip_info *chip = rtwdev->chip; 1992 struct rtw89_h2c_ba_cam *h2c; 1993 u8 macid = rtwsta_link->mac_id; 1994 u32 len = sizeof(*h2c); 1995 struct sk_buff *skb; 1996 u8 entry_idx; 1997 int ret; 1998 1999 ret = valid ? 2000 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2001 &entry_idx) : 2002 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2003 &entry_idx); 2004 if (ret) { 2005 /* it still works even if we don't have static BA CAM, because 2006 * hardware can create dynamic BA CAM automatically. 2007 */ 2008 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2009 "failed to %s entry tid=%d for h2c ba cam\n", 2010 valid ? 
"alloc" : "free", params->tid); 2011 return 0; 2012 } 2013 2014 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2015 if (!skb) { 2016 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2017 return -ENOMEM; 2018 } 2019 skb_put(skb, len); 2020 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2021 2022 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2023 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2024 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2025 else 2026 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2027 if (!valid) 2028 goto end; 2029 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2030 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2031 if (params->buf_size > 64) 2032 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2033 else 2034 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2035 /* If init req is set, hw will set the ssn */ 2036 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2037 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2038 2039 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2040 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2041 le32_encode_bits(rtwvif_link->mac_idx, 2042 RTW89_H2C_BA_CAM_W1_BAND); 2043 } 2044 2045 end: 2046 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2047 H2C_CAT_MAC, 2048 H2C_CL_BA_CAM, 2049 H2C_FUNC_MAC_BA_CAM, 0, 1, 2050 len); 2051 2052 ret = rtw89_h2c_tx(rtwdev, skb, false); 2053 if (ret) { 2054 rtw89_err(rtwdev, "failed to send h2c\n"); 2055 goto fail; 2056 } 2057 2058 return 0; 2059 fail: 2060 dev_kfree_skb_any(skb); 2061 2062 return ret; 2063 } 2064 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2065 2066 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2067 u8 entry_idx, u8 uid) 2068 { 2069 struct rtw89_h2c_ba_cam *h2c; 2070 u32 len = sizeof(*h2c); 2071 struct sk_buff *skb; 2072 int ret; 2073 2074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2075 if (!skb) { 2076 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2077 return -ENOMEM; 2078 } 2079 skb_put(skb, len); 2080 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2081 2082 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2083 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2084 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2085 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2086 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2087 2088 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2089 H2C_CAT_MAC, 2090 H2C_CL_BA_CAM, 2091 H2C_FUNC_MAC_BA_CAM, 0, 1, 2092 len); 2093 2094 ret = rtw89_h2c_tx(rtwdev, skb, false); 2095 if (ret) { 2096 rtw89_err(rtwdev, "failed to send h2c\n"); 2097 goto fail; 2098 } 2099 2100 return 0; 2101 fail: 2102 dev_kfree_skb_any(skb); 2103 2104 return ret; 2105 } 2106 2107 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2108 { 2109 const struct rtw89_chip_info *chip = rtwdev->chip; 2110 u8 entry_idx = chip->bacam_num; 2111 u8 uid = 0; 2112 int i; 2113 2114 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2115 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2116 entry_idx++; 2117 uid++; 2118 } 2119 } 2120 2121 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2122 struct rtw89_vif_link *rtwvif_link, 2123 struct rtw89_sta_link *rtwsta_link, 2124 bool valid, struct ieee80211_ampdu_params *params) 2125 { 2126 const struct rtw89_chip_info *chip = rtwdev->chip; 2127 struct rtw89_h2c_ba_cam_v1 *h2c; 2128 u8 
macid = rtwsta_link->mac_id; 2129 u32 len = sizeof(*h2c); 2130 struct sk_buff *skb; 2131 u8 entry_idx; 2132 u8 bmap_size; 2133 int ret; 2134 2135 ret = valid ? 2136 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2137 &entry_idx) : 2138 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2139 &entry_idx); 2140 if (ret) { 2141 /* it still works even if we don't have static BA CAM, because 2142 * hardware can create dynamic BA CAM automatically. 2143 */ 2144 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2145 "failed to %s entry tid=%d for h2c ba cam\n", 2146 valid ? "alloc" : "free", params->tid); 2147 return 0; 2148 } 2149 2150 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2151 if (!skb) { 2152 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2153 return -ENOMEM; 2154 } 2155 skb_put(skb, len); 2156 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2157 2158 if (params->buf_size > 512) 2159 bmap_size = 10; 2160 else if (params->buf_size > 256) 2161 bmap_size = 8; 2162 else if (params->buf_size > 64) 2163 bmap_size = 4; 2164 else 2165 bmap_size = 0; 2166 2167 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2168 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2169 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2170 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2171 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2172 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2173 2174 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2175 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2176 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2177 le32_encode_bits(!!rtwvif_link->mac_idx, 2178 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2179 2180 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2181 H2C_CAT_MAC, 2182 H2C_CL_BA_CAM, 2183 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2184 len); 2185 2186 ret = rtw89_h2c_tx(rtwdev, skb, false); 2187 if (ret) { 2188 rtw89_err(rtwdev, "failed to send h2c\n"); 2189 goto fail; 2190 } 2191 2192 return 0; 2193 fail: 2194 dev_kfree_skb_any(skb); 2195 2196 return ret; 2197 } 2198 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2199 2200 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2201 u8 offset, u8 mac_idx) 2202 { 2203 struct rtw89_h2c_ba_cam_init *h2c; 2204 u32 len = sizeof(*h2c); 2205 struct sk_buff *skb; 2206 int ret; 2207 2208 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2209 if (!skb) { 2210 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2211 return -ENOMEM; 2212 } 2213 skb_put(skb, len); 2214 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2215 2216 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2217 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2218 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2219 2220 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2221 H2C_CAT_MAC, 2222 H2C_CL_BA_CAM, 2223 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2224 len); 2225 2226 ret = rtw89_h2c_tx(rtwdev, skb, false); 2227 if (ret) { 2228 rtw89_err(rtwdev, "failed to send h2c\n"); 2229 goto fail; 2230 } 2231 2232 return 0; 2233 fail: 2234 dev_kfree_skb_any(skb); 2235 2236 return ret; 2237 } 2238 2239 #define H2C_LOG_CFG_LEN 12 2240 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2241 { 2242 struct sk_buff *skb; 2243 u32 comp = 0; 2244 int ret; 2245 2246 if (enable) 2247 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2248 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2249 BIT(RTW89_FW_LOG_COMP_SCAN); 2250 2251 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2252 if (!skb) { 2253 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2254 return -ENOMEM; 2255 } 2256 2257 skb_put(skb, H2C_LOG_CFG_LEN); 2258 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2259 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2260 SET_LOG_CFG_COMP(skb->data, comp); 2261 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2262 2263 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2264 H2C_CAT_MAC, 2265 H2C_CL_FW_INFO, 2266 H2C_FUNC_LOG_CFG, 0, 0, 2267 H2C_LOG_CFG_LEN); 2268 2269 ret = rtw89_h2c_tx(rtwdev, skb, false); 2270 if (ret) { 2271 rtw89_err(rtwdev, "failed to send h2c\n"); 2272 goto fail; 2273 } 2274 2275 return 0; 2276 fail: 2277 dev_kfree_skb_any(skb); 2278 2279 return ret; 2280 } 2281 2282 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2283 struct rtw89_vif_link *rtwvif_link) 2284 { 2285 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2286 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2287 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2288 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2289 struct rtw89_eapol_2_of_2 *eapol_pkt; 2290 struct ieee80211_bss_conf *bss_conf; 2291 struct ieee80211_hdr_3addr *hdr; 2292 struct sk_buff *skb; 2293 u8 key_des_ver; 2294 2295 if (rtw_wow->ptk_alg == 3) 2296 key_des_ver = 1; 2297 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2298 key_des_ver = 2; 2299 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2300 key_des_ver = 3; 2301 else 2302 key_des_ver = 0; 2303 2304 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2305 if (!skb) 2306 return NULL; 2307 2308 hdr = skb_put_zero(skb, sizeof(*hdr)); 2309 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2310 IEEE80211_FCTL_TODS | 2311 IEEE80211_FCTL_PROTECTED); 2312 2313 rcu_read_lock(); 2314 2315 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2316 2317 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2318 ether_addr_copy(hdr->addr2, bss_conf->addr); 2319 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2320 2321 rcu_read_unlock(); 2322 2323 skb_put_zero(skb, sec_hdr_len); 2324 2325 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2326 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2327 eapol_pkt->key_des_ver = key_des_ver; 2328 2329 return skb; 2330 } 2331 2332 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2333 struct rtw89_vif_link *rtwvif_link) 2334 { 2335 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2336 struct ieee80211_bss_conf *bss_conf; 2337 struct ieee80211_hdr_3addr *hdr; 2338 struct rtw89_sa_query *sa_query; 2339 struct sk_buff *skb; 2340 2341 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2342 if (!skb) 2343 return NULL; 2344 2345 hdr = skb_put_zero(skb, sizeof(*hdr)); 2346 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2347 IEEE80211_STYPE_ACTION | 2348 IEEE80211_FCTL_PROTECTED); 2349 2350 rcu_read_lock(); 2351 2352 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2353 2354 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2355 ether_addr_copy(hdr->addr2, bss_conf->addr); 2356 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2357 2358 rcu_read_unlock(); 2359 2360 skb_put_zero(skb, sec_hdr_len); 2361 2362 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2363 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2364 
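/* This template is registered as a firmware packet-offload entry; presumably the firmware uses it to answer SA Query requests with a protected SA Query response on the host's behalf while in WoWLAN. */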
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2365 2366 return skb; 2367 } 2368 2369 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2370 struct rtw89_vif_link *rtwvif_link) 2371 { 2372 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2373 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2374 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2375 struct ieee80211_hdr_3addr *hdr; 2376 struct rtw89_arp_rsp *arp_skb; 2377 struct arphdr *arp_hdr; 2378 struct sk_buff *skb; 2379 __le16 fc; 2380 2381 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2382 if (!skb) 2383 return NULL; 2384 2385 hdr = skb_put_zero(skb, sizeof(*hdr)); 2386 2387 if (rtw_wow->ptk_alg) 2388 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2389 IEEE80211_FCTL_PROTECTED); 2390 else 2391 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2392 2393 hdr->frame_control = fc; 2394 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2395 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2396 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2397 2398 skb_put_zero(skb, sec_hdr_len); 2399 2400 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2401 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2402 arp_skb->llc_type = htons(ETH_P_ARP); 2403 2404 arp_hdr = &arp_skb->arp_hdr; 2405 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2406 arp_hdr->ar_pro = htons(ETH_P_IP); 2407 arp_hdr->ar_hln = ETH_ALEN; 2408 arp_hdr->ar_pln = 4; 2409 arp_hdr->ar_op = htons(ARPOP_REPLY); 2410 2411 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2412 arp_skb->sender_ip = rtwvif->ip_addr; 2413 2414 return skb; 2415 } 2416 2417 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2418 struct rtw89_vif_link *rtwvif_link, 2419 enum rtw89_fw_pkt_ofld_type type, 2420 u8 *id) 2421 { 2422 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2423 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2424 struct rtw89_pktofld_info *info; 2425 struct sk_buff *skb; 2426 int ret; 2427 2428 info = kzalloc(sizeof(*info), GFP_KERNEL); 2429 if (!info) 2430 return -ENOMEM; 2431 2432 switch (type) { 2433 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2434 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2435 break; 2436 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2437 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2438 break; 2439 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2440 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2441 break; 2442 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2443 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2444 break; 2445 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2446 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2447 break; 2448 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2449 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2450 break; 2451 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2452 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2453 break; 2454 default: 2455 goto err; 2456 } 2457 2458 if (!skb) 2459 goto err; 2460 2461 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2462 kfree_skb(skb); 2463 2464 if (ret) 2465 goto err; 2466 2467 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2468 *id = info->id; 2469 return 0; 2470 2471 err: 2472 kfree(info); 2473 return -ENOMEM; 2474 } 2475 2476 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2477 struct rtw89_vif_link *rtwvif_link, 2478 bool notify_fw) 2479 { 2480 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2481 struct rtw89_pktofld_info *info, *tmp; 2482 2483 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2484 if (notify_fw) 2485 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2486 else 2487 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2488 list_del(&info->list); 2489 kfree(info); 2490 } 2491 } 2492 2493 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2494 { 2495 struct rtw89_vif_link *rtwvif_link; 2496 struct rtw89_vif *rtwvif; 2497 unsigned int link_id; 2498 2499 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2500 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2501 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2502 notify_fw); 2503 } 2504 2505 #define H2C_GENERAL_PKT_LEN 6 2506 #define H2C_GENERAL_PKT_ID_UND 0xff 2507 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2508 struct rtw89_vif_link *rtwvif_link, u8 macid) 2509 { 2510 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2511 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2512 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2513 struct sk_buff *skb; 2514 int ret; 2515 2516 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2517 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2518 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2519 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2520 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2521 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2522 2523 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2524 if (!skb) { 2525 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2526 return -ENOMEM; 2527 } 2528 skb_put(skb, H2C_GENERAL_PKT_LEN); 2529 SET_GENERAL_PKT_MACID(skb->data, macid); 2530 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2531 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2532 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2533 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2534 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2535 2536 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2537 H2C_CAT_MAC, 2538 H2C_CL_FW_INFO, 2539 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2540 H2C_GENERAL_PKT_LEN); 2541 2542 ret = rtw89_h2c_tx(rtwdev, skb, false); 2543 if (ret) { 2544 rtw89_err(rtwdev, "failed to send h2c\n"); 2545 goto fail; 2546 } 2547 2548 return 0; 2549 fail: 2550 dev_kfree_skb_any(skb); 2551 2552 return ret; 2553 } 2554 2555 #define H2C_LPS_PARM_LEN 8 2556 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2557 struct rtw89_lps_parm *lps_param) 2558 { 2559 struct sk_buff *skb; 2560 int ret; 2561 2562 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2563 if (!skb) { 2564 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2565 return -ENOMEM; 2566 } 2567 skb_put(skb, H2C_LPS_PARM_LEN); 2568 2569 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2570 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2571 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2572 SET_LPS_PARM_RLBM(skb->data, 1); 2573 SET_LPS_PARM_SMARTPS(skb->data, 1); 2574 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2575 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2576 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2577 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2578 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2579 2580 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2581 H2C_CAT_MAC, 2582 H2C_CL_MAC_PS, 2583 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2584 H2C_LPS_PARM_LEN); 2585 2586 ret = rtw89_h2c_tx(rtwdev, skb, false); 2587 if (ret) { 2588 rtw89_err(rtwdev, "failed to send h2c\n"); 2589 goto fail; 2590 } 2591 2592 return 0; 2593 fail: 2594 dev_kfree_skb_any(skb); 2595 2596 return ret; 2597 } 2598 2599 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2600 { 2601 const struct rtw89_chip_info *chip = rtwdev->chip; 2602 const struct rtw89_chan *chan; 2603 struct rtw89_vif_link *rtwvif_link; 2604 struct rtw89_h2c_lps_ch_info *h2c; 2605 u32 len = sizeof(*h2c); 2606 unsigned int link_id; 2607 struct sk_buff *skb; 2608 bool no_chan = true; 2609 u8 phy_idx; 2610 u32 done; 2611 int ret; 2612 2613 if (chip->chip_gen != RTW89_CHIP_BE) 2614 return 0; 2615 2616 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2617 if (!skb) { 2618 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2619 return -ENOMEM; 2620 } 2621 skb_put(skb, len); 2622 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2623 2624 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2625 phy_idx = rtwvif_link->phy_idx; 2626 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2627 continue; 2628 2629 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2630 no_chan = false; 2631 2632 h2c->info[phy_idx].central_ch = chan->channel; 2633 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2634 h2c->info[phy_idx].band = chan->band_type; 2635 h2c->info[phy_idx].bw = chan->band_width; 2636 } 2637 2638 if (no_chan) { 2639 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2640 ret = -ENOENT; 2641 goto fail; 2642 } 2643 2644 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2645 2646 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2647 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2648 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2649 2650 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2651 ret = rtw89_h2c_tx(rtwdev, skb, false); 2652 if (ret) { 2653 rtw89_err(rtwdev, "failed to send h2c\n"); 2654 goto fail; 2655 } 2656 2657 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2658 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2659 if (ret) 2660 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2661 2662 return 0; 2663 fail: 2664 dev_kfree_skb_any(skb); 2665 2666 return ret; 2667 } 2668 2669 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2670 struct rtw89_vif *rtwvif) 2671 { 2672 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2673 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2674 const struct rtw89_chip_info *chip = rtwdev->chip; 2675 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2676 struct rtw89_vif_link *rtwvif_link; 2677 const struct rtw89_chan *chan; 2678 u8 bw_idx = RTW89_BB_BW_20_40; 2679 u32 len = sizeof(*h2c); 2680 unsigned int link_id; 2681 struct sk_buff *skb; 2682 u8 gain_band; 2683 u32 done; 2684 u8 path; 2685 int ret; 2686 int i; 2687 2688 if (chip->chip_gen != RTW89_CHIP_BE) 2689 return 0; 2690 2691 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2692 if (!skb) { 2693 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2694 return -ENOMEM; 2695 } 2696 skb_put(skb, len); 2697 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2698 2699 h2c->fmt_id = 0x1; 2700 2701 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2702 2703 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2704 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2705 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2706 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2707 2708 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2709 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2710 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2711 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2712 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2713 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2714 else 2715 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2716 2717 /* Fill BW20 RX gain table for beacon mode */ 2718 for (i = 0; i < TIA_GAIN_NUM; i++) { 2719 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2720 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2721 } 2722 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2723 gain->lna_gain[gain_band][bw_idx][path], 2724 LNA_GAIN_NUM); 2725 } 2726 2727 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2728 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2729 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2730 2731 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2732 ret = rtw89_h2c_tx(rtwdev, skb, false); 2733 if (ret) { 2734 rtw89_err(rtwdev, "failed to send h2c\n"); 2735 goto fail; 2736 } 2737 2738 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2739 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2740 if (ret) 2741 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2742 2743 return 0; 2744 fail: 2745 dev_kfree_skb_any(skb); 2746 2747 return ret; 2748 } 2749 2750 #define H2C_P2P_ACT_LEN 20 2751 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2752 struct rtw89_vif_link *rtwvif_link, 2753 struct ieee80211_bss_conf *bss_conf, 2754 struct ieee80211_p2p_noa_desc *desc, 2755 u8 act, u8 noa_id) 2756 { 2757 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2758 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2759 struct sk_buff *skb; 2760 u8 *cmd; 2761 int ret; 2762 2763 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2764 if (!skb) { 2765 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2766 return -ENOMEM; 2767 } 2768 skb_put(skb, 
H2C_P2P_ACT_LEN); 2769 cmd = skb->data; 2770 2771 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2772 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2773 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2774 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2775 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2776 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2777 if (desc) { 2778 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2779 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2780 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2781 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2782 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2783 } 2784 2785 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2786 H2C_CAT_MAC, H2C_CL_MAC_PS, 2787 H2C_FUNC_P2P_ACT, 0, 0, 2788 H2C_P2P_ACT_LEN); 2789 2790 ret = rtw89_h2c_tx(rtwdev, skb, false); 2791 if (ret) { 2792 rtw89_err(rtwdev, "failed to send h2c\n"); 2793 goto fail; 2794 } 2795 2796 return 0; 2797 fail: 2798 dev_kfree_skb_any(skb); 2799 2800 return ret; 2801 } 2802 2803 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2804 struct sk_buff *skb) 2805 { 2806 const struct rtw89_chip_info *chip = rtwdev->chip; 2807 struct rtw89_hal *hal = &rtwdev->hal; 2808 u8 ntx_path; 2809 u8 map_b; 2810 2811 if (chip->rf_path_num == 1) { 2812 ntx_path = RF_A; 2813 map_b = 0; 2814 } else { 2815 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2816 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2817 } 2818 2819 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2820 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2821 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2822 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2823 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2824 } 2825 2826 #define H2C_CMC_TBL_LEN 68 2827 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2828 struct rtw89_vif_link *rtwvif_link, 2829 struct rtw89_sta_link *rtwsta_link) 2830 { 2831 const struct rtw89_chip_info *chip = rtwdev->chip; 2832 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2833 struct sk_buff *skb; 2834 int ret; 2835 2836 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2837 if (!skb) { 2838 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2839 return -ENOMEM; 2840 } 2841 skb_put(skb, H2C_CMC_TBL_LEN); 2842 SET_CTRL_INFO_MACID(skb->data, macid); 2843 SET_CTRL_INFO_OPERATION(skb->data, 1); 2844 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2845 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2846 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2847 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2848 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2849 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2850 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2851 } 2852 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2853 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2854 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2855 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2856 2857 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2858 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2859 chip->h2c_cctl_func_id, 0, 1, 2860 H2C_CMC_TBL_LEN); 2861 2862 ret = rtw89_h2c_tx(rtwdev, skb, false); 2863 if (ret) { 2864 rtw89_err(rtwdev, "failed to send h2c\n"); 2865 goto fail; 2866 } 2867 2868 return 0; 2869 fail: 2870 dev_kfree_skb_any(skb); 2871 2872 return ret; 2873 } 2874 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2875 2876 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2877 struct rtw89_vif_link *rtwvif_link, 2878 struct rtw89_sta_link *rtwsta_link) 2879 { 2880 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2881 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2882 u32 len = sizeof(*h2c); 2883 struct sk_buff *skb; 2884 int ret; 2885 2886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2887 if (!skb) { 2888 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2889 return -ENOMEM; 2890 } 2891 skb_put(skb, len); 2892 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2893 2894 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2895 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2896 2897 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2898 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2899 2900 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2901 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2902 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2903 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2904 2905 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2906 2907 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2908 2909 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2910 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2911 2912 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2913 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2914 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2915 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2916 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2917 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2918 2919 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2920 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2921 2922 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2923 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2924 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2925 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2926 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2927 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2928 2929 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2930 2931 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2932 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2933 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2934 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2935 2936 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2937 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2938 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2939 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2940 2941 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2942 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2943 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2944 len); 2945 2946 ret = rtw89_h2c_tx(rtwdev, skb, false); 2947 if (ret) { 2948 rtw89_err(rtwdev, "failed to send h2c\n"); 2949 goto fail; 2950 } 2951 2952 return 0; 2953 fail: 2954 dev_kfree_skb_any(skb); 2955 2956 return ret; 2957 } 2958 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2959 2960 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2961 struct ieee80211_link_sta *link_sta, 2962 u8 *pads) 2963 { 2964 bool ppe_th; 2965 u8 ppe16, ppe8; 2966 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 2967 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 2968 u8 ru_bitmap; 2969 u8 n, idx, sh; 2970 u16 ppe; 2971 int i; 2972 2973 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2974 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 2975 if (!ppe_th) { 2976 u8 pad; 2977 2978 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2979 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 2980 2981 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 2982 pads[i] = pad; 2983 2984 return; 2985 } 2986 2987 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 2988 n = hweight8(ru_bitmap); 2989 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2990 2991 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2992 if (!(ru_bitmap & BIT(i))) { 2993 pads[i] = 1; 2994 continue; 2995 } 2996 2997 idx = n >> 3; 2998 sh = n & 7; 2999 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3000 3001 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3002 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3003 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3004 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3005 3006 if (ppe16 != 7 && ppe8 == 7) 3007 pads[i] = RTW89_PE_DURATION_16; 3008 else if (ppe8 != 7) 3009 pads[i] = RTW89_PE_DURATION_8; 3010 else 3011 pads[i] = RTW89_PE_DURATION_0; 3012 } 3013 } 3014 3015 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3016 struct rtw89_vif_link *rtwvif_link, 3017 struct rtw89_sta_link *rtwsta_link) 3018 { 3019 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3020 const struct rtw89_chip_info *chip = rtwdev->chip; 3021 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3022 rtwvif_link->chanctx_idx); 3023 struct ieee80211_link_sta *link_sta; 3024 struct sk_buff *skb; 3025 u8 pads[RTW89_PPE_BW_NUM]; 3026 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3027 u16 lowest_rate; 3028 int ret; 3029 3030 memset(pads, 0, sizeof(pads)); 3031 3032 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3033 if (!skb) { 3034 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3035 return -ENOMEM; 3036 } 3037 3038 rcu_read_lock(); 3039 3040 if (rtwsta_link) 3041 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3042 3043 if (rtwsta_link && link_sta->he_cap.has_he) 3044 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3045 3046 if (vif->p2p) 3047 lowest_rate = RTW89_HW_RATE_OFDM6; 3048 else if (chan->band_type == RTW89_BAND_2G) 3049 lowest_rate = RTW89_HW_RATE_CCK1; 3050 else 3051 lowest_rate = RTW89_HW_RATE_OFDM6; 3052 3053 skb_put(skb, H2C_CMC_TBL_LEN); 3054 SET_CTRL_INFO_MACID(skb->data, mac_id); 3055 SET_CTRL_INFO_OPERATION(skb->data, 1); 3056 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3057 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3058 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3059 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3060 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3061 if (vif->type == NL80211_IFTYPE_STATION) 3062 SET_CMC_TBL_ULDL(skb->data, 1); 3063 else 3064 SET_CMC_TBL_ULDL(skb->data, 0); 3065 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3066 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3067 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3068 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3069 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3070 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3071 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3072 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3073 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3074 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3075 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3076 } 3077 if (rtwsta_link) 3078 
SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3079 link_sta->he_cap.has_he); 3080 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3081 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3082 3083 rcu_read_unlock(); 3084 3085 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3086 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3087 chip->h2c_cctl_func_id, 0, 1, 3088 H2C_CMC_TBL_LEN); 3089 3090 ret = rtw89_h2c_tx(rtwdev, skb, false); 3091 if (ret) { 3092 rtw89_err(rtwdev, "failed to send h2c\n"); 3093 goto fail; 3094 } 3095 3096 return 0; 3097 fail: 3098 dev_kfree_skb_any(skb); 3099 3100 return ret; 3101 } 3102 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3103 3104 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3105 struct ieee80211_link_sta *link_sta, 3106 u8 *pads) 3107 { 3108 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3109 u16 ppe_thres_hdr; 3110 u8 ppe16, ppe8; 3111 u8 n, idx, sh; 3112 u8 ru_bitmap; 3113 bool ppe_th; 3114 u16 ppe; 3115 int i; 3116 3117 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3118 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3119 if (!ppe_th) { 3120 u8 pad; 3121 3122 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3123 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3124 3125 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3126 pads[i] = pad; 3127 3128 return; 3129 } 3130 3131 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3132 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3133 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3134 n = hweight8(ru_bitmap); 3135 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3136 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3137 3138 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3139 if (!(ru_bitmap & BIT(i))) { 3140 pads[i] = 1; 3141 continue; 3142 } 3143 3144 idx = n >> 3; 3145 sh = n & 7; 3146 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3147 3148 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3149 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3150 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3151 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3152 3153 if (ppe16 != 7 && ppe8 == 7) 3154 pads[i] = RTW89_PE_DURATION_16_20; 3155 else if (ppe8 != 7) 3156 pads[i] = RTW89_PE_DURATION_8; 3157 else 3158 pads[i] = RTW89_PE_DURATION_0; 3159 } 3160 } 3161 3162 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3163 struct rtw89_vif_link *rtwvif_link, 3164 struct rtw89_sta_link *rtwsta_link) 3165 { 3166 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3167 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3168 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3169 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3170 struct ieee80211_bss_conf *bss_conf; 3171 struct ieee80211_link_sta *link_sta; 3172 u8 pads[RTW89_PPE_BW_NUM]; 3173 u32 len = sizeof(*h2c); 3174 struct sk_buff *skb; 3175 u16 lowest_rate; 3176 int ret; 3177 3178 memset(pads, 0, sizeof(pads)); 3179 3180 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3181 if (!skb) { 3182 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3183 return -ENOMEM; 3184 } 3185 3186 rcu_read_lock(); 3187 3188 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3189 3190 if (rtwsta_link) { 3191 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3192 3193 if (link_sta->eht_cap.has_eht) 3194 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3195 else if (link_sta->he_cap.has_he) 3196 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3197 } 3198 3199 if (vif->p2p) 3200 lowest_rate = RTW89_HW_RATE_OFDM6; 3201 else if (chan->band_type == RTW89_BAND_2G) 3202 lowest_rate = RTW89_HW_RATE_CCK1; 3203 else 3204 lowest_rate = RTW89_HW_RATE_OFDM6; 3205 3206 skb_put(skb, len); 3207 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3208 3209 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3210 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3211 3212 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3213 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3214 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3215 CCTLINFO_G7_W0_DISDATAFB); 3216 3217 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3218 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3219 3220 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3221 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3222 3223 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3224 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3225 3226 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3227 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3228 3229 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3230 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3231 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3232 } 3233 3234 if (bss_conf->eht_support) { 3235 u16 punct = bss_conf->chanreq.oper.punctured; 3236 3237 h2c->w4 |= le32_encode_bits(~punct, 3238 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3239 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3240 } 3241 3242 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3243 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3244 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3245 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3246 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3247 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3248 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3249 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3250 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3251 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3252 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3253 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3254 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3255 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3256 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3257 3258 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3259 CCTLINFO_G7_W6_ULDL); 3260 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3261 3262 if (rtwsta_link) { 3263 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3264 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3265 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3266 } 3267 3268 rcu_read_unlock(); 3269 3270 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3271 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3272 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3273 len); 3274 3275 ret = rtw89_h2c_tx(rtwdev, skb, false); 3276 if (ret) { 3277 rtw89_err(rtwdev, "failed to send h2c\n"); 3278 goto fail; 3279 } 3280 3281 return 0; 3282 fail: 3283 dev_kfree_skb_any(skb); 3284 3285 return ret; 3286 } 3287 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3288 3289 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3290 struct rtw89_vif_link *rtwvif_link, 3291 struct rtw89_sta_link *rtwsta_link) 3292 { 3293 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3294 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3295 u32 len = sizeof(*h2c); 3296 struct sk_buff *skb; 3297 u16 agg_num = 0; 3298 u8 ba_bmap = 0; 3299 int ret; 3300 u8 tid; 3301 3302 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3303 if (!skb) { 3304 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3305 return -ENOMEM; 3306 } 3307 skb_put(skb, len); 3308 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3309 3310 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3311 if (agg_num == 0) 3312 agg_num = rtwsta->ampdu_params[tid].agg_num; 3313 else 3314 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3315 } 3316 3317 if (agg_num <= 0x20) 3318 ba_bmap = 3; 3319 else if (agg_num > 0x20 && agg_num <= 0x40) 3320 ba_bmap = 0; 3321 else if (agg_num > 0x40 && agg_num <= 0x80) 3322 ba_bmap = 1; 3323 else if (agg_num > 0x80 && agg_num <= 0x100) 3324 ba_bmap = 2; 3325 else if (agg_num > 0x100 && agg_num <= 0x200) 3326 ba_bmap = 4; 3327 else if (agg_num > 0x200 && agg_num <= 0x400) 3328 ba_bmap = 5; 3329 3330 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3331 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3332 3333 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3334 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3335 3336 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3337 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3338 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3339 len); 3340 3341 ret = rtw89_h2c_tx(rtwdev, skb, false); 3342 if (ret) { 3343 rtw89_err(rtwdev, "failed to send h2c\n"); 3344 goto fail; 3345 } 3346 3347 return 0; 3348 fail: 3349 dev_kfree_skb_any(skb); 3350 3351 return ret; 3352 } 3353 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3354 3355 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3356 struct rtw89_sta_link *rtwsta_link) 3357 { 3358 const struct rtw89_chip_info *chip = rtwdev->chip; 3359 struct sk_buff *skb; 3360 int ret; 3361 3362 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3363 if (!skb) { 3364 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3365 return -ENOMEM; 3366 } 3367 skb_put(skb, H2C_CMC_TBL_LEN); 3368 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3369 SET_CTRL_INFO_OPERATION(skb->data, 1); 3370 if (rtwsta_link->cctl_tx_time) { 3371 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3372 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3373 } 3374 if (rtwsta_link->cctl_tx_retry_limit) { 3375 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3376 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3377 } 3378 
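/* Either override above is optional: when cctl_tx_time or cctl_tx_retry_limit is not set for this station, the matching *_SEL field stays 0 in the zeroed buffer and the firmware presumably keeps its default AMPDU max time / data retry limit. */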
3379 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3380 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3381 chip->h2c_cctl_func_id, 0, 1, 3382 H2C_CMC_TBL_LEN); 3383 3384 ret = rtw89_h2c_tx(rtwdev, skb, false); 3385 if (ret) { 3386 rtw89_err(rtwdev, "failed to send h2c\n"); 3387 goto fail; 3388 } 3389 3390 return 0; 3391 fail: 3392 dev_kfree_skb_any(skb); 3393 3394 return ret; 3395 } 3396 3397 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3398 struct rtw89_sta_link *rtwsta_link) 3399 { 3400 const struct rtw89_chip_info *chip = rtwdev->chip; 3401 struct sk_buff *skb; 3402 int ret; 3403 3404 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3405 return 0; 3406 3407 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3408 if (!skb) { 3409 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3410 return -ENOMEM; 3411 } 3412 skb_put(skb, H2C_CMC_TBL_LEN); 3413 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3414 SET_CTRL_INFO_OPERATION(skb->data, 1); 3415 3416 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3417 3418 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3419 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3420 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3421 H2C_CMC_TBL_LEN); 3422 3423 ret = rtw89_h2c_tx(rtwdev, skb, false); 3424 if (ret) { 3425 rtw89_err(rtwdev, "failed to send h2c\n"); 3426 goto fail; 3427 } 3428 3429 return 0; 3430 fail: 3431 dev_kfree_skb_any(skb); 3432 3433 return ret; 3434 } 3435 3436 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3437 struct rtw89_vif_link *rtwvif_link) 3438 { 3439 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3440 rtwvif_link->chanctx_idx); 3441 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3442 struct rtw89_h2c_bcn_upd *h2c; 3443 struct sk_buff *skb_beacon; 3444 struct ieee80211_hdr *hdr; 3445 u32 len = sizeof(*h2c); 3446 struct sk_buff *skb; 3447 int bcn_total_len; 3448 u16 beacon_rate; 3449 u16 tim_offset; 3450 void *noa_data; 3451 u8 noa_len; 3452 int ret; 3453 3454 if (vif->p2p) 3455 beacon_rate = RTW89_HW_RATE_OFDM6; 3456 else if (chan->band_type == RTW89_BAND_2G) 3457 beacon_rate = RTW89_HW_RATE_CCK1; 3458 else 3459 beacon_rate = RTW89_HW_RATE_OFDM6; 3460 3461 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3462 NULL, 0); 3463 if (!skb_beacon) { 3464 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3465 return -ENOMEM; 3466 } 3467 3468 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3469 if (noa_len && 3470 (noa_len <= skb_tailroom(skb_beacon) || 3471 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3472 skb_put_data(skb_beacon, noa_data, noa_len); 3473 } 3474 3475 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3476 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3477 3478 bcn_total_len = len + skb_beacon->len; 3479 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3480 if (!skb) { 3481 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3482 dev_kfree_skb_any(skb_beacon); 3483 return -ENOMEM; 3484 } 3485 skb_put(skb, len); 3486 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3487 3488 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3489 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3490 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3491 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3492 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3493 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3494 
le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3495 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3496 3497 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3498 dev_kfree_skb_any(skb_beacon); 3499 3500 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3501 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3502 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3503 bcn_total_len); 3504 3505 ret = rtw89_h2c_tx(rtwdev, skb, false); 3506 if (ret) { 3507 rtw89_err(rtwdev, "failed to send h2c\n"); 3508 dev_kfree_skb_any(skb); 3509 return ret; 3510 } 3511 3512 return 0; 3513 } 3514 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3515 3516 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3517 struct rtw89_vif_link *rtwvif_link) 3518 { 3519 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3520 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3521 struct rtw89_h2c_bcn_upd_be *h2c; 3522 struct sk_buff *skb_beacon; 3523 struct ieee80211_hdr *hdr; 3524 u32 len = sizeof(*h2c); 3525 struct sk_buff *skb; 3526 int bcn_total_len; 3527 u16 beacon_rate; 3528 u16 tim_offset; 3529 void *noa_data; 3530 u8 noa_len; 3531 int ret; 3532 3533 if (vif->p2p) 3534 beacon_rate = RTW89_HW_RATE_OFDM6; 3535 else if (chan->band_type == RTW89_BAND_2G) 3536 beacon_rate = RTW89_HW_RATE_CCK1; 3537 else 3538 beacon_rate = RTW89_HW_RATE_OFDM6; 3539 3540 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3541 NULL, 0); 3542 if (!skb_beacon) { 3543 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3544 return -ENOMEM; 3545 } 3546 3547 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3548 if (noa_len && 3549 (noa_len <= skb_tailroom(skb_beacon) || 3550 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3551 skb_put_data(skb_beacon, noa_data, noa_len); 3552 } 3553 3554 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3555 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3556 3557 bcn_total_len = len + skb_beacon->len; 3558 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3559 if (!skb) { 3560 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3561 dev_kfree_skb_any(skb_beacon); 3562 return -ENOMEM; 3563 } 3564 skb_put(skb, len); 3565 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3566 3567 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3568 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3569 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3570 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3571 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3572 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3573 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3574 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3575 3576 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3577 dev_kfree_skb_any(skb_beacon); 3578 3579 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3580 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3581 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3582 bcn_total_len); 3583 3584 ret = rtw89_h2c_tx(rtwdev, skb, false); 3585 if (ret) { 3586 rtw89_err(rtwdev, "failed to send h2c\n"); 3587 goto fail; 3588 } 3589 3590 return 0; 3591 3592 fail: 3593 dev_kfree_skb_any(skb); 3594 3595 return ret; 3596 } 3597 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3598 3599 #define H2C_ROLE_MAINTAIN_LEN 4 3600 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3601 struct 
rtw89_vif_link *rtwvif_link, 3602 struct rtw89_sta_link *rtwsta_link, 3603 enum rtw89_upd_mode upd_mode) 3604 { 3605 struct sk_buff *skb; 3606 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3607 u8 self_role; 3608 int ret; 3609 3610 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3611 if (rtwsta_link) 3612 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3613 else 3614 self_role = rtwvif_link->self_role; 3615 } else { 3616 self_role = rtwvif_link->self_role; 3617 } 3618 3619 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3620 if (!skb) { 3621 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3622 return -ENOMEM; 3623 } 3624 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3625 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3626 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3627 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3628 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3629 3630 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3631 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3632 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3633 H2C_ROLE_MAINTAIN_LEN); 3634 3635 ret = rtw89_h2c_tx(rtwdev, skb, false); 3636 if (ret) { 3637 rtw89_err(rtwdev, "failed to send h2c\n"); 3638 goto fail; 3639 } 3640 3641 return 0; 3642 fail: 3643 dev_kfree_skb_any(skb); 3644 3645 return ret; 3646 } 3647 3648 static enum rtw89_fw_sta_type 3649 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3650 struct rtw89_sta_link *rtwsta_link) 3651 { 3652 struct ieee80211_bss_conf *bss_conf; 3653 struct ieee80211_link_sta *link_sta; 3654 enum rtw89_fw_sta_type type; 3655 3656 rcu_read_lock(); 3657 3658 if (!rtwsta_link) 3659 goto by_vif; 3660 3661 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3662 3663 if (link_sta->eht_cap.has_eht) 3664 type = RTW89_FW_BE_STA; 3665 else if (link_sta->he_cap.has_he) 3666 type = RTW89_FW_AX_STA; 3667 else 3668 type = RTW89_FW_N_AC_STA; 3669 3670 goto out; 3671 3672 by_vif: 3673 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3674 3675 if (bss_conf->eht_support) 3676 type = RTW89_FW_BE_STA; 3677 else if (bss_conf->he_support) 3678 type = RTW89_FW_AX_STA; 3679 else 3680 type = RTW89_FW_N_AC_STA; 3681 3682 out: 3683 rcu_read_unlock(); 3684 3685 return type; 3686 } 3687 3688 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3689 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3690 { 3691 struct sk_buff *skb; 3692 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3693 u8 self_role = rtwvif_link->self_role; 3694 enum rtw89_fw_sta_type sta_type; 3695 u8 net_type = rtwvif_link->net_type; 3696 struct rtw89_h2c_join_v1 *h2c_v1; 3697 struct rtw89_h2c_join *h2c; 3698 u32 len = sizeof(*h2c); 3699 bool format_v1 = false; 3700 int ret; 3701 3702 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3703 len = sizeof(*h2c_v1); 3704 format_v1 = true; 3705 } 3706 3707 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3708 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3709 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3710 } 3711 3712 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3713 if (!skb) { 3714 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3715 return -ENOMEM; 3716 } 3717 skb_put(skb, len); 3718 h2c = (struct rtw89_h2c_join *)skb->data; 3719 3720 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3721 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3722 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3723 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3724 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3725 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3726 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3727 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3728 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3729 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3730 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3731 le32_encode_bits(rtwvif_link->wifi_role, 3732 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3733 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3734 3735 if (!format_v1) 3736 goto done; 3737 3738 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3739 3740 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3741 3742 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3743 h2c_v1->w2 = 0; 3744 3745 done: 3746 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3747 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3748 H2C_FUNC_MAC_JOININFO, 0, 1, 3749 len); 3750 3751 ret = rtw89_h2c_tx(rtwdev, skb, false); 3752 if (ret) { 3753 rtw89_err(rtwdev, "failed to send h2c\n"); 3754 goto fail; 3755 } 3756 3757 return 0; 3758 fail: 3759 dev_kfree_skb_any(skb); 3760 3761 return ret; 3762 } 3763 3764 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3765 { 3766 struct rtw89_h2c_notify_dbcc *h2c; 3767 u32 len = sizeof(*h2c); 3768 struct sk_buff *skb; 3769 int ret; 3770 3771 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3772 if (!skb) { 3773 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3774 return -ENOMEM; 3775 } 3776 skb_put(skb, len); 3777 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3778 3779 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3780 3781 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3782 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3783 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3784 len); 3785 3786 ret = rtw89_h2c_tx(rtwdev, skb, false); 3787 if (ret) { 3788 rtw89_err(rtwdev, "failed to send h2c\n"); 3789 goto fail; 3790 } 3791 3792 return 0; 3793 fail: 3794 dev_kfree_skb_any(skb); 3795 3796 return ret; 3797 } 3798 3799 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3800 bool pause) 3801 { 3802 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3803 struct rtw89_fw_macid_pause_grp *h2c; 3804 __le32 set = cpu_to_le32(BIT(sh)); 3805 u8 h2c_macid_pause_id; 3806 struct sk_buff *skb; 3807 u32 len; 3808 int ret; 3809 3810 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3811 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3812 len = sizeof(*h2c_new); 3813 } else { 3814 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3815 len = sizeof(*h2c); 3816 } 3817 3818 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3819 if (!skb) { 3820 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3821 return -ENOMEM; 3822 } 3823 skb_put(skb, len); 3824 3825 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 
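/* Firmware with the MACID_PAUSE_SLEEP feature takes a combined pause + sleep command: the mask words apparently select which macid bit of this group is updated, while the pause/sleep words carry the new state (set only when pausing, left clear when resuming). */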
3826 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 3827 3828 h2c_new->n[0].pause_mask_grp[grp] = set; 3829 h2c_new->n[0].sleep_mask_grp[grp] = set; 3830 if (pause) { 3831 h2c_new->n[0].pause_grp[grp] = set; 3832 h2c_new->n[0].sleep_grp[grp] = set; 3833 } 3834 } else { 3835 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3836 3837 h2c->mask_grp[grp] = set; 3838 if (pause) 3839 h2c->pause_grp[grp] = set; 3840 } 3841 3842 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3843 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3844 h2c_macid_pause_id, 1, 0, 3845 len); 3846 3847 ret = rtw89_h2c_tx(rtwdev, skb, false); 3848 if (ret) { 3849 rtw89_err(rtwdev, "failed to send h2c\n"); 3850 goto fail; 3851 } 3852 3853 return 0; 3854 fail: 3855 dev_kfree_skb_any(skb); 3856 3857 return ret; 3858 } 3859 3860 #define H2C_EDCA_LEN 12 3861 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3862 u8 ac, u32 val) 3863 { 3864 struct sk_buff *skb; 3865 int ret; 3866 3867 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3868 if (!skb) { 3869 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3870 return -ENOMEM; 3871 } 3872 skb_put(skb, H2C_EDCA_LEN); 3873 RTW89_SET_EDCA_SEL(skb->data, 0); 3874 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 3875 RTW89_SET_EDCA_WMM(skb->data, 0); 3876 RTW89_SET_EDCA_AC(skb->data, ac); 3877 RTW89_SET_EDCA_PARAM(skb->data, val); 3878 3879 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3880 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3881 H2C_FUNC_USR_EDCA, 0, 1, 3882 H2C_EDCA_LEN); 3883 3884 ret = rtw89_h2c_tx(rtwdev, skb, false); 3885 if (ret) { 3886 rtw89_err(rtwdev, "failed to send h2c\n"); 3887 goto fail; 3888 } 3889 3890 return 0; 3891 fail: 3892 dev_kfree_skb_any(skb); 3893 3894 return ret; 3895 } 3896 3897 #define H2C_TSF32_TOGL_LEN 4 3898 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 3899 struct rtw89_vif_link *rtwvif_link, 3900 bool en) 3901 { 3902 struct sk_buff *skb; 3903 u16 early_us = en ? 
2000 : 0; 3904 u8 *cmd; 3905 int ret; 3906 3907 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3908 if (!skb) { 3909 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3910 return -ENOMEM; 3911 } 3912 skb_put(skb, H2C_TSF32_TOGL_LEN); 3913 cmd = skb->data; 3914 3915 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 3916 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3917 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 3918 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3919 3920 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3921 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3922 H2C_FUNC_TSF32_TOGL, 0, 0, 3923 H2C_TSF32_TOGL_LEN); 3924 3925 ret = rtw89_h2c_tx(rtwdev, skb, false); 3926 if (ret) { 3927 rtw89_err(rtwdev, "failed to send h2c\n"); 3928 goto fail; 3929 } 3930 3931 return 0; 3932 fail: 3933 dev_kfree_skb_any(skb); 3934 3935 return ret; 3936 } 3937 3938 #define H2C_OFLD_CFG_LEN 8 3939 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3940 { 3941 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3942 struct sk_buff *skb; 3943 int ret; 3944 3945 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3946 if (!skb) { 3947 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3948 return -ENOMEM; 3949 } 3950 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3951 3952 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3953 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3954 H2C_FUNC_OFLD_CFG, 0, 1, 3955 H2C_OFLD_CFG_LEN); 3956 3957 ret = rtw89_h2c_tx(rtwdev, skb, false); 3958 if (ret) { 3959 rtw89_err(rtwdev, "failed to send h2c\n"); 3960 goto fail; 3961 } 3962 3963 return 0; 3964 fail: 3965 dev_kfree_skb_any(skb); 3966 3967 return ret; 3968 } 3969 3970 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 3971 { 3972 struct rtw89_h2c_tx_duty *h2c; 3973 u32 len = sizeof(*h2c); 3974 struct sk_buff *skb; 3975 u16 pause, active; 3976 int ret; 3977 3978 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3979 if (!skb) { 3980 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 3981 return -ENOMEM; 3982 } 3983 3984 skb_put(skb, len); 3985 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 3986 3987 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 3988 3989 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 3990 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 3991 } else { 3992 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 3993 pause = 100 - active; 3994 3995 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 3996 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 3997 } 3998 3999 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4000 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4001 H2C_FUNC_TX_DUTY, 0, 0, len); 4002 4003 ret = rtw89_h2c_tx(rtwdev, skb, false); 4004 if (ret) { 4005 rtw89_err(rtwdev, "failed to send h2c\n"); 4006 goto fail; 4007 } 4008 4009 return 0; 4010 fail: 4011 dev_kfree_skb_any(skb); 4012 4013 return ret; 4014 } 4015 4016 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4017 struct rtw89_vif_link *rtwvif_link, 4018 bool connect) 4019 { 4020 struct ieee80211_bss_conf *bss_conf; 4021 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4022 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4023 struct rtw89_h2c_bcnfltr *h2c; 4024 u32 len = sizeof(*h2c); 4025 struct sk_buff *skb; 4026 int ret; 4027 4028 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4029 return -EINVAL; 4030 4031 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4032 return -EINVAL; 
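/* The CQM RSSI threshold and hysteresis come from the link's bss_conf
 * under RCU; values left at zero by userspace fall back to the driver
 * defaults initialized above.
 */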
4033 4034 rcu_read_lock(); 4035 4036 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4037 4038 if (bss_conf->cqm_rssi_hyst) 4039 hyst = bss_conf->cqm_rssi_hyst; 4040 if (bss_conf->cqm_rssi_thold) 4041 thold = bss_conf->cqm_rssi_thold; 4042 4043 rcu_read_unlock(); 4044 4045 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4046 if (!skb) { 4047 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4048 return -ENOMEM; 4049 } 4050 4051 skb_put(skb, len); 4052 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4053 4054 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4055 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4056 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4057 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4058 RTW89_H2C_BCNFLTR_W0_MODE) | 4059 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 4060 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4061 le32_encode_bits(thold + MAX_RSSI, 4062 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4063 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4064 4065 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4066 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4067 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4068 4069 ret = rtw89_h2c_tx(rtwdev, skb, false); 4070 if (ret) { 4071 rtw89_err(rtwdev, "failed to send h2c\n"); 4072 goto fail; 4073 } 4074 4075 return 0; 4076 fail: 4077 dev_kfree_skb_any(skb); 4078 4079 return ret; 4080 } 4081 4082 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4083 struct rtw89_rx_phy_ppdu *phy_ppdu) 4084 { 4085 struct rtw89_h2c_ofld_rssi *h2c; 4086 u32 len = sizeof(*h2c); 4087 struct sk_buff *skb; 4088 s8 rssi; 4089 int ret; 4090 4091 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4092 return -EINVAL; 4093 4094 if (!phy_ppdu) 4095 return -EINVAL; 4096 4097 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4098 if (!skb) { 4099 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4100 return -ENOMEM; 4101 } 4102 4103 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4104 skb_put(skb, len); 4105 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4106 4107 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4108 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4109 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4110 4111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4112 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4113 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4114 4115 ret = rtw89_h2c_tx(rtwdev, skb, false); 4116 if (ret) { 4117 rtw89_err(rtwdev, "failed to send h2c\n"); 4118 goto fail; 4119 } 4120 4121 return 0; 4122 fail: 4123 dev_kfree_skb_any(skb); 4124 4125 return ret; 4126 } 4127 4128 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4129 { 4130 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4131 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4132 struct rtw89_h2c_ofld *h2c; 4133 u32 len = sizeof(*h2c); 4134 struct sk_buff *skb; 4135 int ret; 4136 4137 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4138 return -EINVAL; 4139 4140 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4141 if (!skb) { 4142 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4143 return -ENOMEM; 4144 } 4145 4146 skb_put(skb, len); 4147 h2c = (struct rtw89_h2c_ofld *)skb->data; 4148 4149 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4150 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4151 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4152 4153 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4154 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4155 H2C_FUNC_OFLD_TP, 0, 1, len); 4156 4157 ret = rtw89_h2c_tx(rtwdev, skb, false); 4158 if (ret) { 4159 rtw89_err(rtwdev, "failed to send h2c\n"); 4160 goto fail; 4161 } 4162 4163 return 0; 4164 fail: 4165 dev_kfree_skb_any(skb); 4166 4167 return ret; 4168 } 4169 4170 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4171 { 4172 const struct rtw89_chip_info *chip = rtwdev->chip; 4173 struct rtw89_h2c_ra_v1 *h2c_v1; 4174 struct rtw89_h2c_ra *h2c; 4175 u32 len = sizeof(*h2c); 4176 bool format_v1 = false; 4177 struct sk_buff *skb; 4178 int ret; 4179 4180 if (chip->chip_gen == RTW89_CHIP_BE) { 4181 len = sizeof(*h2c_v1); 4182 format_v1 = true; 4183 } 4184 4185 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4186 if (!skb) { 4187 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4188 return -ENOMEM; 4189 } 4190 skb_put(skb, len); 4191 h2c = (struct rtw89_h2c_ra *)skb->data; 4192 rtw89_debug(rtwdev, RTW89_DBG_RA, 4193 "ra cmd msk: %llx ", ra->ra_mask); 4194 4195 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4196 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4197 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4198 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4199 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4200 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4201 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4202 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4203 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4204 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4205 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4206 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4207 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4208 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4209 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4210 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4211 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4212 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4213 4214 if (!format_v1) 4215 goto csi; 4216 4217 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4218 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4219 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4220 4221 csi: 4222 if (!csi) 4223 goto done; 4224 4225 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4226 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4227 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4228 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4229 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4230 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4231 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4232 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4233 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4234 4235 done: 4236 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4237 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4238 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4239 len); 4240 4241 ret = rtw89_h2c_tx(rtwdev, skb, false); 4242 if (ret) { 4243 rtw89_err(rtwdev, "failed 
to send h2c\n"); 4244 goto fail; 4245 } 4246 4247 return 0; 4248 fail: 4249 dev_kfree_skb_any(skb); 4250 4251 return ret; 4252 } 4253 4254 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4255 { 4256 struct rtw89_btc *btc = &rtwdev->btc; 4257 struct rtw89_btc_dm *dm = &btc->dm; 4258 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4259 struct rtw89_btc_module *module = &init_info->module; 4260 struct rtw89_btc_ant_info *ant = &module->ant; 4261 struct rtw89_h2c_cxinit *h2c; 4262 u32 len = sizeof(*h2c); 4263 struct sk_buff *skb; 4264 int ret; 4265 4266 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4267 if (!skb) { 4268 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4269 return -ENOMEM; 4270 } 4271 skb_put(skb, len); 4272 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4273 4274 h2c->hdr.type = type; 4275 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4276 4277 h2c->ant_type = ant->type; 4278 h2c->ant_num = ant->num; 4279 h2c->ant_iso = ant->isolation; 4280 h2c->ant_info = 4281 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4282 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4283 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4284 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4285 4286 h2c->mod_rfe = module->rfe_type; 4287 h2c->mod_cv = module->cv; 4288 h2c->mod_info = 4289 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4290 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4291 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4292 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4293 h2c->mod_adie_kt = module->kt_ver_adie; 4294 h2c->wl_gch = init_info->wl_guard_ch; 4295 4296 h2c->info = 4297 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4298 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4299 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4300 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4301 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4302 4303 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4304 H2C_CAT_OUTSRC, BTFC_SET, 4305 SET_DRV_INFO, 0, 0, 4306 len); 4307 4308 ret = rtw89_h2c_tx(rtwdev, skb, false); 4309 if (ret) { 4310 rtw89_err(rtwdev, "failed to send h2c\n"); 4311 goto fail; 4312 } 4313 4314 return 0; 4315 fail: 4316 dev_kfree_skb_any(skb); 4317 4318 return ret; 4319 } 4320 4321 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4322 { 4323 struct rtw89_btc *btc = &rtwdev->btc; 4324 struct rtw89_btc_dm *dm = &btc->dm; 4325 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4326 struct rtw89_h2c_cxinit_v7 *h2c; 4327 u32 len = sizeof(*h2c); 4328 struct sk_buff *skb; 4329 int ret; 4330 4331 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4332 if (!skb) { 4333 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4334 return -ENOMEM; 4335 } 4336 skb_put(skb, len); 4337 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4338 4339 h2c->hdr.type = type; 4340 h2c->hdr.ver = btc->ver->fcxinit; 4341 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4342 h2c->init = *init_info; 4343 4344 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4345 H2C_CAT_OUTSRC, BTFC_SET, 4346 SET_DRV_INFO, 0, 0, 4347 len); 4348 4349 ret = rtw89_h2c_tx(rtwdev, skb, false); 4350 if (ret) { 4351 rtw89_err(rtwdev, "failed to send h2c\n"); 4352 goto fail; 4353 } 
4354 4355 return 0; 4356 fail: 4357 dev_kfree_skb_any(skb); 4358 4359 return ret; 4360 } 4361 4362 #define PORT_DATA_OFFSET 4 4363 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4364 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4365 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4366 4367 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4368 { 4369 struct rtw89_btc *btc = &rtwdev->btc; 4370 const struct rtw89_btc_ver *ver = btc->ver; 4371 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4372 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4373 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4374 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4375 struct sk_buff *skb; 4376 u32 len; 4377 u8 offset = 0; 4378 u8 *cmd; 4379 int ret; 4380 int i; 4381 4382 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4383 4384 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4385 if (!skb) { 4386 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4387 return -ENOMEM; 4388 } 4389 skb_put(skb, len); 4390 cmd = skb->data; 4391 4392 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4393 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4394 4395 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4396 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4397 4398 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4399 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4400 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4401 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4402 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4403 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4404 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4405 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4406 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4407 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4408 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4409 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4410 4411 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4412 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4413 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4414 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4415 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4416 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4417 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4418 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4419 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4420 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4421 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4422 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4423 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4424 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4425 } 4426 4427 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4428 H2C_CAT_OUTSRC, BTFC_SET, 4429 SET_DRV_INFO, 0, 0, 4430 len); 4431 4432 ret = rtw89_h2c_tx(rtwdev, skb, false); 4433 if (ret) { 4434 rtw89_err(rtwdev, "failed to send h2c\n"); 4435 goto fail; 4436 } 4437 4438 return 0; 4439 fail: 4440 dev_kfree_skb_any(skb); 4441 4442 return ret; 4443 } 4444 4445 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4446 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4447 4448 int 
rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4449 { 4450 struct rtw89_btc *btc = &rtwdev->btc; 4451 const struct rtw89_btc_ver *ver = btc->ver; 4452 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4453 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4454 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4455 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4456 struct sk_buff *skb; 4457 u32 len; 4458 u8 *cmd, offset; 4459 int ret; 4460 int i; 4461 4462 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4463 4464 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4465 if (!skb) { 4466 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4467 return -ENOMEM; 4468 } 4469 skb_put(skb, len); 4470 cmd = skb->data; 4471 4472 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4473 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4474 4475 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4476 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4477 4478 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4479 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4480 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4481 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4482 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4483 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4484 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4485 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4486 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4487 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4488 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4489 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4490 4491 offset = PORT_DATA_OFFSET; 4492 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4493 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4494 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4495 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4496 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4497 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4498 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4499 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4500 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4501 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4502 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4503 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4504 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4505 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4506 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4507 } 4508 4509 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4510 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4511 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4512 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4513 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4514 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4515 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4516 4517 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4518 H2C_CAT_OUTSRC, BTFC_SET, 4519 SET_DRV_INFO, 0, 0, 4520 len); 4521 4522 ret = rtw89_h2c_tx(rtwdev, skb, false); 
4523 if (ret) { 4524 rtw89_err(rtwdev, "failed to send h2c\n"); 4525 goto fail; 4526 } 4527 4528 return 0; 4529 fail: 4530 dev_kfree_skb_any(skb); 4531 4532 return ret; 4533 } 4534 4535 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4536 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4537 4538 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4539 { 4540 struct rtw89_btc *btc = &rtwdev->btc; 4541 const struct rtw89_btc_ver *ver = btc->ver; 4542 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4543 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4544 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4545 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4546 struct sk_buff *skb; 4547 u32 len; 4548 u8 *cmd, offset; 4549 int ret; 4550 int i; 4551 4552 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4553 4554 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4555 if (!skb) { 4556 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4557 return -ENOMEM; 4558 } 4559 skb_put(skb, len); 4560 cmd = skb->data; 4561 4562 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4563 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4564 4565 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4566 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4567 4568 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4569 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4570 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4571 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4572 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4573 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4574 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4575 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4576 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4577 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4578 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4579 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4580 4581 offset = PORT_DATA_OFFSET; 4582 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4583 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4584 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4585 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4586 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4587 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4588 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4589 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4590 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4591 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4592 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4593 } 4594 4595 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4596 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4597 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4598 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4599 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4600 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4601 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4602 4603 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4604 H2C_CAT_OUTSRC, BTFC_SET, 4605 
SET_DRV_INFO, 0, 0, 4606 len); 4607 4608 ret = rtw89_h2c_tx(rtwdev, skb, false); 4609 if (ret) { 4610 rtw89_err(rtwdev, "failed to send h2c\n"); 4611 goto fail; 4612 } 4613 4614 return 0; 4615 fail: 4616 dev_kfree_skb_any(skb); 4617 4618 return ret; 4619 } 4620 4621 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4622 { 4623 struct rtw89_btc *btc = &rtwdev->btc; 4624 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4625 struct rtw89_h2c_cxrole_v7 *h2c; 4626 u32 len = sizeof(*h2c); 4627 struct sk_buff *skb; 4628 int ret; 4629 4630 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4631 if (!skb) { 4632 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4633 return -ENOMEM; 4634 } 4635 skb_put(skb, len); 4636 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4637 4638 h2c->hdr.type = type; 4639 h2c->hdr.ver = btc->ver->fwlrole; 4640 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4641 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4642 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4643 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4644 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4645 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4646 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4647 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4648 4649 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4650 H2C_CAT_OUTSRC, BTFC_SET, 4651 SET_DRV_INFO, 0, 0, 4652 len); 4653 4654 ret = rtw89_h2c_tx(rtwdev, skb, false); 4655 if (ret) { 4656 rtw89_err(rtwdev, "failed to send h2c\n"); 4657 goto fail; 4658 } 4659 4660 return 0; 4661 fail: 4662 dev_kfree_skb_any(skb); 4663 4664 return ret; 4665 } 4666 4667 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4668 { 4669 struct rtw89_btc *btc = &rtwdev->btc; 4670 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4671 struct rtw89_h2c_cxrole_v8 *h2c; 4672 u32 len = sizeof(*h2c); 4673 struct sk_buff *skb; 4674 int ret; 4675 4676 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4677 if (!skb) { 4678 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4679 return -ENOMEM; 4680 } 4681 skb_put(skb, len); 4682 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4683 4684 h2c->hdr.type = type; 4685 h2c->hdr.ver = btc->ver->fwlrole; 4686 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4687 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4688 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4689 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4690 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4691 4692 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4693 H2C_CAT_OUTSRC, BTFC_SET, 4694 SET_DRV_INFO, 0, 0, 4695 len); 4696 4697 ret = rtw89_h2c_tx(rtwdev, skb, false); 4698 if (ret) { 4699 rtw89_err(rtwdev, "failed to send h2c\n"); 4700 goto fail; 4701 } 4702 4703 return 0; 4704 fail: 4705 dev_kfree_skb_any(skb); 4706 4707 return ret; 4708 } 4709 4710 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4711 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4712 { 4713 struct rtw89_btc *btc = &rtwdev->btc; 4714 const struct rtw89_btc_ver *ver = btc->ver; 4715 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4716 struct sk_buff *skb; 4717 u8 *cmd; 4718 int ret; 4719 4720 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4721 if (!skb) { 4722 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4723 return -ENOMEM; 4724 } 4725 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4726 cmd = skb->data; 
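/* Byte-array coex control block: CXHDR first, then the manual,
 * ignore-BT and always-freerun flags. trace_step is only part of the
 * fcxctrl version 0 layout, so it is skipped for newer versions.
 */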
4727 4728 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4729 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4730 4731 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4732 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4733 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4734 if (ver->fcxctrl == 0) 4735 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4736 4737 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4738 H2C_CAT_OUTSRC, BTFC_SET, 4739 SET_DRV_INFO, 0, 0, 4740 H2C_LEN_CXDRVINFO_CTRL); 4741 4742 ret = rtw89_h2c_tx(rtwdev, skb, false); 4743 if (ret) { 4744 rtw89_err(rtwdev, "failed to send h2c\n"); 4745 goto fail; 4746 } 4747 4748 return 0; 4749 fail: 4750 dev_kfree_skb_any(skb); 4751 4752 return ret; 4753 } 4754 4755 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4756 { 4757 struct rtw89_btc *btc = &rtwdev->btc; 4758 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4759 struct rtw89_h2c_cxctrl_v7 *h2c; 4760 u32 len = sizeof(*h2c); 4761 struct sk_buff *skb; 4762 int ret; 4763 4764 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4765 if (!skb) { 4766 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4767 return -ENOMEM; 4768 } 4769 skb_put(skb, len); 4770 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4771 4772 h2c->hdr.type = type; 4773 h2c->hdr.ver = btc->ver->fcxctrl; 4774 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4775 h2c->ctrl = *ctrl; 4776 4777 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4778 H2C_CAT_OUTSRC, BTFC_SET, 4779 SET_DRV_INFO, 0, 0, len); 4780 4781 ret = rtw89_h2c_tx(rtwdev, skb, false); 4782 if (ret) { 4783 rtw89_err(rtwdev, "failed to send h2c\n"); 4784 goto fail; 4785 } 4786 4787 return 0; 4788 fail: 4789 dev_kfree_skb_any(skb); 4790 4791 return ret; 4792 } 4793 4794 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4795 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4796 { 4797 struct rtw89_btc *btc = &rtwdev->btc; 4798 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4799 struct sk_buff *skb; 4800 u8 *cmd; 4801 int ret; 4802 4803 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4804 if (!skb) { 4805 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4806 return -ENOMEM; 4807 } 4808 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4809 cmd = skb->data; 4810 4811 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4812 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4813 4814 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4815 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4816 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4817 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4818 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4819 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4820 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4821 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4822 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4823 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4824 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4825 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4826 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4827 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4828 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4829 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4830 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4831 4832 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4833 H2C_CAT_OUTSRC, BTFC_SET, 4834 SET_DRV_INFO, 0, 0, 4835 
H2C_LEN_CXDRVINFO_TRX); 4836 4837 ret = rtw89_h2c_tx(rtwdev, skb, false); 4838 if (ret) { 4839 rtw89_err(rtwdev, "failed to send h2c\n"); 4840 goto fail; 4841 } 4842 4843 return 0; 4844 fail: 4845 dev_kfree_skb_any(skb); 4846 4847 return ret; 4848 } 4849 4850 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4851 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4852 { 4853 struct rtw89_btc *btc = &rtwdev->btc; 4854 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4855 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4856 struct sk_buff *skb; 4857 u8 *cmd; 4858 int ret; 4859 4860 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4861 if (!skb) { 4862 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4863 return -ENOMEM; 4864 } 4865 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4866 cmd = skb->data; 4867 4868 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4869 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4870 4871 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4872 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4873 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4874 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4875 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4876 4877 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4878 H2C_CAT_OUTSRC, BTFC_SET, 4879 SET_DRV_INFO, 0, 0, 4880 H2C_LEN_CXDRVINFO_RFK); 4881 4882 ret = rtw89_h2c_tx(rtwdev, skb, false); 4883 if (ret) { 4884 rtw89_err(rtwdev, "failed to send h2c\n"); 4885 goto fail; 4886 } 4887 4888 return 0; 4889 fail: 4890 dev_kfree_skb_any(skb); 4891 4892 return ret; 4893 } 4894 4895 #define H2C_LEN_PKT_OFLD 4 4896 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4897 { 4898 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4899 struct sk_buff *skb; 4900 unsigned int cond; 4901 u8 *cmd; 4902 int ret; 4903 4904 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4905 if (!skb) { 4906 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4907 return -ENOMEM; 4908 } 4909 skb_put(skb, H2C_LEN_PKT_OFLD); 4910 cmd = skb->data; 4911 4912 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4913 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4914 4915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4916 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4917 H2C_FUNC_PACKET_OFLD, 1, 1, 4918 H2C_LEN_PKT_OFLD); 4919 4920 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4921 4922 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4923 if (ret < 0) { 4924 rtw89_debug(rtwdev, RTW89_DBG_FW, 4925 "failed to del pkt ofld: id %d, ret %d\n", 4926 id, ret); 4927 return ret; 4928 } 4929 4930 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4931 return 0; 4932 } 4933 4934 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4935 struct sk_buff *skb_ofld) 4936 { 4937 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4938 struct sk_buff *skb; 4939 unsigned int cond; 4940 u8 *cmd; 4941 u8 alloc_id; 4942 int ret; 4943 4944 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4945 RTW89_MAX_PKT_OFLD_NUM); 4946 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4947 return -ENOSPC; 4948 4949 *id = alloc_id; 4950 4951 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4952 if (!skb) { 4953 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4954 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4955 return -ENOMEM; 4956 } 4957 skb_put(skb, 
H2C_LEN_PKT_OFLD); 4958 cmd = skb->data; 4959 4960 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4961 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4962 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4963 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4964 4965 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4966 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4967 H2C_FUNC_PACKET_OFLD, 1, 1, 4968 H2C_LEN_PKT_OFLD + skb_ofld->len); 4969 4970 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4971 4972 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4973 if (ret < 0) { 4974 rtw89_debug(rtwdev, RTW89_DBG_FW, 4975 "failed to add pkt ofld: id %d, ret %d\n", 4976 alloc_id, ret); 4977 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4978 return ret; 4979 } 4980 4981 return 0; 4982 } 4983 4984 static 4985 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4986 struct list_head *chan_list) 4987 { 4988 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4989 struct rtw89_h2c_chinfo_elem *elem; 4990 struct rtw89_mac_chinfo *ch_info; 4991 struct rtw89_h2c_chinfo *h2c; 4992 struct sk_buff *skb; 4993 unsigned int cond; 4994 int skb_len; 4995 int ret; 4996 4997 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4998 4999 skb_len = struct_size(h2c, elem, ch_num); 5000 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5001 if (!skb) { 5002 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5003 return -ENOMEM; 5004 } 5005 skb_put(skb, sizeof(*h2c)); 5006 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5007 5008 h2c->ch_num = ch_num; 5009 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5010 5011 list_for_each_entry(ch_info, chan_list, list) { 5012 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5013 5014 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5015 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5016 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5017 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5018 5019 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5020 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5021 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5022 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5023 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5024 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5025 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5026 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5027 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5028 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5029 5030 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5031 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5032 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5033 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5034 5035 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5036 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5037 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5038 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5039 } 5040 5041 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5042 H2C_CAT_MAC, 
H2C_CL_MAC_FW_OFLD, 5043 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5044 5045 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5046 5047 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5048 if (ret) { 5049 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5050 return ret; 5051 } 5052 5053 return 0; 5054 } 5055 5056 static 5057 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5058 struct list_head *chan_list, 5059 struct rtw89_vif_link *rtwvif_link) 5060 { 5061 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5062 struct rtw89_h2c_chinfo_elem_be *elem; 5063 struct rtw89_mac_chinfo_be *ch_info; 5064 struct rtw89_h2c_chinfo_be *h2c; 5065 struct sk_buff *skb; 5066 unsigned int cond; 5067 u8 ver = U8_MAX; 5068 int skb_len; 5069 int ret; 5070 5071 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5072 5073 skb_len = struct_size(h2c, elem, ch_num); 5074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5075 if (!skb) { 5076 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5077 return -ENOMEM; 5078 } 5079 5080 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5081 ver = 0; 5082 5083 skb_put(skb, sizeof(*h2c)); 5084 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5085 5086 h2c->ch_num = ch_num; 5087 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5088 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5089 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5090 5091 list_for_each_entry(ch_info, chan_list, list) { 5092 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5093 5094 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5095 le32_encode_bits(ch_info->central_ch, 5096 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5097 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5098 5099 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5100 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5101 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5102 le32_encode_bits(ch_info->pause_data, 5103 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5104 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5105 le32_encode_bits(ch_info->rand_seq_num, 5106 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5107 le32_encode_bits(ch_info->notify_action, 5108 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5109 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5110 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5111 le32_encode_bits(ch_info->leave_crit, 5112 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5113 le32_encode_bits(ch_info->chkpt_timer, 5114 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5115 5116 elem->w2 = le32_encode_bits(ch_info->leave_time, 5117 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5118 le32_encode_bits(ch_info->leave_th, 5119 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5120 le32_encode_bits(ch_info->tx_pkt_ctrl, 5121 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5122 5123 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5124 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5125 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5126 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5127 5128 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5129 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5130 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5131 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5132 5133 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5134 le32_encode_bits(ch_info->fw_probe0_ssids, 5135 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5136 5137 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5138 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5139 le32_encode_bits(ch_info->fw_probe0_bssids, 5140 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5141 if (ver == 0) 5142 elem->w0 |= 5143 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5144 else 5145 elem->w7 = le32_encode_bits(ch_info->period, 5146 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5147 } 5148 5149 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5150 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5151 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5152 5153 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5154 5155 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5156 if (ret) { 5157 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5158 return ret; 5159 } 5160 5161 return 0; 5162 } 5163 5164 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5165 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5166 struct rtw89_scan_option *option, 5167 struct rtw89_vif_link *rtwvif_link, 5168 bool wowlan) 5169 { 5170 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5171 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5172 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5173 struct rtw89_h2c_scanofld *h2c; 5174 u32 len = sizeof(*h2c); 5175 struct sk_buff *skb; 5176 unsigned int cond; 5177 u64 tsf = 0; 5178 int ret; 5179 5180 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5181 if (!skb) { 5182 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5183 return -ENOMEM; 5184 } 5185 skb_put(skb, len); 5186 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5187 5188 if (option->delay) { 5189 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5190 if (ret) { 5191 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5192 scan_mode = RTW89_SCAN_IMMEDIATE; 5193 } else { 5194 scan_mode = RTW89_SCAN_DELAY; 5195 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5196 } 5197 } 5198 5199 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5200 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5201 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5202 le32_encode_bits(option->enable, 
RTW89_H2C_SCANOFLD_W0_OPERATION); 5203 5204 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5205 le32_encode_bits(option->target_ch_mode, 5206 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5207 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5208 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5209 5210 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5211 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5212 5213 if (option->target_ch_mode) { 5214 h2c->w1 |= le32_encode_bits(op->band_width, 5215 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5216 le32_encode_bits(op->primary_channel, 5217 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5218 le32_encode_bits(op->channel, 5219 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5220 h2c->w0 |= le32_encode_bits(op->band_type, 5221 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5222 } 5223 5224 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5225 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5226 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5227 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5228 5229 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5230 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5231 H2C_FUNC_SCANOFLD, 1, 1, 5232 len); 5233 5234 if (option->enable) 5235 cond = RTW89_SCANOFLD_WAIT_COND_START; 5236 else 5237 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5238 5239 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5240 if (ret) { 5241 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5242 return ret; 5243 } 5244 5245 return 0; 5246 } 5247 5248 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5249 struct rtw89_scan_option *option) 5250 { 5251 struct ieee80211_supported_band *sband; 5252 struct ieee80211_channel *chan; 5253 u8 i, idx; 5254 5255 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5256 if (!sband) { 5257 option->prohib_chan = U64_MAX; 5258 return; 5259 } 5260 5261 for (i = 0; i < sband->n_channels; i++) { 5262 chan = &sband->channels[i]; 5263 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5264 idx = (chan->hw_value - 1) / 4; 5265 option->prohib_chan |= BIT(idx); 5266 } 5267 } 5268 } 5269 5270 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5271 struct rtw89_scan_option *option, 5272 struct rtw89_vif_link *rtwvif_link, 5273 bool wowlan) 5274 { 5275 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5276 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5277 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5278 struct cfg80211_scan_request *req = rtwvif->scan_req; 5279 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5280 struct rtw89_chan *op = &scan_info->op_chan; 5281 struct rtw89_h2c_scanofld_be_opch *opch; 5282 struct rtw89_pktofld_info *pkt_info; 5283 struct rtw89_h2c_scanofld_be *h2c; 5284 struct sk_buff *skb; 5285 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5286 u8 opch_size = sizeof(*opch) * option->num_opch; 5287 u8 probe_id[NUM_NL80211_BANDS]; 5288 u8 cfg_len = sizeof(*h2c); 5289 unsigned int cond; 5290 u8 ver = U8_MAX; 5291 void *ptr; 5292 int ret; 5293 u32 len; 5294 u8 i; 5295 5296 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5297 5298 len = cfg_len + macc_role_size + opch_size; 5299 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5300 if (!skb) { 5301 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5302 return -ENOMEM; 5303 } 5304 5305 skb_put(skb, len); 5306 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5307 ptr = skb->data; 5308 5309 memset(probe_id, 
RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5310 5311 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5312 ver = 0; 5313 5314 if (!wowlan) { 5315 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5316 if (pkt_info->wildcard_6ghz) { 5317 /* Provide wildcard as template */ 5318 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5319 break; 5320 } 5321 } 5322 } 5323 5324 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5325 le32_encode_bits(option->scan_mode, 5326 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5327 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5328 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5329 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5330 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5331 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5332 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5333 5334 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5335 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5336 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5337 5338 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5339 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5340 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5341 5342 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5343 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5344 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5345 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5346 5347 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5348 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5349 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5350 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5351 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5352 5353 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5354 5355 h2c->w6 = le32_encode_bits(option->prohib_chan, 5356 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5357 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5358 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5359 if (!wowlan && req->no_cck) { 5360 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5361 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5362 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5363 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5364 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5365 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5366 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5367 } 5368 5369 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5370 cfg_len = offsetofend(typeof(*h2c), w8); 5371 goto flex_member; 5372 } 5373 5374 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5375 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5376 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5377 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5378 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5379 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5380 5381 flex_member: 5382 ptr += cfg_len; 5383 5384 for (i = 0; i < option->num_macc_role; i++) { 5385 macc_role = ptr; 5386 macc_role->w0 = 5387 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5388 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5389 le32_encode_bits(0, 
RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5390 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5391 ptr += sizeof(*macc_role); 5392 } 5393 5394 for (i = 0; i < option->num_opch; i++) { 5395 opch = ptr; 5396 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5397 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5398 le32_encode_bits(option->band, 5399 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5400 le32_encode_bits(rtwvif_link->port, 5401 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5402 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5403 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5404 le32_encode_bits(true, 5405 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5406 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5407 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5408 5409 opch->w1 = le32_encode_bits(op->band_type, 5410 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5411 le32_encode_bits(op->band_width, 5412 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5413 le32_encode_bits(0x3, 5414 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5415 le32_encode_bits(op->primary_channel, 5416 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5417 le32_encode_bits(op->channel, 5418 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5419 5420 opch->w2 = le32_encode_bits(0, 5421 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5422 le32_encode_bits(0, 5423 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5424 le32_encode_bits(2, 5425 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5426 5427 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5428 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5429 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5430 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5431 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5432 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5433 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5434 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5435 5436 if (ver == 0) 5437 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5438 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5439 else 5440 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5441 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5442 ptr += sizeof(*opch); 5443 } 5444 5445 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5446 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5447 H2C_FUNC_SCANOFLD_BE, 1, 1, 5448 len); 5449 5450 if (option->enable) 5451 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5452 else 5453 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5454 5455 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5456 if (ret) { 5457 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5458 return ret; 5459 } 5460 5461 return 0; 5462 } 5463 5464 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5465 struct rtw89_fw_h2c_rf_reg_info *info, 5466 u16 len, u8 page) 5467 { 5468 struct sk_buff *skb; 5469 u8 class = info->rf_path == RF_PATH_A ? 
5470 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5471 int ret; 5472 5473 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5474 if (!skb) { 5475 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5476 return -ENOMEM; 5477 } 5478 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5479 5480 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5481 H2C_CAT_OUTSRC, class, page, 0, 0, 5482 len); 5483 5484 ret = rtw89_h2c_tx(rtwdev, skb, false); 5485 if (ret) { 5486 rtw89_err(rtwdev, "failed to send h2c\n"); 5487 goto fail; 5488 } 5489 5490 return 0; 5491 fail: 5492 dev_kfree_skb_any(skb); 5493 5494 return ret; 5495 } 5496 5497 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5498 { 5499 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5500 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5501 struct sk_buff *skb; 5502 int ret; 5503 u8 idx; 5504 5505 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5506 if (!skb) { 5507 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5508 return -ENOMEM; 5509 } 5510 skb_put(skb, sizeof(*mccch)); 5511 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5512 5513 idx = rfk_mcc->table_idx; 5514 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5515 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5516 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5517 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5518 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5519 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5520 5521 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5522 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5523 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5524 sizeof(*mccch)); 5525 5526 ret = rtw89_h2c_tx(rtwdev, skb, false); 5527 if (ret) { 5528 rtw89_err(rtwdev, "failed to send h2c\n"); 5529 goto fail; 5530 } 5531 5532 return 0; 5533 fail: 5534 dev_kfree_skb_any(skb); 5535 5536 return ret; 5537 } 5538 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5539 5540 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5541 enum rtw89_phy_idx phy_idx) 5542 { 5543 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5544 struct rtw89_fw_h2c_rfk_pre_info_common *common; 5545 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5546 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 5547 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5548 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5549 u32 len = sizeof(*h2c); 5550 struct sk_buff *skb; 5551 u8 ver = U8_MAX; 5552 u8 tbl, path; 5553 u32 val32; 5554 int ret; 5555 5556 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 5557 len = sizeof(*h2c_v1); 5558 ver = 1; 5559 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5560 len = sizeof(*h2c_v0); 5561 ver = 0; 5562 } 5563 5564 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5565 if (!skb) { 5566 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5567 return -ENOMEM; 5568 } 5569 skb_put(skb, len); 5570 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5571 common = &h2c->base_v1.common; 5572 5573 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5574 5575 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5576 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5577 5578 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5579 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5580 common->dbcc.ch[path][tbl] = 5581 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5582 common->dbcc.band[path][tbl] = 5583 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5584 } 5585 } 5586 5587 
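/* Record the currently selected RFK table entry (channel and band) for
 * each RF path. The current bandwidth is only carried by formats newer
 * than v1, hence the early continue for ver <= 1 below.
 */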
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5588 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5589 5590 common->tbl.cur_ch[path] = 5591 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5592 common->tbl.cur_band[path] = 5593 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5594 5595 if (ver <= 1) 5596 continue; 5597 5598 h2c->cur_bandwidth[path] = 5599 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5600 } 5601 5602 common->phy_idx = cpu_to_le32(phy_idx); 5603 5604 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5605 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5606 5607 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5608 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5609 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5610 5611 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5612 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5613 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5614 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5615 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5616 h2c_v0->rfmod0 = cpu_to_le32(val32); 5617 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5618 h2c_v0->rfmod1 = cpu_to_le32(val32); 5619 5620 if (rtw89_is_mlo_1_1(rtwdev)) 5621 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5622 5623 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5624 5625 goto done; 5626 } 5627 5628 if (rtw89_is_mlo_1_1(rtwdev)) { 5629 h2c_v1 = &h2c->base_v1; 5630 h2c_v1->mlo_1_1 = cpu_to_le32(1); 5631 } 5632 done: 5633 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5634 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5635 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5636 len); 5637 5638 ret = rtw89_h2c_tx(rtwdev, skb, false); 5639 if (ret) { 5640 rtw89_err(rtwdev, "failed to send h2c\n"); 5641 goto fail; 5642 } 5643 5644 return 0; 5645 fail: 5646 dev_kfree_skb_any(skb); 5647 5648 return ret; 5649 } 5650 5651 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5652 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5653 { 5654 struct rtw89_hal *hal = &rtwdev->hal; 5655 struct rtw89_h2c_rf_tssi *h2c; 5656 u32 len = sizeof(*h2c); 5657 struct sk_buff *skb; 5658 int ret; 5659 5660 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5661 if (!skb) { 5662 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5663 return -ENOMEM; 5664 } 5665 skb_put(skb, len); 5666 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5667 5668 h2c->len = cpu_to_le16(len); 5669 h2c->phy = phy_idx; 5670 h2c->ch = chan->channel; 5671 h2c->bw = chan->band_width; 5672 h2c->band = chan->band_type; 5673 h2c->hwtx_en = true; 5674 h2c->cv = hal->cv; 5675 h2c->tssi_mode = tssi_mode; 5676 5677 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5678 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5679 5680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5681 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5682 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5683 5684 ret = rtw89_h2c_tx(rtwdev, skb, false); 5685 if (ret) { 5686 rtw89_err(rtwdev, "failed to send h2c\n"); 5687 goto fail; 5688 } 5689 5690 return 0; 5691 fail: 5692 dev_kfree_skb_any(skb); 5693 5694 return ret; 5695 } 5696 5697 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5698 const struct rtw89_chan *chan) 5699 { 5700 struct rtw89_h2c_rf_iqk *h2c; 5701 u32 len = sizeof(*h2c); 5702 struct sk_buff *skb; 5703 int ret; 5704 5705 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5706 if (!skb) { 5707 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5708 return -ENOMEM; 5709 } 5710 skb_put(skb, len); 5711 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5712 5713 h2c->phy_idx = cpu_to_le32(phy_idx); 5714 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5715 5716 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5717 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5718 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5719 5720 ret = rtw89_h2c_tx(rtwdev, skb, false); 5721 if (ret) { 5722 rtw89_err(rtwdev, "failed to send h2c\n"); 5723 goto fail; 5724 } 5725 5726 return 0; 5727 fail: 5728 dev_kfree_skb_any(skb); 5729 5730 return ret; 5731 } 5732 5733 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5734 const struct rtw89_chan *chan) 5735 { 5736 struct rtw89_h2c_rf_dpk *h2c; 5737 u32 len = sizeof(*h2c); 5738 struct sk_buff *skb; 5739 int ret; 5740 5741 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5742 if (!skb) { 5743 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5744 return -ENOMEM; 5745 } 5746 skb_put(skb, len); 5747 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5748 5749 h2c->len = len; 5750 h2c->phy = phy_idx; 5751 h2c->dpk_enable = true; 5752 h2c->kpath = RF_AB; 5753 h2c->cur_band = chan->band_type; 5754 h2c->cur_bw = chan->band_width; 5755 h2c->cur_ch = chan->channel; 5756 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5757 5758 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5759 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5760 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5761 5762 ret = rtw89_h2c_tx(rtwdev, skb, false); 5763 if (ret) { 5764 rtw89_err(rtwdev, "failed to send h2c\n"); 5765 goto fail; 5766 } 5767 5768 return 0; 5769 fail: 5770 dev_kfree_skb_any(skb); 5771 5772 return ret; 5773 } 5774 5775 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5776 const struct rtw89_chan *chan) 5777 { 5778 struct rtw89_hal *hal = &rtwdev->hal; 5779 struct rtw89_h2c_rf_txgapk *h2c; 5780 u32 len = sizeof(*h2c); 5781 struct sk_buff *skb; 5782 int ret; 5783 5784 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5785 if (!skb) { 5786 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5787 return -ENOMEM; 5788 } 5789 skb_put(skb, len); 5790 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5791 5792 h2c->len = len; 5793 h2c->ktype = 2; 5794 h2c->phy = phy_idx; 5795 h2c->kpath = RF_AB; 5796 h2c->band = chan->band_type; 5797 h2c->bw = chan->band_width; 5798 h2c->ch = chan->channel; 5799 h2c->cv = hal->cv; 5800 5801 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5802 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5803 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5804 5805 ret = rtw89_h2c_tx(rtwdev, skb, false); 5806 if (ret) { 5807 rtw89_err(rtwdev, "failed to send h2c\n"); 5808 goto fail; 5809 } 5810 5811 return 0; 5812 fail: 5813 dev_kfree_skb_any(skb); 5814 5815 return ret; 5816 } 5817 5818 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5819 const struct rtw89_chan *chan) 5820 { 5821 struct rtw89_h2c_rf_dack *h2c; 5822 u32 len = sizeof(*h2c); 5823 struct sk_buff *skb; 5824 int ret; 5825 5826 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5827 if (!skb) { 5828 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5829 return -ENOMEM; 5830 } 5831 skb_put(skb, len); 5832 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5833 5834 h2c->len = cpu_to_le32(len); 5835 h2c->phy = cpu_to_le32(phy_idx); 5836 h2c->type = 
cpu_to_le32(0); 5837 5838 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5839 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5840 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5841 5842 ret = rtw89_h2c_tx(rtwdev, skb, false); 5843 if (ret) { 5844 rtw89_err(rtwdev, "failed to send h2c\n"); 5845 goto fail; 5846 } 5847 5848 return 0; 5849 fail: 5850 dev_kfree_skb_any(skb); 5851 5852 return ret; 5853 } 5854 5855 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5856 const struct rtw89_chan *chan, bool is_chl_k) 5857 { 5858 struct rtw89_h2c_rf_rxdck_v0 *v0; 5859 struct rtw89_h2c_rf_rxdck *h2c; 5860 u32 len = sizeof(*h2c); 5861 struct sk_buff *skb; 5862 int ver = -1; 5863 int ret; 5864 5865 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 5866 len = sizeof(*v0); 5867 ver = 0; 5868 } 5869 5870 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5871 if (!skb) { 5872 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5873 return -ENOMEM; 5874 } 5875 skb_put(skb, len); 5876 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 5877 5878 v0->len = len; 5879 v0->phy = phy_idx; 5880 v0->is_afe = false; 5881 v0->kpath = RF_AB; 5882 v0->cur_band = chan->band_type; 5883 v0->cur_bw = chan->band_width; 5884 v0->cur_ch = chan->channel; 5885 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5886 5887 if (ver == 0) 5888 goto hdr; 5889 5890 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5891 h2c->is_chl_k = is_chl_k; 5892 5893 hdr: 5894 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5895 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5896 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5897 5898 ret = rtw89_h2c_tx(rtwdev, skb, false); 5899 if (ret) { 5900 rtw89_err(rtwdev, "failed to send h2c\n"); 5901 goto fail; 5902 } 5903 5904 return 0; 5905 fail: 5906 dev_kfree_skb_any(skb); 5907 5908 return ret; 5909 } 5910 5911 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5912 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5913 bool rack, bool dack) 5914 { 5915 struct sk_buff *skb; 5916 int ret; 5917 5918 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5919 if (!skb) { 5920 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5921 return -ENOMEM; 5922 } 5923 skb_put_data(skb, buf, len); 5924 5925 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5926 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5927 len); 5928 5929 ret = rtw89_h2c_tx(rtwdev, skb, false); 5930 if (ret) { 5931 rtw89_err(rtwdev, "failed to send h2c\n"); 5932 goto fail; 5933 } 5934 5935 return 0; 5936 fail: 5937 dev_kfree_skb_any(skb); 5938 5939 return ret; 5940 } 5941 5942 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5943 { 5944 struct sk_buff *skb; 5945 int ret; 5946 5947 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5948 if (!skb) { 5949 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5950 return -ENOMEM; 5951 } 5952 skb_put_data(skb, buf, len); 5953 5954 ret = rtw89_h2c_tx(rtwdev, skb, false); 5955 if (ret) { 5956 rtw89_err(rtwdev, "failed to send h2c\n"); 5957 goto fail; 5958 } 5959 5960 return 0; 5961 fail: 5962 dev_kfree_skb_any(skb); 5963 5964 return ret; 5965 } 5966 5967 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5968 { 5969 struct rtw89_early_h2c *early_h2c; 5970 5971 lockdep_assert_held(&rtwdev->mutex); 5972 5973 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5974 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5975 } 5976 } 5977 5978 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5979 { 
5980 struct rtw89_early_h2c *early_h2c, *tmp; 5981 5982 mutex_lock(&rtwdev->mutex); 5983 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5984 list_del(&early_h2c->list); 5985 kfree(early_h2c->h2c); 5986 kfree(early_h2c); 5987 } 5988 mutex_unlock(&rtwdev->mutex); 5989 } 5990 5991 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5992 { 5993 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5994 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5995 5996 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5997 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5998 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5999 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6000 } 6001 6002 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6003 struct sk_buff *c2h) 6004 { 6005 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6006 u8 category = attr->category; 6007 u8 class = attr->class; 6008 u8 func = attr->func; 6009 6010 switch (category) { 6011 default: 6012 return false; 6013 case RTW89_C2H_CAT_MAC: 6014 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6015 case RTW89_C2H_CAT_OUTSRC: 6016 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6017 } 6018 } 6019 6020 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6021 { 6022 rtw89_fw_c2h_parse_attr(c2h); 6023 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6024 goto enqueue; 6025 6026 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6027 dev_kfree_skb_any(c2h); 6028 return; 6029 6030 enqueue: 6031 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6032 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 6033 } 6034 6035 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6036 struct sk_buff *skb) 6037 { 6038 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6039 u8 category = attr->category; 6040 u8 class = attr->class; 6041 u8 func = attr->func; 6042 u16 len = attr->len; 6043 bool dump = true; 6044 6045 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6046 return; 6047 6048 switch (category) { 6049 case RTW89_C2H_CAT_TEST: 6050 break; 6051 case RTW89_C2H_CAT_MAC: 6052 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6053 if (class == RTW89_MAC_C2H_CLASS_INFO && 6054 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6055 dump = false; 6056 break; 6057 case RTW89_C2H_CAT_OUTSRC: 6058 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6059 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6060 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6061 else 6062 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6063 break; 6064 } 6065 6066 if (dump) 6067 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6068 } 6069 6070 void rtw89_fw_c2h_work(struct work_struct *work) 6071 { 6072 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6073 c2h_work); 6074 struct sk_buff *skb, *tmp; 6075 6076 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6077 skb_unlink(skb, &rtwdev->c2h_queue); 6078 mutex_lock(&rtwdev->mutex); 6079 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6080 mutex_unlock(&rtwdev->mutex); 6081 dev_kfree_skb_any(skb); 6082 } 6083 } 6084 6085 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6086 struct rtw89_mac_h2c_info *info) 6087 { 6088 const struct rtw89_chip_info *chip = rtwdev->chip; 6089 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6090 const u32 *h2c_reg = chip->h2c_regs; 6091 u8 i, val, len; 6092 int ret; 6093 6094 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 
5000, false, 6095 rtwdev, chip->h2c_ctrl_reg); 6096 if (ret) { 6097 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6098 return ret; 6099 } 6100 6101 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6102 sizeof(info->u.h2creg[0])); 6103 6104 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6105 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6106 6107 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6108 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6109 6110 fw_info->h2c_counter++; 6111 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6112 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6113 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6114 6115 return 0; 6116 } 6117 6118 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6119 struct rtw89_mac_c2h_info *info) 6120 { 6121 const struct rtw89_chip_info *chip = rtwdev->chip; 6122 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6123 const u32 *c2h_reg = chip->c2h_regs; 6124 u32 ret; 6125 u8 i, val; 6126 6127 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6128 6129 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6130 RTW89_C2H_TIMEOUT, false, rtwdev, 6131 chip->c2h_ctrl_reg); 6132 if (ret) { 6133 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6134 return ret; 6135 } 6136 6137 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6138 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6139 6140 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6141 6142 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6143 info->content_len = 6144 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6145 RTW89_C2HREG_HDR_LEN; 6146 6147 fw_info->c2h_counter++; 6148 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6149 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6150 6151 return 0; 6152 } 6153 6154 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6155 struct rtw89_mac_h2c_info *h2c_info, 6156 struct rtw89_mac_c2h_info *c2h_info) 6157 { 6158 u32 ret; 6159 6160 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6161 lockdep_assert_held(&rtwdev->mutex); 6162 6163 if (!h2c_info && !c2h_info) 6164 return -EINVAL; 6165 6166 if (!h2c_info) 6167 goto recv_c2h; 6168 6169 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6170 if (ret) 6171 return ret; 6172 6173 recv_c2h: 6174 if (!c2h_info) 6175 return 0; 6176 6177 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6178 if (ret) 6179 return ret; 6180 6181 return 0; 6182 } 6183 6184 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6185 { 6186 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6187 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6188 return; 6189 } 6190 6191 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6192 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6193 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6194 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6195 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6196 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6197 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6198 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6199 6200 rtw89_fw_prog_cnt_dump(rtwdev); 6201 } 6202 6203 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6204 { 6205 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6206 struct rtw89_pktofld_info *info, *tmp; 6207 u8 idx; 6208 6209 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6210 if 
(!(rtwdev->chip->support_bands & BIT(idx))) 6211 continue; 6212 6213 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6214 if (test_bit(info->id, rtwdev->pkt_offload)) 6215 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6216 list_del(&info->list); 6217 kfree(info); 6218 } 6219 } 6220 } 6221 6222 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6223 struct cfg80211_scan_request *req, 6224 struct rtw89_pktofld_info *info, 6225 enum nl80211_band band, u8 ssid_idx) 6226 { 6227 if (band != NL80211_BAND_6GHZ) 6228 return false; 6229 6230 if (req->ssids[ssid_idx].ssid_len) { 6231 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6232 req->ssids[ssid_idx].ssid_len); 6233 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6234 return false; 6235 } else { 6236 info->wildcard_6ghz = true; 6237 return true; 6238 } 6239 } 6240 6241 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6242 struct rtw89_vif_link *rtwvif_link, 6243 struct sk_buff *skb, u8 ssid_idx) 6244 { 6245 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6246 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6247 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6248 struct cfg80211_scan_request *req = rtwvif->scan_req; 6249 struct rtw89_pktofld_info *info; 6250 struct sk_buff *new; 6251 int ret = 0; 6252 u8 band; 6253 6254 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6255 if (!(rtwdev->chip->support_bands & BIT(band))) 6256 continue; 6257 6258 new = skb_copy(skb, GFP_KERNEL); 6259 if (!new) { 6260 ret = -ENOMEM; 6261 goto out; 6262 } 6263 skb_put_data(new, ies->ies[band], ies->len[band]); 6264 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6265 6266 info = kzalloc(sizeof(*info), GFP_KERNEL); 6267 if (!info) { 6268 ret = -ENOMEM; 6269 kfree_skb(new); 6270 goto out; 6271 } 6272 6273 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6274 6275 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6276 if (ret) { 6277 kfree_skb(new); 6278 kfree(info); 6279 goto out; 6280 } 6281 6282 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6283 kfree_skb(new); 6284 } 6285 out: 6286 return ret; 6287 } 6288 6289 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6290 struct rtw89_vif_link *rtwvif_link) 6291 { 6292 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6293 struct cfg80211_scan_request *req = rtwvif->scan_req; 6294 struct sk_buff *skb; 6295 u8 num = req->n_ssids, i; 6296 int ret; 6297 6298 for (i = 0; i < num; i++) { 6299 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6300 req->ssids[i].ssid, 6301 req->ssids[i].ssid_len, 6302 req->ie_len); 6303 if (!skb) 6304 return -ENOMEM; 6305 6306 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6307 kfree_skb(skb); 6308 6309 if (ret) 6310 return ret; 6311 } 6312 6313 return 0; 6314 } 6315 6316 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6317 struct ieee80211_scan_ies *ies, 6318 struct cfg80211_scan_request *req, 6319 struct rtw89_mac_chinfo *ch_info) 6320 { 6321 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6322 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6323 struct cfg80211_scan_6ghz_params *params; 6324 struct rtw89_pktofld_info *info, *tmp; 6325 struct ieee80211_hdr *hdr; 6326 struct sk_buff *skb; 6327 bool found; 6328 int ret = 0; 6329 u8 i; 6330 6331 if (!req->n_6ghz_params) 6332 return 0; 6333 6334 for (i = 0; i < req->n_6ghz_params; i++) { 6335 params = &req->scan_6ghz_params[i]; 6336 
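/* Build a directed probe request for each RNR-reported BSSID on the channel currently being scanned, unless one has already been queued for it. */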
6337 if (req->channels[params->channel_idx]->hw_value != 6338 ch_info->pri_ch) 6339 continue; 6340 6341 found = false; 6342 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6343 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6344 found = true; 6345 break; 6346 } 6347 } 6348 if (found) 6349 continue; 6350 6351 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6352 NULL, 0, req->ie_len); 6353 if (!skb) 6354 return -ENOMEM; 6355 6356 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6357 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6358 hdr = (struct ieee80211_hdr *)skb->data; 6359 ether_addr_copy(hdr->addr3, params->bssid); 6360 6361 info = kzalloc(sizeof(*info), GFP_KERNEL); 6362 if (!info) { 6363 ret = -ENOMEM; 6364 kfree_skb(skb); 6365 goto out; 6366 } 6367 6368 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6369 if (ret) { 6370 kfree_skb(skb); 6371 kfree(info); 6372 goto out; 6373 } 6374 6375 ether_addr_copy(info->bssid, params->bssid); 6376 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6377 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6378 6379 ch_info->tx_pkt = true; 6380 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6381 6382 kfree_skb(skb); 6383 } 6384 6385 out: 6386 return ret; 6387 } 6388 6389 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6390 int chan_type, int ssid_num, 6391 struct rtw89_mac_chinfo *ch_info) 6392 { 6393 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6394 struct rtw89_pktofld_info *info; 6395 u8 probe_count = 0; 6396 6397 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6398 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6399 ch_info->bw = RTW89_SCAN_WIDTH; 6400 ch_info->tx_pkt = true; 6401 ch_info->cfg_tx_pwr = false; 6402 ch_info->tx_pwr_idx = 0; 6403 ch_info->tx_null = false; 6404 ch_info->pause_data = false; 6405 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6406 6407 if (ssid_num) { 6408 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6409 if (info->channel_6ghz && 6410 ch_info->pri_ch != info->channel_6ghz) 6411 continue; 6412 else if (info->channel_6ghz && probe_count != 0) 6413 ch_info->period += RTW89_CHANNEL_TIME_6G; 6414 6415 if (info->wildcard_6ghz) 6416 continue; 6417 6418 ch_info->pkt_id[probe_count++] = info->id; 6419 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6420 break; 6421 } 6422 ch_info->num_pkt = probe_count; 6423 } 6424 6425 switch (chan_type) { 6426 case RTW89_CHAN_DFS: 6427 if (ch_info->ch_band != RTW89_BAND_6G) 6428 ch_info->period = max_t(u8, ch_info->period, 6429 RTW89_DFS_CHAN_TIME); 6430 ch_info->dwell_time = RTW89_DWELL_TIME; 6431 break; 6432 case RTW89_CHAN_ACTIVE: 6433 break; 6434 default: 6435 rtw89_err(rtwdev, "Channel type out of bound\n"); 6436 } 6437 } 6438 6439 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6440 int ssid_num, 6441 struct rtw89_mac_chinfo *ch_info) 6442 { 6443 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6444 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6445 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6446 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6447 struct cfg80211_scan_request *req = rtwvif->scan_req; 6448 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6449 struct rtw89_pktofld_info *info; 6450 u8 band, probe_count = 0; 6451 int ret; 6452 6453 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6454 ch_info->dfs_ch = chan_type == 
RTW89_CHAN_DFS; 6455 ch_info->bw = RTW89_SCAN_WIDTH; 6456 ch_info->tx_pkt = true; 6457 ch_info->cfg_tx_pwr = false; 6458 ch_info->tx_pwr_idx = 0; 6459 ch_info->tx_null = false; 6460 ch_info->pause_data = false; 6461 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6462 6463 if (ch_info->ch_band == RTW89_BAND_6G) { 6464 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6465 !ch_info->is_psc) { 6466 ch_info->tx_pkt = false; 6467 if (!req->duration_mandatory) 6468 ch_info->period -= RTW89_DWELL_TIME_6G; 6469 } 6470 } 6471 6472 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6473 if (ret) 6474 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6475 6476 if (ssid_num) { 6477 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6478 6479 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6480 if (info->channel_6ghz && 6481 ch_info->pri_ch != info->channel_6ghz) 6482 continue; 6483 else if (info->channel_6ghz && probe_count != 0) 6484 ch_info->period += RTW89_CHANNEL_TIME_6G; 6485 6486 if (info->wildcard_6ghz) 6487 continue; 6488 6489 ch_info->pkt_id[probe_count++] = info->id; 6490 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6491 break; 6492 } 6493 ch_info->num_pkt = probe_count; 6494 } 6495 6496 switch (chan_type) { 6497 case RTW89_CHAN_OPERATE: 6498 ch_info->central_ch = op->channel; 6499 ch_info->pri_ch = op->primary_channel; 6500 ch_info->ch_band = op->band_type; 6501 ch_info->bw = op->band_width; 6502 ch_info->tx_null = true; 6503 ch_info->num_pkt = 0; 6504 break; 6505 case RTW89_CHAN_DFS: 6506 if (ch_info->ch_band != RTW89_BAND_6G) 6507 ch_info->period = max_t(u8, ch_info->period, 6508 RTW89_DFS_CHAN_TIME); 6509 ch_info->dwell_time = RTW89_DWELL_TIME; 6510 ch_info->pause_data = true; 6511 break; 6512 case RTW89_CHAN_ACTIVE: 6513 ch_info->pause_data = true; 6514 break; 6515 default: 6516 rtw89_err(rtwdev, "Channel type out of bound\n"); 6517 } 6518 } 6519 6520 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6521 int ssid_num, 6522 struct rtw89_mac_chinfo_be *ch_info) 6523 { 6524 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6525 struct rtw89_pktofld_info *info; 6526 u8 probe_count = 0, i; 6527 6528 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6529 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6530 ch_info->bw = RTW89_SCAN_WIDTH; 6531 ch_info->tx_null = false; 6532 ch_info->pause_data = false; 6533 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6534 6535 if (ssid_num) { 6536 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6537 ch_info->pkt_id[probe_count++] = info->id; 6538 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6539 break; 6540 } 6541 } 6542 6543 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6544 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6545 6546 switch (chan_type) { 6547 case RTW89_CHAN_DFS: 6548 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6549 ch_info->dwell_time = RTW89_DWELL_TIME; 6550 break; 6551 case RTW89_CHAN_ACTIVE: 6552 break; 6553 default: 6554 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6555 break; 6556 } 6557 } 6558 6559 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6560 int ssid_num, 6561 struct rtw89_mac_chinfo_be *ch_info) 6562 { 6563 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6564 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6565 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6566 struct cfg80211_scan_request *req = rtwvif->scan_req; 6567 struct rtw89_pktofld_info *info; 6568 u8 band, 
probe_count = 0, i; 6569 6570 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6571 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6572 ch_info->bw = RTW89_SCAN_WIDTH; 6573 ch_info->tx_null = false; 6574 ch_info->pause_data = false; 6575 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6576 6577 if (ssid_num) { 6578 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6579 6580 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6581 if (info->channel_6ghz && 6582 ch_info->pri_ch != info->channel_6ghz) 6583 continue; 6584 6585 if (info->wildcard_6ghz) 6586 continue; 6587 6588 ch_info->pkt_id[probe_count++] = info->id; 6589 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6590 break; 6591 } 6592 } 6593 6594 if (ch_info->ch_band == RTW89_BAND_6G) { 6595 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6596 !ch_info->is_psc) { 6597 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6598 if (!req->duration_mandatory) 6599 ch_info->period -= RTW89_DWELL_TIME_6G; 6600 } 6601 } 6602 6603 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6604 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6605 6606 switch (chan_type) { 6607 case RTW89_CHAN_DFS: 6608 if (ch_info->ch_band != RTW89_BAND_6G) 6609 ch_info->period = 6610 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6611 ch_info->dwell_time = RTW89_DWELL_TIME; 6612 ch_info->pause_data = true; 6613 break; 6614 case RTW89_CHAN_ACTIVE: 6615 ch_info->pause_data = true; 6616 break; 6617 default: 6618 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6619 break; 6620 } 6621 } 6622 6623 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6624 struct rtw89_vif_link *rtwvif_link) 6625 { 6626 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6627 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6628 struct rtw89_mac_chinfo *ch_info, *tmp; 6629 struct ieee80211_channel *channel; 6630 struct list_head chan_list; 6631 int list_len; 6632 enum rtw89_chan_type type; 6633 int ret = 0; 6634 u32 idx; 6635 6636 INIT_LIST_HEAD(&chan_list); 6637 for (idx = 0, list_len = 0; 6638 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6639 idx++, list_len++) { 6640 channel = nd_config->channels[idx]; 6641 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6642 if (!ch_info) { 6643 ret = -ENOMEM; 6644 goto out; 6645 } 6646 6647 ch_info->period = RTW89_CHANNEL_TIME; 6648 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6649 ch_info->central_ch = channel->hw_value; 6650 ch_info->pri_ch = channel->hw_value; 6651 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6652 6653 if (channel->flags & 6654 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6655 type = RTW89_CHAN_DFS; 6656 else 6657 type = RTW89_CHAN_ACTIVE; 6658 6659 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6660 list_add_tail(&ch_info->list, &chan_list); 6661 } 6662 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6663 6664 out: 6665 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6666 list_del(&ch_info->list); 6667 kfree(ch_info); 6668 } 6669 6670 return ret; 6671 } 6672 6673 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6674 struct rtw89_vif_link *rtwvif_link, bool connected) 6675 { 6676 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6677 struct cfg80211_scan_request *req = rtwvif->scan_req; 6678 struct rtw89_mac_chinfo *ch_info, *tmp; 6679 struct ieee80211_channel *channel; 6680 struct list_head chan_list; 6681 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6682 int 
list_len, off_chan_time = 0; 6683 enum rtw89_chan_type type; 6684 int ret = 0; 6685 u32 idx; 6686 6687 INIT_LIST_HEAD(&chan_list); 6688 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6689 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6690 idx++, list_len++) { 6691 channel = req->channels[idx]; 6692 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6693 if (!ch_info) { 6694 ret = -ENOMEM; 6695 goto out; 6696 } 6697 6698 if (req->duration) 6699 ch_info->period = req->duration; 6700 else if (channel->band == NL80211_BAND_6GHZ) 6701 ch_info->period = RTW89_CHANNEL_TIME_6G + 6702 RTW89_DWELL_TIME_6G; 6703 else 6704 ch_info->period = RTW89_CHANNEL_TIME; 6705 6706 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6707 ch_info->central_ch = channel->hw_value; 6708 ch_info->pri_ch = channel->hw_value; 6709 ch_info->rand_seq_num = random_seq; 6710 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6711 6712 if (channel->flags & 6713 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6714 type = RTW89_CHAN_DFS; 6715 else 6716 type = RTW89_CHAN_ACTIVE; 6717 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6718 6719 if (connected && 6720 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6721 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6722 if (!tmp) { 6723 ret = -ENOMEM; 6724 kfree(ch_info); 6725 goto out; 6726 } 6727 6728 type = RTW89_CHAN_OPERATE; 6729 tmp->period = req->duration_mandatory ? 6730 req->duration : RTW89_CHANNEL_TIME; 6731 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6732 list_add_tail(&tmp->list, &chan_list); 6733 off_chan_time = 0; 6734 list_len++; 6735 } 6736 list_add_tail(&ch_info->list, &chan_list); 6737 off_chan_time += ch_info->period; 6738 } 6739 rtwdev->scan_info.last_chan_idx = idx; 6740 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6741 6742 out: 6743 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6744 list_del(&ch_info->list); 6745 kfree(ch_info); 6746 } 6747 6748 return ret; 6749 } 6750 6751 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6752 struct rtw89_vif_link *rtwvif_link) 6753 { 6754 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6755 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6756 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6757 struct ieee80211_channel *channel; 6758 struct list_head chan_list; 6759 enum rtw89_chan_type type; 6760 int list_len, ret; 6761 u32 idx; 6762 6763 INIT_LIST_HEAD(&chan_list); 6764 6765 for (idx = 0, list_len = 0; 6766 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6767 idx++, list_len++) { 6768 channel = nd_config->channels[idx]; 6769 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6770 if (!ch_info) { 6771 ret = -ENOMEM; 6772 goto out; 6773 } 6774 6775 ch_info->period = RTW89_CHANNEL_TIME; 6776 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6777 ch_info->central_ch = channel->hw_value; 6778 ch_info->pri_ch = channel->hw_value; 6779 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6780 6781 if (channel->flags & 6782 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6783 type = RTW89_CHAN_DFS; 6784 else 6785 type = RTW89_CHAN_ACTIVE; 6786 6787 rtw89_pno_scan_add_chan_be(rtwdev, type, 6788 nd_config->n_match_sets, ch_info); 6789 list_add_tail(&ch_info->list, &chan_list); 6790 } 6791 6792 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6793 rtwvif_link); 6794 6795 out: 6796 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6797 list_del(&ch_info->list); 6798 
kfree(ch_info); 6799 } 6800 6801 return ret; 6802 } 6803 6804 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6805 struct rtw89_vif_link *rtwvif_link, bool connected) 6806 { 6807 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6808 struct cfg80211_scan_request *req = rtwvif->scan_req; 6809 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6810 struct ieee80211_channel *channel; 6811 struct list_head chan_list; 6812 enum rtw89_chan_type type; 6813 int list_len, ret; 6814 bool random_seq; 6815 u32 idx; 6816 6817 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6818 INIT_LIST_HEAD(&chan_list); 6819 6820 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6821 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6822 idx++, list_len++) { 6823 channel = req->channels[idx]; 6824 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6825 if (!ch_info) { 6826 ret = -ENOMEM; 6827 goto out; 6828 } 6829 6830 if (req->duration) 6831 ch_info->period = req->duration; 6832 else if (channel->band == NL80211_BAND_6GHZ) 6833 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6834 else 6835 ch_info->period = RTW89_CHANNEL_TIME; 6836 6837 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6838 ch_info->central_ch = channel->hw_value; 6839 ch_info->pri_ch = channel->hw_value; 6840 ch_info->rand_seq_num = random_seq; 6841 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6842 6843 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6844 type = RTW89_CHAN_DFS; 6845 else 6846 type = RTW89_CHAN_ACTIVE; 6847 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6848 6849 list_add_tail(&ch_info->list, &chan_list); 6850 } 6851 6852 rtwdev->scan_info.last_chan_idx = idx; 6853 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6854 rtwvif_link); 6855 6856 out: 6857 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6858 list_del(&ch_info->list); 6859 kfree(ch_info); 6860 } 6861 6862 return ret; 6863 } 6864 6865 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6866 struct rtw89_vif_link *rtwvif_link, bool connected) 6867 { 6868 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6869 int ret; 6870 6871 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 6872 if (ret) { 6873 rtw89_err(rtwdev, "Update probe request failed\n"); 6874 goto out; 6875 } 6876 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 6877 out: 6878 return ret; 6879 } 6880 6881 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 6882 struct rtw89_vif_link *rtwvif_link, 6883 struct ieee80211_scan_request *scan_req) 6884 { 6885 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6886 struct cfg80211_scan_request *req = &scan_req->req; 6887 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 6888 rtwvif_link->chanctx_idx); 6889 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6890 u32 rx_fltr = rtwdev->hal.rx_fltr; 6891 u8 mac_addr[ETH_ALEN]; 6892 u32 reg; 6893 6894 /* clone op and keep it during scan */ 6895 rtwdev->scan_info.op_chan = *chan; 6896 6897 rtwdev->scan_info.scanning_vif = rtwvif_link; 6898 rtwdev->scan_info.last_chan_idx = 0; 6899 rtwdev->scan_info.abort = false; 6900 rtwvif->scan_ies = &scan_req->ies; 6901 rtwvif->scan_req = req; 6902 ieee80211_stop_queues(rtwdev->hw); 6903 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 6904 6905 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6906 get_random_mask_addr(mac_addr, req->mac_addr, 6907 req->mac_addr_mask); 6908 else 6909 
ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 6910 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 6911 6912 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6913 rx_fltr &= ~B_AX_A_BC; 6914 rx_fltr &= ~B_AX_A_A1_MATCH; 6915 6916 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 6917 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr); 6918 6919 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6920 } 6921 6922 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 6923 struct rtw89_vif_link *rtwvif_link, 6924 bool aborted) 6925 { 6926 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6927 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6928 struct cfg80211_scan_info info = { 6929 .aborted = aborted, 6930 }; 6931 struct rtw89_vif *rtwvif; 6932 u32 reg; 6933 6934 if (!rtwvif_link) 6935 return; 6936 6937 rtw89_chanctx_proceed(rtwdev); 6938 6939 rtwvif = rtwvif_link->rtwvif; 6940 6941 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 6942 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 6943 6944 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 6945 ieee80211_scan_completed(rtwdev->hw, &info); 6946 ieee80211_wake_queues(rtwdev->hw); 6947 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 6948 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6949 6950 rtw89_release_pkt_list(rtwdev); 6951 rtwvif->scan_req = NULL; 6952 rtwvif->scan_ies = NULL; 6953 scan_info->last_chan_idx = 0; 6954 scan_info->scanning_vif = NULL; 6955 scan_info->abort = false; 6956 } 6957 6958 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 6959 struct rtw89_vif_link *rtwvif_link) 6960 { 6961 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6962 int ret; 6963 6964 scan_info->abort = true; 6965 6966 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 6967 if (ret) 6968 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 6969 6970 /* Indicate ieee80211_scan_completed() before returning, which is safe 6971 * because scan abort command always waits for completion of 6972 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 6973 * work properly. 6974 */ 6975 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 6976 } 6977 6978 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 6979 { 6980 struct rtw89_vif_link *rtwvif_link; 6981 struct rtw89_vif *rtwvif; 6982 unsigned int link_id; 6983 6984 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 6985 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 6986 /* This variable implies connected or during attempt to connect */ 6987 if (!is_zero_ether_addr(rtwvif_link->bssid)) 6988 return true; 6989 } 6990 } 6991 6992 return false; 6993 } 6994 6995 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 6996 struct rtw89_vif_link *rtwvif_link, 6997 bool enable) 6998 { 6999 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7000 struct rtw89_scan_option opt = {0}; 7001 bool connected; 7002 int ret = 0; 7003 7004 if (!rtwvif_link) 7005 return -EINVAL; 7006 7007 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 7008 opt.enable = enable; 7009 opt.target_ch_mode = connected; 7010 if (enable) { 7011 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 7012 if (ret) 7013 goto out; 7014 } 7015 7016 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 7017 opt.operation = enable ? 
RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 7018 opt.scan_mode = RTW89_SCAN_MODE_SA; 7019 opt.band = rtwvif_link->mac_idx; 7020 opt.num_macc_role = 0; 7021 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 7022 opt.num_opch = connected ? 1 : 0; 7023 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 7024 } 7025 7026 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7027 out: 7028 return ret; 7029 } 7030 7031 #define H2C_FW_CPU_EXCEPTION_LEN 4 7032 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7033 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7034 { 7035 struct sk_buff *skb; 7036 int ret; 7037 7038 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7039 if (!skb) { 7040 rtw89_err(rtwdev, 7041 "failed to alloc skb for fw cpu exception\n"); 7042 return -ENOMEM; 7043 } 7044 7045 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7046 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7047 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7048 7049 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7050 H2C_CAT_TEST, 7051 H2C_CL_FW_STATUS_TEST, 7052 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7053 H2C_FW_CPU_EXCEPTION_LEN); 7054 7055 ret = rtw89_h2c_tx(rtwdev, skb, false); 7056 if (ret) { 7057 rtw89_err(rtwdev, "failed to send h2c\n"); 7058 goto fail; 7059 } 7060 7061 return 0; 7062 7063 fail: 7064 dev_kfree_skb_any(skb); 7065 return ret; 7066 } 7067 7068 #define H2C_PKT_DROP_LEN 24 7069 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7070 const struct rtw89_pkt_drop_params *params) 7071 { 7072 struct sk_buff *skb; 7073 int ret; 7074 7075 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7076 if (!skb) { 7077 rtw89_err(rtwdev, 7078 "failed to alloc skb for packet drop\n"); 7079 return -ENOMEM; 7080 } 7081 7082 switch (params->sel) { 7083 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7084 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7085 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7086 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7087 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7088 break; 7089 default: 7090 rtw89_debug(rtwdev, RTW89_DBG_FW, 7091 "H2C of pkt drop might not fully support sel: %d yet\n", 7092 params->sel); 7093 break; 7094 } 7095 7096 skb_put(skb, H2C_PKT_DROP_LEN); 7097 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7098 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7099 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7100 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7101 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7102 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7103 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7104 params->macid_band_sel[0]); 7105 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7106 params->macid_band_sel[1]); 7107 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7108 params->macid_band_sel[2]); 7109 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7110 params->macid_band_sel[3]); 7111 7112 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7113 H2C_CAT_MAC, 7114 H2C_CL_MAC_FW_OFLD, 7115 H2C_FUNC_PKT_DROP, 0, 0, 7116 H2C_PKT_DROP_LEN); 7117 7118 ret = rtw89_h2c_tx(rtwdev, skb, false); 7119 if (ret) { 7120 rtw89_err(rtwdev, "failed to send h2c\n"); 7121 goto fail; 7122 } 7123 7124 return 0; 7125 7126 fail: 7127 dev_kfree_skb_any(skb); 7128 return ret; 7129 } 7130 7131 #define H2C_KEEP_ALIVE_LEN 4 7132 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7133 bool enable) 7134 { 7135 struct sk_buff *skb; 7136 u8 pkt_id = 0; 7137 int ret; 7138 7139 
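/* A NULL data frame template is offloaded first so firmware can transmit keep-alive frames on its own; pkt_id links that template to this H2C. */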
if (enable) { 7140 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7141 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7142 &pkt_id); 7143 if (ret) 7144 return -EPERM; 7145 } 7146 7147 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7148 if (!skb) { 7149 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7150 return -ENOMEM; 7151 } 7152 7153 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7154 7155 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7156 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7157 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7158 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7159 7160 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7161 H2C_CAT_MAC, 7162 H2C_CL_MAC_WOW, 7163 H2C_FUNC_KEEP_ALIVE, 0, 1, 7164 H2C_KEEP_ALIVE_LEN); 7165 7166 ret = rtw89_h2c_tx(rtwdev, skb, false); 7167 if (ret) { 7168 rtw89_err(rtwdev, "failed to send h2c\n"); 7169 goto fail; 7170 } 7171 7172 return 0; 7173 7174 fail: 7175 dev_kfree_skb_any(skb); 7176 7177 return ret; 7178 } 7179 7180 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7181 bool enable) 7182 { 7183 struct rtw89_h2c_arp_offload *h2c; 7184 u32 len = sizeof(*h2c); 7185 struct sk_buff *skb; 7186 u8 pkt_id = 0; 7187 int ret; 7188 7189 if (enable) { 7190 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7191 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7192 &pkt_id); 7193 if (ret) 7194 return ret; 7195 } 7196 7197 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7198 if (!skb) { 7199 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7200 return -ENOMEM; 7201 } 7202 7203 skb_put(skb, len); 7204 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7205 7206 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7207 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7208 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7209 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7210 7211 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7212 H2C_CAT_MAC, 7213 H2C_CL_MAC_WOW, 7214 H2C_FUNC_ARP_OFLD, 0, 1, 7215 len); 7216 7217 ret = rtw89_h2c_tx(rtwdev, skb, false); 7218 if (ret) { 7219 rtw89_err(rtwdev, "failed to send h2c\n"); 7220 goto fail; 7221 } 7222 7223 return 0; 7224 7225 fail: 7226 dev_kfree_skb_any(skb); 7227 7228 return ret; 7229 } 7230 7231 #define H2C_DISCONNECT_DETECT_LEN 8 7232 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 7233 struct rtw89_vif_link *rtwvif_link, bool enable) 7234 { 7235 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7236 struct sk_buff *skb; 7237 u8 macid = rtwvif_link->mac_id; 7238 int ret; 7239 7240 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 7241 if (!skb) { 7242 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7243 return -ENOMEM; 7244 } 7245 7246 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 7247 7248 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 7249 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 7250 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 7251 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 7252 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 7253 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 7254 } 7255 7256 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7257 H2C_CAT_MAC, 7258 H2C_CL_MAC_WOW, 7259 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 7260 H2C_DISCONNECT_DETECT_LEN); 7261 7262 ret = rtw89_h2c_tx(rtwdev, skb, false); 7263 if (ret) { 7264 rtw89_err(rtwdev, "failed 
to send h2c\n"); 7265 goto fail; 7266 } 7267 7268 return 0; 7269 7270 fail: 7271 dev_kfree_skb_any(skb); 7272 7273 return ret; 7274 } 7275 7276 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7277 bool enable) 7278 { 7279 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7280 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7281 struct rtw89_h2c_cfg_nlo *h2c; 7282 u32 len = sizeof(*h2c); 7283 struct sk_buff *skb; 7284 int ret, i; 7285 7286 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7287 if (!skb) { 7288 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 7289 return -ENOMEM; 7290 } 7291 7292 skb_put(skb, len); 7293 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 7294 7295 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 7296 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 7297 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 7298 7299 if (enable) { 7300 h2c->nlo_cnt = nd_config->n_match_sets; 7301 for (i = 0 ; i < nd_config->n_match_sets; i++) { 7302 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 7303 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 7304 nd_config->match_sets[i].ssid.ssid_len); 7305 } 7306 } 7307 7308 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7309 H2C_CAT_MAC, 7310 H2C_CL_MAC_WOW, 7311 H2C_FUNC_NLO, 0, 1, 7312 len); 7313 7314 ret = rtw89_h2c_tx(rtwdev, skb, false); 7315 if (ret) { 7316 rtw89_err(rtwdev, "failed to send h2c\n"); 7317 goto fail; 7318 } 7319 7320 return 0; 7321 7322 fail: 7323 dev_kfree_skb_any(skb); 7324 return ret; 7325 } 7326 7327 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7328 bool enable) 7329 { 7330 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7331 struct rtw89_h2c_wow_global *h2c; 7332 u8 macid = rtwvif_link->mac_id; 7333 u32 len = sizeof(*h2c); 7334 struct sk_buff *skb; 7335 int ret; 7336 7337 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7338 if (!skb) { 7339 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 7340 return -ENOMEM; 7341 } 7342 7343 skb_put(skb, len); 7344 h2c = (struct rtw89_h2c_wow_global *)skb->data; 7345 7346 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 7347 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7348 le32_encode_bits(rtw_wow->ptk_alg, 7349 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7350 le32_encode_bits(rtw_wow->gtk_alg, 7351 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7352 h2c->key_info = rtw_wow->key_info; 7353 7354 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7355 H2C_CAT_MAC, 7356 H2C_CL_MAC_WOW, 7357 H2C_FUNC_WOW_GLOBAL, 0, 1, 7358 len); 7359 7360 ret = rtw89_h2c_tx(rtwdev, skb, false); 7361 if (ret) { 7362 rtw89_err(rtwdev, "failed to send h2c\n"); 7363 goto fail; 7364 } 7365 7366 return 0; 7367 7368 fail: 7369 dev_kfree_skb_any(skb); 7370 7371 return ret; 7372 } 7373 7374 #define H2C_WAKEUP_CTRL_LEN 4 7375 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7376 struct rtw89_vif_link *rtwvif_link, 7377 bool enable) 7378 { 7379 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7380 struct sk_buff *skb; 7381 u8 macid = rtwvif_link->mac_id; 7382 int ret; 7383 7384 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7385 if (!skb) { 7386 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7387 return -ENOMEM; 7388 } 7389 7390 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7391 7392 if (rtw_wow->pattern_cnt) 7393 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, 
enable); 7394 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7395 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7396 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7397 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7398 7399 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7400 7401 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7402 H2C_CAT_MAC, 7403 H2C_CL_MAC_WOW, 7404 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7405 H2C_WAKEUP_CTRL_LEN); 7406 7407 ret = rtw89_h2c_tx(rtwdev, skb, false); 7408 if (ret) { 7409 rtw89_err(rtwdev, "failed to send h2c\n"); 7410 goto fail; 7411 } 7412 7413 return 0; 7414 7415 fail: 7416 dev_kfree_skb_any(skb); 7417 7418 return ret; 7419 } 7420 7421 #define H2C_WOW_CAM_UPD_LEN 24 7422 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7423 struct rtw89_wow_cam_info *cam_info) 7424 { 7425 struct sk_buff *skb; 7426 int ret; 7427 7428 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7429 if (!skb) { 7430 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7431 return -ENOMEM; 7432 } 7433 7434 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7435 7436 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7437 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7438 if (cam_info->valid) { 7439 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7440 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7441 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7442 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7443 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7444 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7445 cam_info->negative_pattern_match); 7446 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7447 cam_info->skip_mac_hdr); 7448 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7449 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7450 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7451 } 7452 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7453 7454 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7455 H2C_CAT_MAC, 7456 H2C_CL_MAC_WOW, 7457 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7458 H2C_WOW_CAM_UPD_LEN); 7459 7460 ret = rtw89_h2c_tx(rtwdev, skb, false); 7461 if (ret) { 7462 rtw89_err(rtwdev, "failed to send h2c\n"); 7463 goto fail; 7464 } 7465 7466 return 0; 7467 fail: 7468 dev_kfree_skb_any(skb); 7469 7470 return ret; 7471 } 7472 7473 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7474 struct rtw89_vif_link *rtwvif_link, 7475 bool enable) 7476 { 7477 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7478 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7479 struct rtw89_h2c_wow_gtk_ofld *h2c; 7480 u8 macid = rtwvif_link->mac_id; 7481 u32 len = sizeof(*h2c); 7482 u8 pkt_id_sa_query = 0; 7483 struct sk_buff *skb; 7484 u8 pkt_id_eapol = 0; 7485 int ret; 7486 7487 if (!rtw_wow->gtk_alg) 7488 return 0; 7489 7490 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7491 if (!skb) { 7492 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7493 return -ENOMEM; 7494 } 7495 7496 skb_put(skb, len); 7497 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7498 7499 if (!enable) 7500 goto hdr; 7501 7502 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7503 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7504 &pkt_id_eapol); 7505 if (ret) 7506 goto fail; 7507 7508 if (gtk_info->igtk_keyid) { 7509 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7510 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7511 &pkt_id_sa_query); 7512 if (ret) 7513 goto fail; 7514 } 7515 
7516 /* TKIP is not supported yet */ 7517 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7518 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7519 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 7520 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7521 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7522 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7523 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7524 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7525 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7526 h2c->gtk_info = rtw_wow->gtk_info; 7527 7528 hdr: 7529 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7530 H2C_CAT_MAC, 7531 H2C_CL_MAC_WOW, 7532 H2C_FUNC_GTK_OFLD, 0, 1, 7533 len); 7534 7535 ret = rtw89_h2c_tx(rtwdev, skb, false); 7536 if (ret) { 7537 rtw89_err(rtwdev, "failed to send h2c\n"); 7538 goto fail; 7539 } 7540 return 0; 7541 fail: 7542 dev_kfree_skb_any(skb); 7543 7544 return ret; 7545 } 7546 7547 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7548 bool enable) 7549 { 7550 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7551 struct rtw89_h2c_fwips *h2c; 7552 u32 len = sizeof(*h2c); 7553 struct sk_buff *skb; 7554 7555 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7556 if (!skb) { 7557 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7558 return -ENOMEM; 7559 } 7560 skb_put(skb, len); 7561 h2c = (struct rtw89_h2c_fwips *)skb->data; 7562 7563 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7564 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7565 7566 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7567 H2C_CAT_MAC, 7568 H2C_CL_MAC_PS, 7569 H2C_FUNC_IPS_CFG, 0, 1, 7570 len); 7571 7572 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7573 } 7574 7575 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7576 { 7577 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7578 struct rtw89_h2c_wow_aoac *h2c; 7579 u32 len = sizeof(*h2c); 7580 struct sk_buff *skb; 7581 7582 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7583 if (!skb) { 7584 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7585 return -ENOMEM; 7586 } 7587 7588 skb_put(skb, len); 7589 7590 /* This H2C only notifies firmware to generate the AOAC report C2H; 7591 * it does not need any parameters. 7592 */ 7593 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7594 H2C_CAT_MAC, 7595 H2C_CL_MAC_WOW, 7596 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7597 len); 7598 7599 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 7600 } 7601 7602 /* Return < 0 if a failure happens while waiting for the condition. 7603 * Return 0 when waiting for the condition succeeds. 7604 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7605 * where 1 means SER is in progress.
7606 */ 7607 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7608 struct rtw89_wait_info *wait, unsigned int cond) 7609 { 7610 int ret; 7611 7612 ret = rtw89_h2c_tx(rtwdev, skb, false); 7613 if (ret) { 7614 rtw89_err(rtwdev, "failed to send h2c\n"); 7615 dev_kfree_skb_any(skb); 7616 return -EBUSY; 7617 } 7618 7619 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7620 return 1; 7621 7622 return rtw89_wait_for_cond(wait, cond); 7623 } 7624 7625 #define H2C_ADD_MCC_LEN 16 7626 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7627 const struct rtw89_fw_mcc_add_req *p) 7628 { 7629 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7630 struct sk_buff *skb; 7631 unsigned int cond; 7632 7633 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7634 if (!skb) { 7635 rtw89_err(rtwdev, 7636 "failed to alloc skb for add mcc\n"); 7637 return -ENOMEM; 7638 } 7639 7640 skb_put(skb, H2C_ADD_MCC_LEN); 7641 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7642 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7643 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7644 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7645 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7646 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7647 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7648 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7649 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7650 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7651 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7652 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7653 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7654 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7655 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7656 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7657 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7658 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7659 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7660 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7661 7662 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7663 H2C_CAT_MAC, 7664 H2C_CL_MCC, 7665 H2C_FUNC_ADD_MCC, 0, 0, 7666 H2C_ADD_MCC_LEN); 7667 7668 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7669 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7670 } 7671 7672 #define H2C_START_MCC_LEN 12 7673 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7674 const struct rtw89_fw_mcc_start_req *p) 7675 { 7676 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7677 struct sk_buff *skb; 7678 unsigned int cond; 7679 7680 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7681 if (!skb) { 7682 rtw89_err(rtwdev, 7683 "failed to alloc skb for start mcc\n"); 7684 return -ENOMEM; 7685 } 7686 7687 skb_put(skb, H2C_START_MCC_LEN); 7688 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7689 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7690 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7691 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7692 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7693 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7694 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 7695 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7696 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7697 7698 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7699 H2C_CAT_MAC, 7700 H2C_CL_MCC, 7701 H2C_FUNC_START_MCC, 0, 0, 7702 H2C_START_MCC_LEN); 7703 7704 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7705 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7706 } 7707 7708 #define H2C_STOP_MCC_LEN 4 7709 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7710 bool prev_groups) 7711 { 7712 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7713 struct sk_buff *skb; 7714 unsigned int cond; 7715 7716 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7717 if (!skb) { 7718 rtw89_err(rtwdev, 7719 "failed to alloc skb for stop mcc\n"); 7720 return -ENOMEM; 7721 } 7722 7723 skb_put(skb, H2C_STOP_MCC_LEN); 7724 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7725 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7726 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7727 7728 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7729 H2C_CAT_MAC, 7730 H2C_CL_MCC, 7731 H2C_FUNC_STOP_MCC, 0, 0, 7732 H2C_STOP_MCC_LEN); 7733 7734 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7735 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7736 } 7737 7738 #define H2C_DEL_MCC_GROUP_LEN 4 7739 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7740 bool prev_groups) 7741 { 7742 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7743 struct sk_buff *skb; 7744 unsigned int cond; 7745 7746 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7747 if (!skb) { 7748 rtw89_err(rtwdev, 7749 "failed to alloc skb for del mcc group\n"); 7750 return -ENOMEM; 7751 } 7752 7753 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7754 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7755 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7756 7757 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7758 H2C_CAT_MAC, 7759 H2C_CL_MCC, 7760 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7761 H2C_DEL_MCC_GROUP_LEN); 7762 7763 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7764 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7765 } 7766 7767 #define H2C_RESET_MCC_GROUP_LEN 4 7768 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7769 { 7770 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7771 struct sk_buff *skb; 7772 unsigned int cond; 7773 7774 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7775 if (!skb) { 7776 rtw89_err(rtwdev, 7777 "failed to alloc skb for reset mcc group\n"); 7778 return -ENOMEM; 7779 } 7780 7781 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7782 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7783 7784 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7785 H2C_CAT_MAC, 7786 H2C_CL_MCC, 7787 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7788 H2C_RESET_MCC_GROUP_LEN); 7789 7790 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7791 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7792 } 7793 7794 #define H2C_MCC_REQ_TSF_LEN 4 7795 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7796 const struct rtw89_fw_mcc_tsf_req *req, 7797 struct rtw89_mac_mcc_tsf_rpt *rpt) 7798 { 7799 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7800 struct rtw89_mac_mcc_tsf_rpt *tmp; 7801 struct sk_buff *skb; 7802 unsigned int cond; 7803 int ret; 7804 7805 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 7806 if (!skb) { 7807 rtw89_err(rtwdev, 7808 "failed to alloc skb for mcc req tsf\n"); 7809 return -ENOMEM; 7810 } 7811 7812 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7813 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7814 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7815 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7816 7817 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7818 H2C_CAT_MAC, 7819 H2C_CL_MCC, 7820 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7821 H2C_MCC_REQ_TSF_LEN); 7822 7823 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7824 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7825 if (ret) 7826 return ret; 7827 7828 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7829 *rpt = *tmp; 7830 7831 return 0; 7832 } 7833 7834 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7835 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7836 u8 *bitmap) 7837 { 7838 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7839 struct sk_buff *skb; 7840 unsigned int cond; 7841 u8 map_len; 7842 u8 h2c_len; 7843 7844 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7845 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7846 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7847 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7848 if (!skb) { 7849 rtw89_err(rtwdev, 7850 "failed to alloc skb for mcc macid bitmap\n"); 7851 return -ENOMEM; 7852 } 7853 7854 skb_put(skb, h2c_len); 7855 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7856 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7857 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7858 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7859 7860 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7861 H2C_CAT_MAC, 7862 H2C_CL_MCC, 7863 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7864 h2c_len); 7865 7866 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7867 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7868 } 7869 7870 #define H2C_MCC_SYNC_LEN 4 7871 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7872 u8 target, u8 offset) 7873 { 7874 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7875 struct sk_buff *skb; 7876 unsigned int cond; 7877 7878 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7879 if (!skb) { 7880 rtw89_err(rtwdev, 7881 "failed to alloc skb for mcc sync\n"); 7882 return -ENOMEM; 7883 } 7884 7885 skb_put(skb, H2C_MCC_SYNC_LEN); 7886 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7887 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7888 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7889 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7890 7891 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7892 H2C_CAT_MAC, 7893 H2C_CL_MCC, 7894 H2C_FUNC_MCC_SYNC, 0, 0, 7895 H2C_MCC_SYNC_LEN); 7896 7897 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 7898 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7899 } 7900 7901 #define H2C_MCC_SET_DURATION_LEN 20 7902 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7903 const struct rtw89_fw_mcc_duration *p) 7904 { 7905 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7906 struct sk_buff *skb; 7907 unsigned int cond; 7908 7909 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7910 if (!skb) { 7911 rtw89_err(rtwdev, 7912 "failed to alloc skb for mcc set duration\n"); 7913 return -ENOMEM; 7914 } 7915 7916 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
7917 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 7918 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 7919 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 7920 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 7921 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 7922 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 7923 p->start_tsf_low); 7924 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 7925 p->start_tsf_high); 7926 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 7927 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 7928 7929 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7930 H2C_CAT_MAC, 7931 H2C_CL_MCC, 7932 H2C_FUNC_MCC_SET_DURATION, 0, 0, 7933 H2C_MCC_SET_DURATION_LEN); 7934 7935 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 7936 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7937 } 7938 7939 static 7940 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 7941 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 7942 struct rtw89_h2c_mrc_add_slot *slot_h2c) 7943 { 7944 bool fill_h2c = !!slot_h2c; 7945 unsigned int i; 7946 7947 if (!fill_h2c) 7948 goto calc_len; 7949 7950 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 7951 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 7952 le32_encode_bits(slot_arg->courtesy_en, 7953 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 7954 le32_encode_bits(slot_arg->role_num, 7955 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 7956 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 7957 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 7958 le32_encode_bits(slot_arg->courtesy_target, 7959 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 7960 7961 for (i = 0; i < slot_arg->role_num; i++) { 7962 slot_h2c->roles[i].w0 = 7963 le32_encode_bits(slot_arg->roles[i].macid, 7964 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 7965 le32_encode_bits(slot_arg->roles[i].role_type, 7966 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 7967 le32_encode_bits(slot_arg->roles[i].is_master, 7968 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 7969 le32_encode_bits(slot_arg->roles[i].en_tx_null, 7970 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 7971 le32_encode_bits(false, 7972 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 7973 le32_encode_bits(false, 7974 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 7975 slot_h2c->roles[i].w1 = 7976 le32_encode_bits(slot_arg->roles[i].central_ch, 7977 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 7978 le32_encode_bits(slot_arg->roles[i].primary_ch, 7979 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 7980 le32_encode_bits(slot_arg->roles[i].bw, 7981 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 7982 le32_encode_bits(slot_arg->roles[i].band, 7983 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 7984 le32_encode_bits(slot_arg->roles[i].null_early, 7985 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 7986 le32_encode_bits(false, 7987 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 7988 le32_encode_bits(true, 7989 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 7990 slot_h2c->roles[i].macid_main_bitmap = 7991 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 7992 slot_h2c->roles[i].macid_paired_bitmap = 7993 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 7994 } 7995 7996 calc_len: 7997 return struct_size(slot_h2c, roles, slot_arg->role_num); 7998 } 7999 8000 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8001 const struct rtw89_fw_mrc_add_arg *arg) 8002 { 8003 struct rtw89_h2c_mrc_add *h2c_head; 8004 struct sk_buff *skb; 8005 unsigned int i; 8006 void *tmp; 8007 u32 
len; 8008 int ret; 8009 8010 len = sizeof(*h2c_head); 8011 for (i = 0; i < arg->slot_num; i++) 8012 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8013 8014 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8015 if (!skb) { 8016 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8017 return -ENOMEM; 8018 } 8019 8020 skb_put(skb, len); 8021 tmp = skb->data; 8022 8023 h2c_head = tmp; 8024 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8025 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8026 le32_encode_bits(arg->sch_type, 8027 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8028 le32_encode_bits(arg->slot_num, 8029 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8030 le32_encode_bits(arg->btc_in_sch, 8031 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8032 8033 tmp += sizeof(*h2c_head); 8034 for (i = 0; i < arg->slot_num; i++) 8035 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8036 8037 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8038 H2C_CAT_MAC, 8039 H2C_CL_MRC, 8040 H2C_FUNC_ADD_MRC, 0, 0, 8041 len); 8042 8043 ret = rtw89_h2c_tx(rtwdev, skb, false); 8044 if (ret) { 8045 rtw89_err(rtwdev, "failed to send h2c\n"); 8046 dev_kfree_skb_any(skb); 8047 return -EBUSY; 8048 } 8049 8050 return 0; 8051 } 8052 8053 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8054 const struct rtw89_fw_mrc_start_arg *arg) 8055 { 8056 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8057 struct rtw89_h2c_mrc_start *h2c; 8058 u32 len = sizeof(*h2c); 8059 struct sk_buff *skb; 8060 unsigned int cond; 8061 8062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8063 if (!skb) { 8064 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8065 return -ENOMEM; 8066 } 8067 8068 skb_put(skb, len); 8069 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8070 8071 h2c->w0 = le32_encode_bits(arg->sch_idx, 8072 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8073 le32_encode_bits(arg->old_sch_idx, 8074 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8075 le32_encode_bits(arg->action, 8076 RTW89_H2C_MRC_START_W0_ACTION); 8077 8078 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8079 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8080 8081 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8082 H2C_CAT_MAC, 8083 H2C_CL_MRC, 8084 H2C_FUNC_START_MRC, 0, 0, 8085 len); 8086 8087 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8088 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8089 } 8090 8091 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8092 { 8093 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8094 struct rtw89_h2c_mrc_del *h2c; 8095 u32 len = sizeof(*h2c); 8096 struct sk_buff *skb; 8097 unsigned int cond; 8098 8099 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8100 if (!skb) { 8101 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8102 return -ENOMEM; 8103 } 8104 8105 skb_put(skb, len); 8106 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8107 8108 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8109 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8110 8111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8112 H2C_CAT_MAC, 8113 H2C_CL_MRC, 8114 H2C_FUNC_DEL_MRC, 0, 0, 8115 len); 8116 8117 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8118 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8119 } 8120 8121 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8122 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8123 struct rtw89_mac_mrc_tsf_rpt *rpt) 8124 { 8125 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8126 struct 
rtw89_h2c_mrc_req_tsf *h2c; 8127 struct rtw89_mac_mrc_tsf_rpt *tmp; 8128 struct sk_buff *skb; 8129 unsigned int i; 8130 u32 len; 8131 int ret; 8132 8133 len = struct_size(h2c, infos, arg->num); 8134 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8135 if (!skb) { 8136 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8137 return -ENOMEM; 8138 } 8139 8140 skb_put(skb, len); 8141 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8142 8143 h2c->req_tsf_num = arg->num; 8144 for (i = 0; i < arg->num; i++) 8145 h2c->infos[i] = 8146 u8_encode_bits(arg->infos[i].band, 8147 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8148 u8_encode_bits(arg->infos[i].port, 8149 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8150 8151 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8152 H2C_CAT_MAC, 8153 H2C_CL_MRC, 8154 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8155 len); 8156 8157 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8158 if (ret) 8159 return ret; 8160 8161 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8162 *rpt = *tmp; 8163 8164 return 0; 8165 } 8166 8167 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8168 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8169 { 8170 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8171 u32 len = sizeof(*h2c); 8172 struct sk_buff *skb; 8173 int ret; 8174 8175 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8176 if (!skb) { 8177 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8178 return -ENOMEM; 8179 } 8180 8181 skb_put(skb, len); 8182 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8183 8184 h2c->w0 = le32_encode_bits(arg->sch_idx, 8185 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8186 le32_encode_bits(arg->action, 8187 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8188 le32_encode_bits(arg->macid, 8189 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8190 h2c->w1 = le32_encode_bits(arg->client_macid, 8191 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8192 8193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8194 H2C_CAT_MAC, 8195 H2C_CL_MRC, 8196 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8197 len); 8198 8199 ret = rtw89_h2c_tx(rtwdev, skb, false); 8200 if (ret) { 8201 rtw89_err(rtwdev, "failed to send h2c\n"); 8202 dev_kfree_skb_any(skb); 8203 return -EBUSY; 8204 } 8205 8206 return 0; 8207 } 8208 8209 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8210 const struct rtw89_fw_mrc_sync_arg *arg) 8211 { 8212 struct rtw89_h2c_mrc_sync *h2c; 8213 u32 len = sizeof(*h2c); 8214 struct sk_buff *skb; 8215 int ret; 8216 8217 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8218 if (!skb) { 8219 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 8220 return -ENOMEM; 8221 } 8222 8223 skb_put(skb, len); 8224 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8225 8226 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8227 le32_encode_bits(arg->src.port, 8228 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8229 le32_encode_bits(arg->src.band, 8230 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8231 le32_encode_bits(arg->dest.port, 8232 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8233 le32_encode_bits(arg->dest.band, 8234 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8235 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8236 8237 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8238 H2C_CAT_MAC, 8239 H2C_CL_MRC, 8240 H2C_FUNC_MRC_SYNC, 0, 0, 8241 len); 8242 8243 ret = rtw89_h2c_tx(rtwdev, skb, false); 8244 if (ret) { 8245 rtw89_err(rtwdev, "failed to send h2c\n"); 8246 dev_kfree_skb_any(skb); 8247 return -EBUSY; 8248 } 8249 8250 return 0; 8251 } 8252 8253 int 
rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8254 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8255 { 8256 struct rtw89_h2c_mrc_upd_duration *h2c; 8257 struct sk_buff *skb; 8258 unsigned int i; 8259 u32 len; 8260 int ret; 8261 8262 len = struct_size(h2c, slots, arg->slot_num); 8263 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8264 if (!skb) { 8265 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8266 return -ENOMEM; 8267 } 8268 8269 skb_put(skb, len); 8270 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8271 8272 h2c->w0 = le32_encode_bits(arg->sch_idx, 8273 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8274 le32_encode_bits(arg->slot_num, 8275 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8276 le32_encode_bits(false, 8277 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8278 8279 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8280 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8281 8282 for (i = 0; i < arg->slot_num; i++) { 8283 h2c->slots[i] = 8284 le32_encode_bits(arg->slots[i].slot_idx, 8285 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8286 le32_encode_bits(arg->slots[i].duration, 8287 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8288 } 8289 8290 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8291 H2C_CAT_MAC, 8292 H2C_CL_MRC, 8293 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8294 len); 8295 8296 ret = rtw89_h2c_tx(rtwdev, skb, false); 8297 if (ret) { 8298 rtw89_err(rtwdev, "failed to send h2c\n"); 8299 dev_kfree_skb_any(skb); 8300 return -EBUSY; 8301 } 8302 8303 return 0; 8304 } 8305 8306 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8307 { 8308 struct rtw89_h2c_ap_info *h2c; 8309 u32 len = sizeof(*h2c); 8310 struct sk_buff *skb; 8311 int ret; 8312 8313 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8314 if (!skb) { 8315 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8316 return -ENOMEM; 8317 } 8318 8319 skb_put(skb, len); 8320 h2c = (struct rtw89_h2c_ap_info *)skb->data; 8321 8322 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8323 8324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8325 H2C_CAT_MAC, 8326 H2C_CL_AP, 8327 H2C_FUNC_AP_INFO, 0, 0, 8328 len); 8329 8330 ret = rtw89_h2c_tx(rtwdev, skb, false); 8331 if (ret) { 8332 rtw89_err(rtwdev, "failed to send h2c\n"); 8333 dev_kfree_skb_any(skb); 8334 return -EBUSY; 8335 } 8336 8337 return 0; 8338 } 8339 8340 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8341 { 8342 int ret; 8343 8344 if (en) { 8345 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8346 return 0; 8347 } else { 8348 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8349 return 0; 8350 } 8351 8352 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8353 if (ret) { 8354 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8355 return ret; 8356 8357 /* During recovery, neither driver nor stack has full error 8358 * handling, so show a warning, but return 0 with refcount 8359 * increased normally. It can avoid underflow when calling 8360 * with @en == false later. 
8361 */ 8362 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8363 } 8364 8365 if (en) 8366 refcount_set(&rtwdev->refcount_ap_info, 1); 8367 8368 return 0; 8369 } 8370 8371 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8372 { 8373 static const u8 zeros[U8_MAX] = {}; 8374 8375 return memcmp(ext_ptr, zeros, ext_len) == 0; 8376 } 8377 8378 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8379 ({ \ 8380 u8 __var_sz = sizeof(*(e)); \ 8381 bool __accept; \ 8382 if (__var_sz >= (ent_sz)) \ 8383 __accept = true; \ 8384 else \ 8385 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8386 (ent_sz) - __var_sz);\ 8387 __accept; \ 8388 }) 8389 8390 static bool 8391 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8392 const void *cursor, 8393 const struct rtw89_txpwr_conf *conf) 8394 { 8395 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8396 return false; 8397 8398 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8399 return false; 8400 8401 switch (e->rs) { 8402 case RTW89_RS_CCK: 8403 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8404 return false; 8405 break; 8406 case RTW89_RS_OFDM: 8407 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8408 return false; 8409 break; 8410 case RTW89_RS_MCS: 8411 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8412 e->nss >= RTW89_NSS_NUM || 8413 e->ofdma >= RTW89_OFDMA_NUM) 8414 return false; 8415 break; 8416 case RTW89_RS_HEDCM: 8417 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8418 e->nss >= RTW89_NSS_HEDCM_NUM || 8419 e->ofdma >= RTW89_OFDMA_NUM) 8420 return false; 8421 break; 8422 case RTW89_RS_OFFSET: 8423 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8424 return false; 8425 break; 8426 default: 8427 return false; 8428 } 8429 8430 return true; 8431 } 8432 8433 static 8434 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8435 const struct rtw89_txpwr_table *tbl) 8436 { 8437 const struct rtw89_txpwr_conf *conf = tbl->data; 8438 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8439 struct rtw89_txpwr_byrate *byr_head; 8440 struct rtw89_rate_desc desc = {}; 8441 const void *cursor; 8442 u32 data; 8443 s8 *byr; 8444 int i; 8445 8446 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8447 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8448 continue; 8449 8450 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8451 data = le32_to_cpu(entry.data); 8452 desc.ofdma = entry.ofdma; 8453 desc.nss = entry.nss; 8454 desc.rs = entry.rs; 8455 8456 for (i = 0; i < entry.len; i++, data >>= 8) { 8457 desc.idx = entry.shf + i; 8458 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8459 *byr = data & 0xff; 8460 } 8461 } 8462 } 8463 8464 static bool 8465 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8466 const void *cursor, 8467 const struct rtw89_txpwr_conf *conf) 8468 { 8469 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8470 return false; 8471 8472 if (e->bw >= RTW89_2G_BW_NUM) 8473 return false; 8474 if (e->nt >= RTW89_NTX_NUM) 8475 return false; 8476 if (e->rs >= RTW89_RS_LMT_NUM) 8477 return false; 8478 if (e->bf >= RTW89_BF_NUM) 8479 return false; 8480 if (e->regd >= RTW89_REGD_NUM) 8481 return false; 8482 if (e->ch_idx >= RTW89_2G_CH_NUM) 8483 return false; 8484 8485 return true; 8486 } 8487 8488 static 8489 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8490 { 8491 const struct rtw89_txpwr_conf *conf = &data->conf; 8492 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8493 const void *cursor; 8494 8495 
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8496 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8497 continue; 8498 8499 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8500 [entry.ch_idx] = entry.v; 8501 } 8502 } 8503 8504 static bool 8505 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8506 const void *cursor, 8507 const struct rtw89_txpwr_conf *conf) 8508 { 8509 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8510 return false; 8511 8512 if (e->bw >= RTW89_5G_BW_NUM) 8513 return false; 8514 if (e->nt >= RTW89_NTX_NUM) 8515 return false; 8516 if (e->rs >= RTW89_RS_LMT_NUM) 8517 return false; 8518 if (e->bf >= RTW89_BF_NUM) 8519 return false; 8520 if (e->regd >= RTW89_REGD_NUM) 8521 return false; 8522 if (e->ch_idx >= RTW89_5G_CH_NUM) 8523 return false; 8524 8525 return true; 8526 } 8527 8528 static 8529 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8530 { 8531 const struct rtw89_txpwr_conf *conf = &data->conf; 8532 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8533 const void *cursor; 8534 8535 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8536 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8537 continue; 8538 8539 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8540 [entry.ch_idx] = entry.v; 8541 } 8542 } 8543 8544 static bool 8545 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8546 const void *cursor, 8547 const struct rtw89_txpwr_conf *conf) 8548 { 8549 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8550 return false; 8551 8552 if (e->bw >= RTW89_6G_BW_NUM) 8553 return false; 8554 if (e->nt >= RTW89_NTX_NUM) 8555 return false; 8556 if (e->rs >= RTW89_RS_LMT_NUM) 8557 return false; 8558 if (e->bf >= RTW89_BF_NUM) 8559 return false; 8560 if (e->regd >= RTW89_REGD_NUM) 8561 return false; 8562 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8563 return false; 8564 if (e->ch_idx >= RTW89_6G_CH_NUM) 8565 return false; 8566 8567 return true; 8568 } 8569 8570 static 8571 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8572 { 8573 const struct rtw89_txpwr_conf *conf = &data->conf; 8574 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8575 const void *cursor; 8576 8577 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8578 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8579 continue; 8580 8581 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8582 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8583 } 8584 } 8585 8586 static bool 8587 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8588 const void *cursor, 8589 const struct rtw89_txpwr_conf *conf) 8590 { 8591 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8592 return false; 8593 8594 if (e->ru >= RTW89_RU_NUM) 8595 return false; 8596 if (e->nt >= RTW89_NTX_NUM) 8597 return false; 8598 if (e->regd >= RTW89_REGD_NUM) 8599 return false; 8600 if (e->ch_idx >= RTW89_2G_CH_NUM) 8601 return false; 8602 8603 return true; 8604 } 8605 8606 static 8607 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8608 { 8609 const struct rtw89_txpwr_conf *conf = &data->conf; 8610 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8611 const void *cursor; 8612 8613 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8614 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 8615 continue; 8616 8617 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = 
entry.v; 8618 } 8619 } 8620 8621 static bool 8622 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8623 const void *cursor, 8624 const struct rtw89_txpwr_conf *conf) 8625 { 8626 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8627 return false; 8628 8629 if (e->ru >= RTW89_RU_NUM) 8630 return false; 8631 if (e->nt >= RTW89_NTX_NUM) 8632 return false; 8633 if (e->regd >= RTW89_REGD_NUM) 8634 return false; 8635 if (e->ch_idx >= RTW89_5G_CH_NUM) 8636 return false; 8637 8638 return true; 8639 } 8640 8641 static 8642 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8643 { 8644 const struct rtw89_txpwr_conf *conf = &data->conf; 8645 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8646 const void *cursor; 8647 8648 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8649 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8650 continue; 8651 8652 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8653 } 8654 } 8655 8656 static bool 8657 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8658 const void *cursor, 8659 const struct rtw89_txpwr_conf *conf) 8660 { 8661 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8662 return false; 8663 8664 if (e->ru >= RTW89_RU_NUM) 8665 return false; 8666 if (e->nt >= RTW89_NTX_NUM) 8667 return false; 8668 if (e->regd >= RTW89_REGD_NUM) 8669 return false; 8670 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8671 return false; 8672 if (e->ch_idx >= RTW89_6G_CH_NUM) 8673 return false; 8674 8675 return true; 8676 } 8677 8678 static 8679 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8680 { 8681 const struct rtw89_txpwr_conf *conf = &data->conf; 8682 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8683 const void *cursor; 8684 8685 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8686 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8687 continue; 8688 8689 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8690 [entry.ch_idx] = entry.v; 8691 } 8692 } 8693 8694 static bool 8695 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 8696 const void *cursor, 8697 const struct rtw89_txpwr_conf *conf) 8698 { 8699 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8700 return false; 8701 8702 if (e->band >= RTW89_BAND_NUM) 8703 return false; 8704 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8705 return false; 8706 if (e->regd >= RTW89_REGD_NUM) 8707 return false; 8708 8709 return true; 8710 } 8711 8712 static 8713 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8714 { 8715 const struct rtw89_txpwr_conf *conf = &data->conf; 8716 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8717 const void *cursor; 8718 8719 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8720 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8721 continue; 8722 8723 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8724 } 8725 } 8726 8727 static bool 8728 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8729 const void *cursor, 8730 const struct rtw89_txpwr_conf *conf) 8731 { 8732 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8733 return false; 8734 8735 if (e->band >= RTW89_BAND_NUM) 8736 return false; 8737 if (e->regd >= RTW89_REGD_NUM) 8738 return false; 8739 8740 return true; 8741 } 8742 8743 static 8744 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data 
*data) 8745 { 8746 const struct rtw89_txpwr_conf *conf = &data->conf; 8747 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8748 const void *cursor; 8749 8750 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8751 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8752 continue; 8753 8754 data->v[entry.band][entry.regd] = entry.v; 8755 } 8756 } 8757 8758 const struct rtw89_rfe_parms * 8759 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8760 const struct rtw89_rfe_parms *init) 8761 { 8762 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8763 struct rtw89_rfe_parms *parms; 8764 8765 if (!rfe_data) 8766 return init; 8767 8768 parms = &rfe_data->rfe_parms; 8769 if (init) 8770 *parms = *init; 8771 8772 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8773 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8774 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8775 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8776 parms->byr_tbl = &rfe_data->byrate.tbl; 8777 } 8778 8779 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8780 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8781 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8782 } 8783 8784 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8785 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8786 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8787 } 8788 8789 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8790 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8791 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8792 } 8793 8794 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8795 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8796 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8797 } 8798 8799 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8800 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8801 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8802 } 8803 8804 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8805 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8806 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8807 } 8808 8809 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8810 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8811 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8812 } 8813 8814 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8815 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8816 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8817 } 8818 8819 return parms; 8820 } 8821
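/* Illustrative sketch only, not part of this driver: a typical caller would
 * pass the chip's built-in parms in and adopt whatever pointer comes back,
 * since rtw89_load_rfe_data_from_fw() returns @init unchanged when no RFE
 * data was loaded from firmware. The field name rfe_parms below is an
 * assumption used purely for illustration.
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev,
 *							rtwdev->rfe_parms);
 */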