// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
#include "wow.h"

struct rtw89_eapol_2_of_2 {
	u8 gtkbody[14];
	u8 key_des_ver;
	u8 rsvd[92];
} __packed;

struct rtw89_sa_query {
	u8 category;
	u8 action;
} __packed;

struct rtw89_arp_rsp {
	u8 llc_hdr[sizeof(rfc1042_header)];
	__be16 llc_type;
	struct arphdr arp_hdr;
	u8 sender_hw[ETH_ALEN];
	__be32 sender_ip;
	u8 target_hw[ETH_ALEN];
	__be32 target_ip;
} __packed;

static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};

union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);
static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content,
				    u32 *mssc_len);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

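/* Parse a v0 firmware header: walk the per-section headers that follow the
 * main header, record each section's type, download address and length
 * (including the optional per-section checksum), and let
 * __parse_security_section() account for any MSS key material appended to a
 * security section so that "bin" keeps tracking the start of each payload.
 */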
static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);

		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;

			if (sec->secure_boot && chip->chip_id == RTL8852B)
				section_info->len_override = 960;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

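/* The MSS key pool header carries a remap table with one bit per possible
 * key slot. The slot selected from the device/customer/key indices is only
 * usable if its bit is set; the real key index within the pool is then the
 * number of set bits that precede it, since only provisioned keys are
 * actually stored.
 */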
static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
			       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupported:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content + section_info->len +
				 le32_to_cpu(mss_hdr->key_raw_offset) +
				 key_sign_len * real_key_idx;
	section_info->key_len = key_sign_len;
	section_info->key_idx = real_key_idx;

out:
	if (info->secure_section_exist) {
		section_info->ignore = true;
		return 0;
	}

	info->secure_section_exist = true;

	return 0;

ignore:
	section_info->ignore = true;

	return 0;
}

static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content,
				    u32 *mssc_len)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	int ret;

	if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) {
		ret = __parse_formatted_mssc(rtwdev, info, section_info,
					     content, mssc_len);
		if (ret)
			return -EINVAL;
	} else {
		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
		if (info->dsp_checksum)
			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;

		if (sec->secure_boot) {
			if (sec->mss_idx >= section_info->mssc)
				return -EFAULT;
			section_info->key_addr = content + section_info->len +
						 sec->mss_idx * FWDL_SECURITY_SIGLEN;
			section_info->key_len = FWDL_SECURITY_SIGLEN;
		}

		info->secure_section_exist = true;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);
	info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];

		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);

			ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* WiFi firmware images in the firmware file are not sorted by
		 * version, so walk through all of them and take the closest
		 * version that is equal to or lower than the chip CV.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	enum rtw89_fw_type type = arg.fw_type;
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_fw_suit *fw_suit;

	/* BB MCU firmware versions are stored in decreasing order in the
	 * firmware file, so take the first version that is equal to or lower
	 * than the chip CV; it is the closest match.
	 */
	if (hal->cv < elm->u.bbmcu.cv)
		return 1; /* ignore this element */

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	if (fw_suit->data)
		return 1; /* ignore this element (a firmware is taken already) */

	fw_suit->data = elm->u.bbmcu.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
{ \
	.chip_id = _chip, \
	.feature = RTW89_FW_FEATURE_ ## _feat, \
	.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
	.cond = __fw_feat_cond_ ## _cond, \
}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0),
};

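/* Walk fw_feat_tbl and enable every feature whose per-chip version condition
 * holds for the given firmware version code.
 */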
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple entries match, the last one takes effect */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matches, accept the default */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore this */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
	},
	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
	},
};

int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

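/* Prepend the 8-byte H2C command header. A "receive ack" is requested on
 * every fourth H2C sequence number even when the caller did not ask for one,
 * presumably so the firmware acknowledges the host periodically.
 */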
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section *section;
	int i;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_W7_PART_SIZE);

	for (i = 0; i < info->section_num; i++) {
		section_info = &info->section_info[i];

		if (!section_info->len_override)
			continue;

		section = &fw_hdr->sections[i];
		le32p_replace_bits(&section->w1, section_info->len_override,
				   FWSECTION_HDR_W1_SEC_SIZE);
	}

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return (info->section_num - dst_sec_idx) * sizeof(*section);
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info,
							     fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_suit *fw_suit,
				 struct rtw89_fw_bin_info *info)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	bool copy_key = false;
	u32 pkt_len;
	int ret;

	if (info->ignore)
		return 0;

	if (info->len_override) {
		if (info->len_override > info->len)
			rtw89_warn(rtwdev, "override length %u larger than original %u\n",
				   info->len_override, info->len);
		else
			residue_len = info->len_override;
	}

	if (info->key_addr && info->key_len) {
		if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
			rtw89_warn(rtwdev,
				   "ignore to copy key data because of len %d, %d, %d, %d\n",
				   info->len, FWDL_SECTION_PER_PKT_LEN,
				   info->key_len, residue_len);
		else
			copy_key = true;
	}

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		if (copy_key)
			memcpy(skb->data + pkt_len - info->key_len,
			       info->key_addr, info->key_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

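/* Download each section of the suit in FWDL_SECTION_PER_PKT_LEN chunks. For
 * chip generations after AX, also poll the per-suit (WCPU or BB MCU)
 * download-done status before returning.
 */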
static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u32 addr = R_AX_DBG_PORT_SEL;
	u32 val32;
	u16 index;

	if (chip_gen == RTW89_CHIP_BE) {
		addr = R_BE_WLCPU_PORT_PC;
		goto dump;
	}

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

dump:
	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, addr);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info = {};
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode);

	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

static
int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		goto fwdl_err;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	int retry;
	int ret;

	for (retry = 0; retry < 5; retry++) {
		ret = __rtw89_fw_download(rtwdev, type, include_bb);
		if (!ret)
			return 0;
	}

	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);

	kfree(elm_info->txpwr_trk);
	kfree(elm_info->rfk_log_fmt);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}

static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 i;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (i = 0; i < fw_log->fmt_count; i++) {
		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
			return i;
	}
	return 0;
}

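/* The log-format suit is laid out as a rtw89_fw_logsuit_hdr (format count
 * plus an array of format IDs) followed by the matching printf-style format
 * strings, each NUL-terminated. Build a lookup table of string pointers so
 * C2H log messages can be rendered by format ID.
 */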
static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}

int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	struct rtw89_fw_suit *suit = &log->suit;

	if (!suit || !suit->data) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
		return -EINVAL;
	}
	if (log->fmts)
		return 0;

	return rtw89_fw_log_create_fmts_dict(rtwdev);
}

static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0 ; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}

void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
		     struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr,
				     skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif_link *rtwvif_link,
				 struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
				 struct rtw89_vif_link *rtwvif_link,
				 struct rtw89_sta_link *rtwsta_link)
{
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c);

rtwvif_link, rtwsta_link, h2c); 1904 1905 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1906 H2C_CAT_MAC, 1907 H2C_CL_MAC_FR_EXCHG, 1908 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1909 len); 1910 1911 ret = rtw89_h2c_tx(rtwdev, skb, false); 1912 if (ret) { 1913 rtw89_err(rtwdev, "failed to send h2c\n"); 1914 goto fail; 1915 } 1916 1917 return 0; 1918 fail: 1919 dev_kfree_skb_any(skb); 1920 1921 return ret; 1922 } 1923 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1924 1925 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1926 struct rtw89_vif_link *rtwvif_link, 1927 struct rtw89_sta_link *rtwsta_link) 1928 { 1929 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 1930 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1931 u32 len = sizeof(*h2c); 1932 struct sk_buff *skb; 1933 int ret; 1934 1935 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1936 if (!skb) { 1937 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 1938 return -ENOMEM; 1939 } 1940 skb_put(skb, len); 1941 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1942 1943 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 1944 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 1945 1946 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 1947 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 1948 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 1949 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 1950 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 1951 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 1952 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 1953 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 1954 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 1955 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 1956 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 1957 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 1958 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 1959 1960 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1961 H2C_CAT_MAC, 1962 H2C_CL_MAC_FR_EXCHG, 1963 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1964 len); 1965 1966 ret = rtw89_h2c_tx(rtwdev, skb, false); 1967 if (ret) { 1968 rtw89_err(rtwdev, "failed to send h2c\n"); 1969 goto fail; 1970 } 1971 1972 return 0; 1973 fail: 1974 dev_kfree_skb_any(skb); 1975 1976 return ret; 1977 } 1978 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 1979 1980 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 1981 struct rtw89_vif_link *rtwvif_link, 1982 struct rtw89_sta_link *rtwsta_link, 1983 bool valid, struct ieee80211_ampdu_params *params) 1984 { 1985 const struct rtw89_chip_info *chip = rtwdev->chip; 1986 struct rtw89_h2c_ba_cam *h2c; 1987 u8 macid = rtwsta_link->mac_id; 1988 u32 len = sizeof(*h2c); 1989 struct sk_buff *skb; 1990 u8 entry_idx; 1991 int ret; 1992 1993 ret = valid ? 1994 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 1995 &entry_idx) : 1996 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 1997 &entry_idx); 1998 if (ret) { 1999 /* it still works even if we don't have static BA CAM, because 2000 * hardware can create dynamic BA CAM automatically. 2001 */ 2002 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2003 "failed to %s entry tid=%d for h2c ba cam\n", 2004 valid ? 
"alloc" : "free", params->tid); 2005 return 0; 2006 } 2007 2008 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2009 if (!skb) { 2010 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2011 return -ENOMEM; 2012 } 2013 skb_put(skb, len); 2014 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2015 2016 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2017 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2018 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2019 else 2020 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2021 if (!valid) 2022 goto end; 2023 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2024 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2025 if (params->buf_size > 64) 2026 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2027 else 2028 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2029 /* If init req is set, hw will set the ssn */ 2030 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2031 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2032 2033 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2034 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2035 le32_encode_bits(rtwvif_link->mac_idx, 2036 RTW89_H2C_BA_CAM_W1_BAND); 2037 } 2038 2039 end: 2040 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2041 H2C_CAT_MAC, 2042 H2C_CL_BA_CAM, 2043 H2C_FUNC_MAC_BA_CAM, 0, 1, 2044 len); 2045 2046 ret = rtw89_h2c_tx(rtwdev, skb, false); 2047 if (ret) { 2048 rtw89_err(rtwdev, "failed to send h2c\n"); 2049 goto fail; 2050 } 2051 2052 return 0; 2053 fail: 2054 dev_kfree_skb_any(skb); 2055 2056 return ret; 2057 } 2058 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2059 2060 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2061 u8 entry_idx, u8 uid) 2062 { 2063 struct rtw89_h2c_ba_cam *h2c; 2064 u32 len = sizeof(*h2c); 2065 struct sk_buff *skb; 2066 int ret; 2067 2068 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2069 if (!skb) { 2070 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2071 return -ENOMEM; 2072 } 2073 skb_put(skb, len); 2074 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2075 2076 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2077 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2078 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2079 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2080 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2081 2082 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2083 H2C_CAT_MAC, 2084 H2C_CL_BA_CAM, 2085 H2C_FUNC_MAC_BA_CAM, 0, 1, 2086 len); 2087 2088 ret = rtw89_h2c_tx(rtwdev, skb, false); 2089 if (ret) { 2090 rtw89_err(rtwdev, "failed to send h2c\n"); 2091 goto fail; 2092 } 2093 2094 return 0; 2095 fail: 2096 dev_kfree_skb_any(skb); 2097 2098 return ret; 2099 } 2100 2101 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2102 { 2103 const struct rtw89_chip_info *chip = rtwdev->chip; 2104 u8 entry_idx = chip->bacam_num; 2105 u8 uid = 0; 2106 int i; 2107 2108 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2109 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2110 entry_idx++; 2111 uid++; 2112 } 2113 } 2114 2115 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2116 struct rtw89_vif_link *rtwvif_link, 2117 struct rtw89_sta_link *rtwsta_link, 2118 bool valid, struct ieee80211_ampdu_params *params) 2119 { 2120 const struct rtw89_chip_info *chip = rtwdev->chip; 2121 struct rtw89_h2c_ba_cam_v1 *h2c; 2122 u8 
macid = rtwsta_link->mac_id; 2123 u32 len = sizeof(*h2c); 2124 struct sk_buff *skb; 2125 u8 entry_idx; 2126 u8 bmap_size; 2127 int ret; 2128 2129 ret = valid ? 2130 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2131 &entry_idx) : 2132 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2133 &entry_idx); 2134 if (ret) { 2135 /* it still works even if we don't have static BA CAM, because 2136 * hardware can create dynamic BA CAM automatically. 2137 */ 2138 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2139 "failed to %s entry tid=%d for h2c ba cam\n", 2140 valid ? "alloc" : "free", params->tid); 2141 return 0; 2142 } 2143 2144 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2145 if (!skb) { 2146 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2147 return -ENOMEM; 2148 } 2149 skb_put(skb, len); 2150 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2151 2152 if (params->buf_size > 512) 2153 bmap_size = 10; 2154 else if (params->buf_size > 256) 2155 bmap_size = 8; 2156 else if (params->buf_size > 64) 2157 bmap_size = 4; 2158 else 2159 bmap_size = 0; 2160 2161 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2162 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2163 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2164 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2165 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2166 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2167 2168 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2169 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2170 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2171 le32_encode_bits(!!rtwvif_link->mac_idx, 2172 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2173 2174 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2175 H2C_CAT_MAC, 2176 H2C_CL_BA_CAM, 2177 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2178 len); 2179 2180 ret = rtw89_h2c_tx(rtwdev, skb, false); 2181 if (ret) { 2182 rtw89_err(rtwdev, "failed to send h2c\n"); 2183 goto fail; 2184 } 2185 2186 return 0; 2187 fail: 2188 dev_kfree_skb_any(skb); 2189 2190 return ret; 2191 } 2192 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2193 2194 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2195 u8 offset, u8 mac_idx) 2196 { 2197 struct rtw89_h2c_ba_cam_init *h2c; 2198 u32 len = sizeof(*h2c); 2199 struct sk_buff *skb; 2200 int ret; 2201 2202 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2203 if (!skb) { 2204 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2205 return -ENOMEM; 2206 } 2207 skb_put(skb, len); 2208 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2209 2210 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2211 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2212 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2213 2214 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2215 H2C_CAT_MAC, 2216 H2C_CL_BA_CAM, 2217 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2218 len); 2219 2220 ret = rtw89_h2c_tx(rtwdev, skb, false); 2221 if (ret) { 2222 rtw89_err(rtwdev, "failed to send h2c\n"); 2223 goto fail; 2224 } 2225 2226 return 0; 2227 fail: 2228 dev_kfree_skb_any(skb); 2229 2230 return ret; 2231 } 2232 2233 #define H2C_LOG_CFG_LEN 12 2234 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2235 { 2236 struct sk_buff *skb; 2237 u32 comp = 0; 2238 int ret; 2239 2240 if (enable) 2241 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2242 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2243 BIT(RTW89_FW_LOG_COMP_SCAN); 2244 2245 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2246 if (!skb) { 2247 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2248 return -ENOMEM; 2249 } 2250 2251 skb_put(skb, H2C_LOG_CFG_LEN); 2252 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2253 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2254 SET_LOG_CFG_COMP(skb->data, comp); 2255 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2256 2257 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2258 H2C_CAT_MAC, 2259 H2C_CL_FW_INFO, 2260 H2C_FUNC_LOG_CFG, 0, 0, 2261 H2C_LOG_CFG_LEN); 2262 2263 ret = rtw89_h2c_tx(rtwdev, skb, false); 2264 if (ret) { 2265 rtw89_err(rtwdev, "failed to send h2c\n"); 2266 goto fail; 2267 } 2268 2269 return 0; 2270 fail: 2271 dev_kfree_skb_any(skb); 2272 2273 return ret; 2274 } 2275 2276 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2277 struct rtw89_vif_link *rtwvif_link) 2278 { 2279 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2280 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2281 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2282 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2283 struct rtw89_eapol_2_of_2 *eapol_pkt; 2284 struct ieee80211_bss_conf *bss_conf; 2285 struct ieee80211_hdr_3addr *hdr; 2286 struct sk_buff *skb; 2287 u8 key_des_ver; 2288 2289 if (rtw_wow->ptk_alg == 3) 2290 key_des_ver = 1; 2291 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2292 key_des_ver = 2; 2293 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2294 key_des_ver = 3; 2295 else 2296 key_des_ver = 0; 2297 2298 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2299 if (!skb) 2300 return NULL; 2301 2302 hdr = skb_put_zero(skb, sizeof(*hdr)); 2303 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2304 IEEE80211_FCTL_TODS | 2305 IEEE80211_FCTL_PROTECTED); 2306 2307 rcu_read_lock(); 2308 2309 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2310 2311 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2312 ether_addr_copy(hdr->addr2, bss_conf->addr); 2313 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2314 2315 rcu_read_unlock(); 2316 2317 skb_put_zero(skb, sec_hdr_len); 2318 2319 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2320 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2321 eapol_pkt->key_des_ver = key_des_ver; 2322 2323 return skb; 2324 } 2325 2326 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2327 struct rtw89_vif_link *rtwvif_link) 2328 { 2329 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2330 struct ieee80211_bss_conf *bss_conf; 2331 struct ieee80211_hdr_3addr *hdr; 2332 struct rtw89_sa_query *sa_query; 2333 struct sk_buff *skb; 2334 2335 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2336 if (!skb) 2337 return NULL; 2338 2339 hdr = skb_put_zero(skb, sizeof(*hdr)); 2340 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2341 IEEE80211_STYPE_ACTION | 2342 IEEE80211_FCTL_PROTECTED); 2343 2344 rcu_read_lock(); 2345 2346 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2347 2348 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2349 ether_addr_copy(hdr->addr2, bss_conf->addr); 2350 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2351 2352 rcu_read_unlock(); 2353 2354 skb_put_zero(skb, sec_hdr_len); 2355 2356 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2357 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2358 
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2359 2360 return skb; 2361 } 2362 2363 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2364 struct rtw89_vif_link *rtwvif_link) 2365 { 2366 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2367 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2368 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2369 struct ieee80211_hdr_3addr *hdr; 2370 struct rtw89_arp_rsp *arp_skb; 2371 struct arphdr *arp_hdr; 2372 struct sk_buff *skb; 2373 __le16 fc; 2374 2375 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2376 if (!skb) 2377 return NULL; 2378 2379 hdr = skb_put_zero(skb, sizeof(*hdr)); 2380 2381 if (rtw_wow->ptk_alg) 2382 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2383 IEEE80211_FCTL_PROTECTED); 2384 else 2385 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2386 2387 hdr->frame_control = fc; 2388 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2389 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2390 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2391 2392 skb_put_zero(skb, sec_hdr_len); 2393 2394 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2395 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2396 arp_skb->llc_type = htons(ETH_P_ARP); 2397 2398 arp_hdr = &arp_skb->arp_hdr; 2399 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2400 arp_hdr->ar_pro = htons(ETH_P_IP); 2401 arp_hdr->ar_hln = ETH_ALEN; 2402 arp_hdr->ar_pln = 4; 2403 arp_hdr->ar_op = htons(ARPOP_REPLY); 2404 2405 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2406 arp_skb->sender_ip = rtwvif->ip_addr; 2407 2408 return skb; 2409 } 2410 2411 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2412 struct rtw89_vif_link *rtwvif_link, 2413 enum rtw89_fw_pkt_ofld_type type, 2414 u8 *id) 2415 { 2416 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2417 struct rtw89_pktofld_info *info; 2418 struct sk_buff *skb; 2419 int ret; 2420 2421 info = kzalloc(sizeof(*info), GFP_KERNEL); 2422 if (!info) 2423 return -ENOMEM; 2424 2425 switch (type) { 2426 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2427 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2428 break; 2429 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2430 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2431 break; 2432 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2433 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2434 break; 2435 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2436 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2437 break; 2438 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2439 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2440 break; 2441 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2442 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2443 break; 2444 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2445 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2446 break; 2447 default: 2448 goto err; 2449 } 2450 2451 if (!skb) 2452 goto err; 2453 2454 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2455 kfree_skb(skb); 2456 2457 if (ret) 2458 goto err; 2459 2460 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2461 *id = info->id; 2462 return 0; 2463 2464 err: 2465 kfree(info); 2466 return -ENOMEM; 2467 } 2468 2469 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2470 struct rtw89_vif_link *rtwvif_link, 2471 bool notify_fw) 2472 { 2473 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2474 struct rtw89_pktofld_info *info, *tmp; 2475 2476 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2477 if (notify_fw) 
2478 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2479 else 2480 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2481 list_del(&info->list); 2482 kfree(info); 2483 } 2484 } 2485 2486 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2487 { 2488 struct rtw89_vif_link *rtwvif_link; 2489 struct rtw89_vif *rtwvif; 2490 unsigned int link_id; 2491 2492 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2493 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2494 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2495 notify_fw); 2496 } 2497 2498 #define H2C_GENERAL_PKT_LEN 6 2499 #define H2C_GENERAL_PKT_ID_UND 0xff 2500 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2501 struct rtw89_vif_link *rtwvif_link, u8 macid) 2502 { 2503 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2504 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2505 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2506 struct sk_buff *skb; 2507 int ret; 2508 2509 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2510 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2511 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2512 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2513 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2514 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2515 2516 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2517 if (!skb) { 2518 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2519 return -ENOMEM; 2520 } 2521 skb_put(skb, H2C_GENERAL_PKT_LEN); 2522 SET_GENERAL_PKT_MACID(skb->data, macid); 2523 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2524 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2525 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2526 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2527 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2528 2529 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2530 H2C_CAT_MAC, 2531 H2C_CL_FW_INFO, 2532 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2533 H2C_GENERAL_PKT_LEN); 2534 2535 ret = rtw89_h2c_tx(rtwdev, skb, false); 2536 if (ret) { 2537 rtw89_err(rtwdev, "failed to send h2c\n"); 2538 goto fail; 2539 } 2540 2541 return 0; 2542 fail: 2543 dev_kfree_skb_any(skb); 2544 2545 return ret; 2546 } 2547 2548 #define H2C_LPS_PARM_LEN 8 2549 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2550 struct rtw89_lps_parm *lps_param) 2551 { 2552 struct sk_buff *skb; 2553 int ret; 2554 2555 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2556 if (!skb) { 2557 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2558 return -ENOMEM; 2559 } 2560 skb_put(skb, H2C_LPS_PARM_LEN); 2561 2562 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2563 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2564 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2565 SET_LPS_PARM_RLBM(skb->data, 1); 2566 SET_LPS_PARM_SMARTPS(skb->data, 1); 2567 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2568 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2569 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2570 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2571 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2572 2573 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2574 H2C_CAT_MAC, 2575 H2C_CL_MAC_PS, 2576 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2577 H2C_LPS_PARM_LEN); 2578 2579 ret = rtw89_h2c_tx(rtwdev, skb, false); 2580 if (ret) { 2581 rtw89_err(rtwdev, "failed to send h2c\n"); 2582 goto fail; 2583 } 2584 2585 return 0; 2586 fail: 2587 dev_kfree_skb_any(skb); 2588 2589 return ret; 2590 } 2591 2592 int 
rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 2593 { 2594 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2595 rtwvif_link->chanctx_idx); 2596 const struct rtw89_chip_info *chip = rtwdev->chip; 2597 struct rtw89_h2c_lps_ch_info *h2c; 2598 u32 len = sizeof(*h2c); 2599 struct sk_buff *skb; 2600 u32 done; 2601 int ret; 2602 2603 if (chip->chip_gen != RTW89_CHIP_BE) 2604 return 0; 2605 2606 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2607 if (!skb) { 2608 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2609 return -ENOMEM; 2610 } 2611 skb_put(skb, len); 2612 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2613 2614 h2c->info[0].central_ch = chan->channel; 2615 h2c->info[0].pri_ch = chan->primary_channel; 2616 h2c->info[0].band = chan->band_type; 2617 h2c->info[0].bw = chan->band_width; 2618 h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2619 2620 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2621 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2622 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2623 2624 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2625 ret = rtw89_h2c_tx(rtwdev, skb, false); 2626 if (ret) { 2627 rtw89_err(rtwdev, "failed to send h2c\n"); 2628 goto fail; 2629 } 2630 2631 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2632 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2633 if (ret) 2634 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2635 2636 return 0; 2637 fail: 2638 dev_kfree_skb_any(skb); 2639 2640 return ret; 2641 } 2642 2643 #define H2C_P2P_ACT_LEN 20 2644 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2645 struct rtw89_vif_link *rtwvif_link, 2646 struct ieee80211_bss_conf *bss_conf, 2647 struct ieee80211_p2p_noa_desc *desc, 2648 u8 act, u8 noa_id) 2649 { 2650 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2651 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2652 struct sk_buff *skb; 2653 u8 *cmd; 2654 int ret; 2655 2656 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2657 if (!skb) { 2658 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2659 return -ENOMEM; 2660 } 2661 skb_put(skb, H2C_P2P_ACT_LEN); 2662 cmd = skb->data; 2663 2664 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2665 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2666 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2667 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2668 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2669 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2670 if (desc) { 2671 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2672 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2673 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2674 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2675 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2676 } 2677 2678 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2679 H2C_CAT_MAC, H2C_CL_MAC_PS, 2680 H2C_FUNC_P2P_ACT, 0, 0, 2681 H2C_P2P_ACT_LEN); 2682 2683 ret = rtw89_h2c_tx(rtwdev, skb, false); 2684 if (ret) { 2685 rtw89_err(rtwdev, "failed to send h2c\n"); 2686 goto fail; 2687 } 2688 2689 return 0; 2690 fail: 2691 dev_kfree_skb_any(skb); 2692 2693 return ret; 2694 } 2695 2696 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2697 struct sk_buff *skb) 2698 { 2699 const struct rtw89_chip_info *chip = rtwdev->chip; 2700 struct rtw89_hal *hal = &rtwdev->hal; 2701 u8 ntx_path; 2702 u8 map_b; 2703 2704 if (chip->rf_path_num == 1) { 2705 ntx_path = RF_A; 2706 map_b = 0; 2707 } else { 
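	/* Multiple RF paths: use the configured TX antenna if one is set,
	 * otherwise default to path B; map_b is raised only when both
	 * paths (RF_AB) are in use.
	 */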
2708 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2709 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2710 } 2711 2712 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2713 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2714 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2715 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2716 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2717 } 2718 2719 #define H2C_CMC_TBL_LEN 68 2720 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2721 struct rtw89_vif_link *rtwvif_link, 2722 struct rtw89_sta_link *rtwsta_link) 2723 { 2724 const struct rtw89_chip_info *chip = rtwdev->chip; 2725 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2726 struct sk_buff *skb; 2727 int ret; 2728 2729 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2730 if (!skb) { 2731 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2732 return -ENOMEM; 2733 } 2734 skb_put(skb, H2C_CMC_TBL_LEN); 2735 SET_CTRL_INFO_MACID(skb->data, macid); 2736 SET_CTRL_INFO_OPERATION(skb->data, 1); 2737 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2738 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2739 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2740 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2741 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2742 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2743 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2744 } 2745 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2746 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2747 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2748 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2749 2750 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2751 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2752 chip->h2c_cctl_func_id, 0, 1, 2753 H2C_CMC_TBL_LEN); 2754 2755 ret = rtw89_h2c_tx(rtwdev, skb, false); 2756 if (ret) { 2757 rtw89_err(rtwdev, "failed to send h2c\n"); 2758 goto fail; 2759 } 2760 2761 return 0; 2762 fail: 2763 dev_kfree_skb_any(skb); 2764 2765 return ret; 2766 } 2767 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2768 2769 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2770 struct rtw89_vif_link *rtwvif_link, 2771 struct rtw89_sta_link *rtwsta_link) 2772 { 2773 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2774 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2775 u32 len = sizeof(*h2c); 2776 struct sk_buff *skb; 2777 int ret; 2778 2779 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2780 if (!skb) { 2781 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2782 return -ENOMEM; 2783 } 2784 skb_put(skb, len); 2785 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2786 2787 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2788 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2789 2790 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2791 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2792 2793 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2794 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2795 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2796 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2797 2798 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2799 2800 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2801 2802 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2803 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2804 2805 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2806 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2807 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2808 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2809 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2810 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2811 2812 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2813 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2814 2815 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2816 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2817 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2818 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2819 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2820 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2821 2822 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2823 2824 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2825 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2826 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2827 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2828 2829 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2830 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2831 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2832 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2833 2834 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2835 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2836 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2837 len); 2838 2839 ret = rtw89_h2c_tx(rtwdev, skb, false); 2840 if (ret) { 2841 rtw89_err(rtwdev, "failed to send h2c\n"); 2842 goto fail; 2843 } 2844 2845 return 0; 2846 fail: 2847 dev_kfree_skb_any(skb); 2848 2849 return ret; 2850 } 2851 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2852 2853 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2854 struct ieee80211_link_sta *link_sta, 2855 u8 *pads) 2856 { 2857 bool ppe_th; 2858 u8 ppe16, ppe8; 2859 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 2860 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 2861 u8 ru_bitmap; 2862 u8 n, idx, sh; 2863 u16 ppe; 2864 int i; 2865 2866 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2867 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 2868 if (!ppe_th) { 2869 u8 pad; 2870 2871 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2872 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 2873 2874 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 2875 pads[i] = pad; 2876 2877 return; 2878 } 2879 2880 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 2881 n = hweight8(ru_bitmap); 2882 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2883 2884 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2885 if (!(ru_bitmap & BIT(i))) { 2886 pads[i] = 1; 2887 continue; 2888 } 2889 2890 idx = n >> 3; 2891 sh = n & 7; 2892 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 2893 2894 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 2895 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2896 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2897 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2898 2899 if (ppe16 != 7 && ppe8 == 7) 2900 pads[i] = RTW89_PE_DURATION_16; 2901 else if (ppe8 != 7) 2902 pads[i] = RTW89_PE_DURATION_8; 2903 else 2904 pads[i] = RTW89_PE_DURATION_0; 2905 } 2906 } 2907 2908 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 2909 struct rtw89_vif_link *rtwvif_link, 2910 struct rtw89_sta_link *rtwsta_link) 2911 { 2912 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2913 const struct rtw89_chip_info *chip = rtwdev->chip; 2914 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2915 rtwvif_link->chanctx_idx); 2916 struct ieee80211_link_sta *link_sta; 2917 struct sk_buff *skb; 2918 u8 pads[RTW89_PPE_BW_NUM]; 2919 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2920 u16 lowest_rate; 2921 int ret; 2922 2923 memset(pads, 0, sizeof(pads)); 2924 2925 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2926 if (!skb) { 2927 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2928 return -ENOMEM; 2929 } 2930 2931 rcu_read_lock(); 2932 2933 if (rtwsta_link) 2934 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 2935 2936 if (rtwsta_link && link_sta->he_cap.has_he) 2937 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 2938 2939 if (vif->p2p) 2940 lowest_rate = RTW89_HW_RATE_OFDM6; 2941 else if (chan->band_type == RTW89_BAND_2G) 2942 lowest_rate = RTW89_HW_RATE_CCK1; 2943 else 2944 lowest_rate = RTW89_HW_RATE_OFDM6; 2945 2946 skb_put(skb, H2C_CMC_TBL_LEN); 2947 SET_CTRL_INFO_MACID(skb->data, mac_id); 2948 SET_CTRL_INFO_OPERATION(skb->data, 1); 2949 SET_CMC_TBL_DISRTSFB(skb->data, 1); 2950 SET_CMC_TBL_DISDATAFB(skb->data, 1); 2951 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 2952 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 2953 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 2954 if (vif->type == NL80211_IFTYPE_STATION) 2955 SET_CMC_TBL_ULDL(skb->data, 1); 2956 else 2957 SET_CMC_TBL_ULDL(skb->data, 0); 2958 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 2959 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 2960 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2961 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2962 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2963 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2964 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2965 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2966 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2967 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2968 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2969 } 2970 if (rtwsta_link) 2971 
SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 2972 link_sta->he_cap.has_he); 2973 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2974 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2975 2976 rcu_read_unlock(); 2977 2978 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2979 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2980 chip->h2c_cctl_func_id, 0, 1, 2981 H2C_CMC_TBL_LEN); 2982 2983 ret = rtw89_h2c_tx(rtwdev, skb, false); 2984 if (ret) { 2985 rtw89_err(rtwdev, "failed to send h2c\n"); 2986 goto fail; 2987 } 2988 2989 return 0; 2990 fail: 2991 dev_kfree_skb_any(skb); 2992 2993 return ret; 2994 } 2995 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 2996 2997 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 2998 struct ieee80211_link_sta *link_sta, 2999 u8 *pads) 3000 { 3001 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3002 u16 ppe_thres_hdr; 3003 u8 ppe16, ppe8; 3004 u8 n, idx, sh; 3005 u8 ru_bitmap; 3006 bool ppe_th; 3007 u16 ppe; 3008 int i; 3009 3010 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3011 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3012 if (!ppe_th) { 3013 u8 pad; 3014 3015 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3016 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3017 3018 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3019 pads[i] = pad; 3020 3021 return; 3022 } 3023 3024 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3025 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3026 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3027 n = hweight8(ru_bitmap); 3028 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3029 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3030 3031 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3032 if (!(ru_bitmap & BIT(i))) { 3033 pads[i] = 1; 3034 continue; 3035 } 3036 3037 idx = n >> 3; 3038 sh = n & 7; 3039 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3040 3041 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3042 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3043 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3044 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3045 3046 if (ppe16 != 7 && ppe8 == 7) 3047 pads[i] = RTW89_PE_DURATION_16_20; 3048 else if (ppe8 != 7) 3049 pads[i] = RTW89_PE_DURATION_8; 3050 else 3051 pads[i] = RTW89_PE_DURATION_0; 3052 } 3053 } 3054 3055 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3056 struct rtw89_vif_link *rtwvif_link, 3057 struct rtw89_sta_link *rtwsta_link) 3058 { 3059 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3060 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3061 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3062 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3063 struct ieee80211_bss_conf *bss_conf; 3064 struct ieee80211_link_sta *link_sta; 3065 u8 pads[RTW89_PPE_BW_NUM]; 3066 u32 len = sizeof(*h2c); 3067 struct sk_buff *skb; 3068 u16 lowest_rate; 3069 int ret; 3070 3071 memset(pads, 0, sizeof(pads)); 3072 3073 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3074 if (!skb) { 3075 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3076 return -ENOMEM; 3077 } 3078 3079 rcu_read_lock(); 3080 3081 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3082 3083 if (rtwsta_link) { 3084 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3085 3086 if (link_sta->eht_cap.has_eht) 3087 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3088 else if (link_sta->he_cap.has_he) 3089 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3090 } 3091 3092 if (vif->p2p) 3093 lowest_rate = RTW89_HW_RATE_OFDM6; 3094 else if (chan->band_type == RTW89_BAND_2G) 3095 lowest_rate = RTW89_HW_RATE_CCK1; 3096 else 3097 lowest_rate = RTW89_HW_RATE_OFDM6; 3098 3099 skb_put(skb, len); 3100 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3101 3102 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3103 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3104 3105 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3106 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3107 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3108 CCTLINFO_G7_W0_DISDATAFB); 3109 3110 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3111 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3112 3113 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3114 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3115 3116 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3117 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3118 3119 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3120 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3121 3122 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3123 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3124 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3125 } 3126 3127 if (bss_conf->eht_support) { 3128 u16 punct = bss_conf->chanreq.oper.punctured; 3129 3130 h2c->w4 |= le32_encode_bits(~punct, 3131 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3132 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3133 } 3134 3135 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3136 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3137 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3138 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3139 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3140 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3141 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3142 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3143 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3144 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3145 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3146 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3147 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3148 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3149 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3150 3151 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3152 CCTLINFO_G7_W6_ULDL); 3153 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3154 3155 if (rtwsta_link) { 3156 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3157 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3158 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3159 } 3160 3161 rcu_read_unlock(); 3162 3163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3164 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3165 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3166 len); 3167 3168 ret = rtw89_h2c_tx(rtwdev, skb, false); 3169 if (ret) { 3170 rtw89_err(rtwdev, "failed to send h2c\n"); 3171 goto fail; 3172 } 3173 3174 return 0; 3175 fail: 3176 dev_kfree_skb_any(skb); 3177 3178 return ret; 3179 } 3180 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3181 3182 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3183 struct rtw89_vif_link *rtwvif_link, 3184 struct rtw89_sta_link *rtwsta_link) 3185 { 3186 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3187 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3188 u32 len = sizeof(*h2c); 3189 struct sk_buff *skb; 3190 u16 agg_num = 0; 3191 u8 ba_bmap = 0; 3192 int ret; 3193 u8 tid; 3194 3195 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3196 if (!skb) { 3197 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3198 return -ENOMEM; 3199 } 3200 skb_put(skb, len); 3201 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3202 3203 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3204 if (agg_num == 0) 3205 agg_num = rtwsta->ampdu_params[tid].agg_num; 3206 else 3207 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3208 } 3209 3210 if (agg_num <= 0x20) 3211 ba_bmap = 3; 3212 else if (agg_num > 0x20 && agg_num <= 0x40) 3213 ba_bmap = 0; 3214 else if (agg_num > 0x40 && agg_num <= 0x80) 3215 ba_bmap = 1; 3216 else if (agg_num > 0x80 && agg_num <= 0x100) 3217 ba_bmap = 2; 3218 else if (agg_num > 0x100 && agg_num <= 0x200) 3219 ba_bmap = 4; 3220 else if (agg_num > 0x200 && agg_num <= 0x400) 3221 ba_bmap = 5; 3222 3223 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3224 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3225 3226 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3227 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3228 3229 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3230 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3231 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3232 len); 3233 3234 ret = rtw89_h2c_tx(rtwdev, skb, false); 3235 if (ret) { 3236 rtw89_err(rtwdev, "failed to send h2c\n"); 3237 goto fail; 3238 } 3239 3240 return 0; 3241 fail: 3242 dev_kfree_skb_any(skb); 3243 3244 return ret; 3245 } 3246 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3247 3248 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3249 struct rtw89_sta_link *rtwsta_link) 3250 { 3251 const struct rtw89_chip_info *chip = rtwdev->chip; 3252 struct sk_buff *skb; 3253 int ret; 3254 3255 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3256 if (!skb) { 3257 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3258 return -ENOMEM; 3259 } 3260 skb_put(skb, H2C_CMC_TBL_LEN); 3261 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3262 SET_CTRL_INFO_OPERATION(skb->data, 1); 3263 if (rtwsta_link->cctl_tx_time) { 3264 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3265 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3266 } 3267 if (rtwsta_link->cctl_tx_retry_limit) { 3268 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3269 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3270 } 3271 
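	/* Send the per-STA CMAC control table update carrying the AMPDU
	 * max-time and data retry-limit settings selected above.
	 */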
3272 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3273 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3274 chip->h2c_cctl_func_id, 0, 1, 3275 H2C_CMC_TBL_LEN); 3276 3277 ret = rtw89_h2c_tx(rtwdev, skb, false); 3278 if (ret) { 3279 rtw89_err(rtwdev, "failed to send h2c\n"); 3280 goto fail; 3281 } 3282 3283 return 0; 3284 fail: 3285 dev_kfree_skb_any(skb); 3286 3287 return ret; 3288 } 3289 3290 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3291 struct rtw89_sta_link *rtwsta_link) 3292 { 3293 const struct rtw89_chip_info *chip = rtwdev->chip; 3294 struct sk_buff *skb; 3295 int ret; 3296 3297 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3298 return 0; 3299 3300 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3301 if (!skb) { 3302 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3303 return -ENOMEM; 3304 } 3305 skb_put(skb, H2C_CMC_TBL_LEN); 3306 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3307 SET_CTRL_INFO_OPERATION(skb->data, 1); 3308 3309 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3310 3311 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3312 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3313 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3314 H2C_CMC_TBL_LEN); 3315 3316 ret = rtw89_h2c_tx(rtwdev, skb, false); 3317 if (ret) { 3318 rtw89_err(rtwdev, "failed to send h2c\n"); 3319 goto fail; 3320 } 3321 3322 return 0; 3323 fail: 3324 dev_kfree_skb_any(skb); 3325 3326 return ret; 3327 } 3328 3329 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3330 struct rtw89_vif_link *rtwvif_link) 3331 { 3332 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3333 rtwvif_link->chanctx_idx); 3334 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3335 struct rtw89_h2c_bcn_upd *h2c; 3336 struct sk_buff *skb_beacon; 3337 struct ieee80211_hdr *hdr; 3338 u32 len = sizeof(*h2c); 3339 struct sk_buff *skb; 3340 int bcn_total_len; 3341 u16 beacon_rate; 3342 u16 tim_offset; 3343 void *noa_data; 3344 u8 noa_len; 3345 int ret; 3346 3347 if (vif->p2p) 3348 beacon_rate = RTW89_HW_RATE_OFDM6; 3349 else if (chan->band_type == RTW89_BAND_2G) 3350 beacon_rate = RTW89_HW_RATE_CCK1; 3351 else 3352 beacon_rate = RTW89_HW_RATE_OFDM6; 3353 3354 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3355 NULL, 0); 3356 if (!skb_beacon) { 3357 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3358 return -ENOMEM; 3359 } 3360 3361 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3362 if (noa_len && 3363 (noa_len <= skb_tailroom(skb_beacon) || 3364 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3365 skb_put_data(skb_beacon, noa_data, noa_len); 3366 } 3367 3368 hdr = (struct ieee80211_hdr *)skb_beacon; 3369 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3370 3371 bcn_total_len = len + skb_beacon->len; 3372 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3373 if (!skb) { 3374 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3375 dev_kfree_skb_any(skb_beacon); 3376 return -ENOMEM; 3377 } 3378 skb_put(skb, len); 3379 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3380 3381 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3382 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3383 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3384 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3385 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3386 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3387 
le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3388 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3389 3390 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3391 dev_kfree_skb_any(skb_beacon); 3392 3393 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3394 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3395 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3396 bcn_total_len); 3397 3398 ret = rtw89_h2c_tx(rtwdev, skb, false); 3399 if (ret) { 3400 rtw89_err(rtwdev, "failed to send h2c\n"); 3401 dev_kfree_skb_any(skb); 3402 return ret; 3403 } 3404 3405 return 0; 3406 } 3407 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3408 3409 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3410 struct rtw89_vif_link *rtwvif_link) 3411 { 3412 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3413 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3414 struct rtw89_h2c_bcn_upd_be *h2c; 3415 struct sk_buff *skb_beacon; 3416 struct ieee80211_hdr *hdr; 3417 u32 len = sizeof(*h2c); 3418 struct sk_buff *skb; 3419 int bcn_total_len; 3420 u16 beacon_rate; 3421 u16 tim_offset; 3422 void *noa_data; 3423 u8 noa_len; 3424 int ret; 3425 3426 if (vif->p2p) 3427 beacon_rate = RTW89_HW_RATE_OFDM6; 3428 else if (chan->band_type == RTW89_BAND_2G) 3429 beacon_rate = RTW89_HW_RATE_CCK1; 3430 else 3431 beacon_rate = RTW89_HW_RATE_OFDM6; 3432 3433 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3434 NULL, 0); 3435 if (!skb_beacon) { 3436 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3437 return -ENOMEM; 3438 } 3439 3440 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3441 if (noa_len && 3442 (noa_len <= skb_tailroom(skb_beacon) || 3443 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3444 skb_put_data(skb_beacon, noa_data, noa_len); 3445 } 3446 3447 hdr = (struct ieee80211_hdr *)skb_beacon; 3448 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3449 3450 bcn_total_len = len + skb_beacon->len; 3451 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3452 if (!skb) { 3453 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3454 dev_kfree_skb_any(skb_beacon); 3455 return -ENOMEM; 3456 } 3457 skb_put(skb, len); 3458 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3459 3460 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3461 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3462 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3463 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3464 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3465 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3466 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3467 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3468 3469 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3470 dev_kfree_skb_any(skb_beacon); 3471 3472 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3473 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3474 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3475 bcn_total_len); 3476 3477 ret = rtw89_h2c_tx(rtwdev, skb, false); 3478 if (ret) { 3479 rtw89_err(rtwdev, "failed to send h2c\n"); 3480 goto fail; 3481 } 3482 3483 return 0; 3484 3485 fail: 3486 dev_kfree_skb_any(skb); 3487 3488 return ret; 3489 } 3490 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3491 3492 #define H2C_ROLE_MAINTAIN_LEN 4 3493 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3494 struct 
rtw89_vif_link *rtwvif_link, 3495 struct rtw89_sta_link *rtwsta_link, 3496 enum rtw89_upd_mode upd_mode) 3497 { 3498 struct sk_buff *skb; 3499 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3500 u8 self_role; 3501 int ret; 3502 3503 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3504 if (rtwsta_link) 3505 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3506 else 3507 self_role = rtwvif_link->self_role; 3508 } else { 3509 self_role = rtwvif_link->self_role; 3510 } 3511 3512 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3513 if (!skb) { 3514 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3515 return -ENOMEM; 3516 } 3517 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3518 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3519 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3520 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3521 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3522 3523 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3524 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3525 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3526 H2C_ROLE_MAINTAIN_LEN); 3527 3528 ret = rtw89_h2c_tx(rtwdev, skb, false); 3529 if (ret) { 3530 rtw89_err(rtwdev, "failed to send h2c\n"); 3531 goto fail; 3532 } 3533 3534 return 0; 3535 fail: 3536 dev_kfree_skb_any(skb); 3537 3538 return ret; 3539 } 3540 3541 static enum rtw89_fw_sta_type 3542 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3543 struct rtw89_sta_link *rtwsta_link) 3544 { 3545 struct ieee80211_bss_conf *bss_conf; 3546 struct ieee80211_link_sta *link_sta; 3547 enum rtw89_fw_sta_type type; 3548 3549 rcu_read_lock(); 3550 3551 if (!rtwsta_link) 3552 goto by_vif; 3553 3554 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3555 3556 if (link_sta->eht_cap.has_eht) 3557 type = RTW89_FW_BE_STA; 3558 else if (link_sta->he_cap.has_he) 3559 type = RTW89_FW_AX_STA; 3560 else 3561 type = RTW89_FW_N_AC_STA; 3562 3563 goto out; 3564 3565 by_vif: 3566 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3567 3568 if (bss_conf->eht_support) 3569 type = RTW89_FW_BE_STA; 3570 else if (bss_conf->he_support) 3571 type = RTW89_FW_AX_STA; 3572 else 3573 type = RTW89_FW_N_AC_STA; 3574 3575 out: 3576 rcu_read_unlock(); 3577 3578 return type; 3579 } 3580 3581 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3582 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3583 { 3584 struct sk_buff *skb; 3585 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3586 u8 self_role = rtwvif_link->self_role; 3587 enum rtw89_fw_sta_type sta_type; 3588 u8 net_type = rtwvif_link->net_type; 3589 struct rtw89_h2c_join_v1 *h2c_v1; 3590 struct rtw89_h2c_join *h2c; 3591 u32 len = sizeof(*h2c); 3592 bool format_v1 = false; 3593 int ret; 3594 3595 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3596 len = sizeof(*h2c_v1); 3597 format_v1 = true; 3598 } 3599 3600 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3601 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3602 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3603 } 3604 3605 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3606 if (!skb) { 3607 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3608 return -ENOMEM; 3609 } 3610 skb_put(skb, len); 3611 h2c = (struct rtw89_h2c_join *)skb->data; 3612 3613 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3614 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3615 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3616 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3617 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3618 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3619 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3620 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3621 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3622 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3623 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3624 le32_encode_bits(rtwvif_link->wifi_role, 3625 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3626 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3627 3628 if (!format_v1) 3629 goto done; 3630 3631 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3632 3633 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3634 3635 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3636 h2c_v1->w2 = 0; 3637 3638 done: 3639 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3640 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3641 H2C_FUNC_MAC_JOININFO, 0, 1, 3642 len); 3643 3644 ret = rtw89_h2c_tx(rtwdev, skb, false); 3645 if (ret) { 3646 rtw89_err(rtwdev, "failed to send h2c\n"); 3647 goto fail; 3648 } 3649 3650 return 0; 3651 fail: 3652 dev_kfree_skb_any(skb); 3653 3654 return ret; 3655 } 3656 3657 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3658 { 3659 struct rtw89_h2c_notify_dbcc *h2c; 3660 u32 len = sizeof(*h2c); 3661 struct sk_buff *skb; 3662 int ret; 3663 3664 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3665 if (!skb) { 3666 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3667 return -ENOMEM; 3668 } 3669 skb_put(skb, len); 3670 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3671 3672 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3673 3674 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3675 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3676 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3677 len); 3678 3679 ret = rtw89_h2c_tx(rtwdev, skb, false); 3680 if (ret) { 3681 rtw89_err(rtwdev, "failed to send h2c\n"); 3682 goto fail; 3683 } 3684 3685 return 0; 3686 fail: 3687 dev_kfree_skb_any(skb); 3688 3689 return ret; 3690 } 3691 3692 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3693 bool pause) 3694 { 3695 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3696 struct rtw89_fw_macid_pause_grp *h2c; 3697 __le32 set = cpu_to_le32(BIT(sh)); 3698 u8 h2c_macid_pause_id; 3699 struct sk_buff *skb; 3700 u32 len; 3701 int ret; 3702 3703 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3704 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3705 len = sizeof(*h2c_new); 3706 } else { 3707 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3708 len = sizeof(*h2c); 3709 } 3710 3711 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3712 if (!skb) { 3713 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3714 return -ENOMEM; 3715 } 3716 skb_put(skb, len); 3717 3718 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 
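		/* Firmware with the MACID_PAUSE_SLEEP feature takes separate
		 * pause and sleep bitmaps: the mask bits for this macid are
		 * always set, while the state bits are set only when pausing.
		 */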
3719 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 3720 3721 h2c_new->n[0].pause_mask_grp[grp] = set; 3722 h2c_new->n[0].sleep_mask_grp[grp] = set; 3723 if (pause) { 3724 h2c_new->n[0].pause_grp[grp] = set; 3725 h2c_new->n[0].sleep_grp[grp] = set; 3726 } 3727 } else { 3728 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3729 3730 h2c->mask_grp[grp] = set; 3731 if (pause) 3732 h2c->pause_grp[grp] = set; 3733 } 3734 3735 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3736 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3737 h2c_macid_pause_id, 1, 0, 3738 len); 3739 3740 ret = rtw89_h2c_tx(rtwdev, skb, false); 3741 if (ret) { 3742 rtw89_err(rtwdev, "failed to send h2c\n"); 3743 goto fail; 3744 } 3745 3746 return 0; 3747 fail: 3748 dev_kfree_skb_any(skb); 3749 3750 return ret; 3751 } 3752 3753 #define H2C_EDCA_LEN 12 3754 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3755 u8 ac, u32 val) 3756 { 3757 struct sk_buff *skb; 3758 int ret; 3759 3760 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3761 if (!skb) { 3762 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3763 return -ENOMEM; 3764 } 3765 skb_put(skb, H2C_EDCA_LEN); 3766 RTW89_SET_EDCA_SEL(skb->data, 0); 3767 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 3768 RTW89_SET_EDCA_WMM(skb->data, 0); 3769 RTW89_SET_EDCA_AC(skb->data, ac); 3770 RTW89_SET_EDCA_PARAM(skb->data, val); 3771 3772 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3773 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3774 H2C_FUNC_USR_EDCA, 0, 1, 3775 H2C_EDCA_LEN); 3776 3777 ret = rtw89_h2c_tx(rtwdev, skb, false); 3778 if (ret) { 3779 rtw89_err(rtwdev, "failed to send h2c\n"); 3780 goto fail; 3781 } 3782 3783 return 0; 3784 fail: 3785 dev_kfree_skb_any(skb); 3786 3787 return ret; 3788 } 3789 3790 #define H2C_TSF32_TOGL_LEN 4 3791 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 3792 struct rtw89_vif_link *rtwvif_link, 3793 bool en) 3794 { 3795 struct sk_buff *skb; 3796 u16 early_us = en ? 
2000 : 0; 3797 u8 *cmd; 3798 int ret; 3799 3800 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3801 if (!skb) { 3802 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3803 return -ENOMEM; 3804 } 3805 skb_put(skb, H2C_TSF32_TOGL_LEN); 3806 cmd = skb->data; 3807 3808 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 3809 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3810 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 3811 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3812 3813 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3814 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3815 H2C_FUNC_TSF32_TOGL, 0, 0, 3816 H2C_TSF32_TOGL_LEN); 3817 3818 ret = rtw89_h2c_tx(rtwdev, skb, false); 3819 if (ret) { 3820 rtw89_err(rtwdev, "failed to send h2c\n"); 3821 goto fail; 3822 } 3823 3824 return 0; 3825 fail: 3826 dev_kfree_skb_any(skb); 3827 3828 return ret; 3829 } 3830 3831 #define H2C_OFLD_CFG_LEN 8 3832 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3833 { 3834 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3835 struct sk_buff *skb; 3836 int ret; 3837 3838 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3839 if (!skb) { 3840 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3841 return -ENOMEM; 3842 } 3843 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3844 3845 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3846 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3847 H2C_FUNC_OFLD_CFG, 0, 1, 3848 H2C_OFLD_CFG_LEN); 3849 3850 ret = rtw89_h2c_tx(rtwdev, skb, false); 3851 if (ret) { 3852 rtw89_err(rtwdev, "failed to send h2c\n"); 3853 goto fail; 3854 } 3855 3856 return 0; 3857 fail: 3858 dev_kfree_skb_any(skb); 3859 3860 return ret; 3861 } 3862 3863 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 3864 { 3865 struct rtw89_h2c_tx_duty *h2c; 3866 u32 len = sizeof(*h2c); 3867 struct sk_buff *skb; 3868 u16 pause, active; 3869 int ret; 3870 3871 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3872 if (!skb) { 3873 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 3874 return -ENOMEM; 3875 } 3876 3877 skb_put(skb, len); 3878 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 3879 3880 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 3881 3882 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 3883 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 3884 } else { 3885 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 3886 pause = 100 - active; 3887 3888 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 3889 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 3890 } 3891 3892 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3893 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3894 H2C_FUNC_TX_DUTY, 0, 0, len); 3895 3896 ret = rtw89_h2c_tx(rtwdev, skb, false); 3897 if (ret) { 3898 rtw89_err(rtwdev, "failed to send h2c\n"); 3899 goto fail; 3900 } 3901 3902 return 0; 3903 fail: 3904 dev_kfree_skb_any(skb); 3905 3906 return ret; 3907 } 3908 3909 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 3910 struct rtw89_vif_link *rtwvif_link, 3911 bool connect) 3912 { 3913 struct ieee80211_bss_conf *bss_conf; 3914 s32 thold = RTW89_DEFAULT_CQM_THOLD; 3915 u32 hyst = RTW89_DEFAULT_CQM_HYST; 3916 struct rtw89_h2c_bcnfltr *h2c; 3917 u32 len = sizeof(*h2c); 3918 struct sk_buff *skb; 3919 int ret; 3920 3921 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3922 return -EINVAL; 3923 3924 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 3925 return -EINVAL; 
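	/* bss_conf is RCU-protected; pick up the CQM threshold/hysteresis
	 * configured by mac80211 under the read lock, and keep the driver
	 * defaults when they are left at zero. The threshold is rebased by
	 * MAX_RSSI before being encoded, presumably so the firmware field
	 * stays non-negative.
	 */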
3926 3927 rcu_read_lock(); 3928 3929 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 3930 3931 if (bss_conf->cqm_rssi_hyst) 3932 hyst = bss_conf->cqm_rssi_hyst; 3933 if (bss_conf->cqm_rssi_thold) 3934 thold = bss_conf->cqm_rssi_thold; 3935 3936 rcu_read_unlock(); 3937 3938 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3939 if (!skb) { 3940 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 3941 return -ENOMEM; 3942 } 3943 3944 skb_put(skb, len); 3945 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 3946 3947 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 3948 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 3949 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 3950 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 3951 RTW89_H2C_BCNFLTR_W0_MODE) | 3952 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 3953 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 3954 le32_encode_bits(thold + MAX_RSSI, 3955 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 3956 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 3957 3958 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3959 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3960 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 3961 3962 ret = rtw89_h2c_tx(rtwdev, skb, false); 3963 if (ret) { 3964 rtw89_err(rtwdev, "failed to send h2c\n"); 3965 goto fail; 3966 } 3967 3968 return 0; 3969 fail: 3970 dev_kfree_skb_any(skb); 3971 3972 return ret; 3973 } 3974 3975 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 3976 struct rtw89_rx_phy_ppdu *phy_ppdu) 3977 { 3978 struct rtw89_h2c_ofld_rssi *h2c; 3979 u32 len = sizeof(*h2c); 3980 struct sk_buff *skb; 3981 s8 rssi; 3982 int ret; 3983 3984 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3985 return -EINVAL; 3986 3987 if (!phy_ppdu) 3988 return -EINVAL; 3989 3990 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3991 if (!skb) { 3992 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 3993 return -ENOMEM; 3994 } 3995 3996 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 3997 skb_put(skb, len); 3998 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 3999 4000 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4001 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4002 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4003 4004 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4005 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4006 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4007 4008 ret = rtw89_h2c_tx(rtwdev, skb, false); 4009 if (ret) { 4010 rtw89_err(rtwdev, "failed to send h2c\n"); 4011 goto fail; 4012 } 4013 4014 return 0; 4015 fail: 4016 dev_kfree_skb_any(skb); 4017 4018 return ret; 4019 } 4020 4021 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4022 { 4023 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4024 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4025 struct rtw89_h2c_ofld *h2c; 4026 u32 len = sizeof(*h2c); 4027 struct sk_buff *skb; 4028 int ret; 4029 4030 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4031 return -EINVAL; 4032 4033 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4034 if (!skb) { 4035 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4036 return -ENOMEM; 4037 } 4038 4039 skb_put(skb, len); 4040 h2c = (struct rtw89_h2c_ofld *)skb->data; 4041 4042 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4043 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4044 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4045 4046 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4047 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4048 H2C_FUNC_OFLD_TP, 0, 1, len); 4049 4050 ret = rtw89_h2c_tx(rtwdev, skb, false); 4051 if (ret) { 4052 rtw89_err(rtwdev, "failed to send h2c\n"); 4053 goto fail; 4054 } 4055 4056 return 0; 4057 fail: 4058 dev_kfree_skb_any(skb); 4059 4060 return ret; 4061 } 4062 4063 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4064 { 4065 const struct rtw89_chip_info *chip = rtwdev->chip; 4066 struct rtw89_h2c_ra_v1 *h2c_v1; 4067 struct rtw89_h2c_ra *h2c; 4068 u32 len = sizeof(*h2c); 4069 bool format_v1 = false; 4070 struct sk_buff *skb; 4071 int ret; 4072 4073 if (chip->chip_gen == RTW89_CHIP_BE) { 4074 len = sizeof(*h2c_v1); 4075 format_v1 = true; 4076 } 4077 4078 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4079 if (!skb) { 4080 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4081 return -ENOMEM; 4082 } 4083 skb_put(skb, len); 4084 h2c = (struct rtw89_h2c_ra *)skb->data; 4085 rtw89_debug(rtwdev, RTW89_DBG_RA, 4086 "ra cmd msk: %llx ", ra->ra_mask); 4087 4088 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4089 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4090 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4091 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4092 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4093 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4094 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4095 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4096 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4097 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4098 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4099 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4100 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4101 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4102 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4103 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4104 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4105 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4106 4107 if (!format_v1) 4108 goto csi; 4109 4110 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4111 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4112 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4113 4114 csi: 4115 if (!csi) 4116 goto done; 4117 4118 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4119 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4120 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4121 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4122 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4123 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4124 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4125 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4126 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4127 4128 done: 4129 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4130 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4131 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4132 len); 4133 4134 ret = rtw89_h2c_tx(rtwdev, skb, false); 4135 if (ret) { 4136 rtw89_err(rtwdev, "failed 
to send h2c\n"); 4137 goto fail; 4138 } 4139 4140 return 0; 4141 fail: 4142 dev_kfree_skb_any(skb); 4143 4144 return ret; 4145 } 4146 4147 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4148 { 4149 struct rtw89_btc *btc = &rtwdev->btc; 4150 struct rtw89_btc_dm *dm = &btc->dm; 4151 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4152 struct rtw89_btc_module *module = &init_info->module; 4153 struct rtw89_btc_ant_info *ant = &module->ant; 4154 struct rtw89_h2c_cxinit *h2c; 4155 u32 len = sizeof(*h2c); 4156 struct sk_buff *skb; 4157 int ret; 4158 4159 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4160 if (!skb) { 4161 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4162 return -ENOMEM; 4163 } 4164 skb_put(skb, len); 4165 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4166 4167 h2c->hdr.type = type; 4168 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4169 4170 h2c->ant_type = ant->type; 4171 h2c->ant_num = ant->num; 4172 h2c->ant_iso = ant->isolation; 4173 h2c->ant_info = 4174 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4175 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4176 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4177 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4178 4179 h2c->mod_rfe = module->rfe_type; 4180 h2c->mod_cv = module->cv; 4181 h2c->mod_info = 4182 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4183 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4184 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4185 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4186 h2c->mod_adie_kt = module->kt_ver_adie; 4187 h2c->wl_gch = init_info->wl_guard_ch; 4188 4189 h2c->info = 4190 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4191 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4192 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4193 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4194 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4195 4196 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4197 H2C_CAT_OUTSRC, BTFC_SET, 4198 SET_DRV_INFO, 0, 0, 4199 len); 4200 4201 ret = rtw89_h2c_tx(rtwdev, skb, false); 4202 if (ret) { 4203 rtw89_err(rtwdev, "failed to send h2c\n"); 4204 goto fail; 4205 } 4206 4207 return 0; 4208 fail: 4209 dev_kfree_skb_any(skb); 4210 4211 return ret; 4212 } 4213 4214 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4215 { 4216 struct rtw89_btc *btc = &rtwdev->btc; 4217 struct rtw89_btc_dm *dm = &btc->dm; 4218 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4219 struct rtw89_h2c_cxinit_v7 *h2c; 4220 u32 len = sizeof(*h2c); 4221 struct sk_buff *skb; 4222 int ret; 4223 4224 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4225 if (!skb) { 4226 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4227 return -ENOMEM; 4228 } 4229 skb_put(skb, len); 4230 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4231 4232 h2c->hdr.type = type; 4233 h2c->hdr.ver = btc->ver->fcxinit; 4234 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4235 h2c->init = *init_info; 4236 4237 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4238 H2C_CAT_OUTSRC, BTFC_SET, 4239 SET_DRV_INFO, 0, 0, 4240 len); 4241 4242 ret = rtw89_h2c_tx(rtwdev, skb, false); 4243 if (ret) { 4244 rtw89_err(rtwdev, "failed to send h2c\n"); 4245 goto fail; 4246 } 
4247 4248 return 0; 4249 fail: 4250 dev_kfree_skb_any(skb); 4251 4252 return ret; 4253 } 4254 4255 #define PORT_DATA_OFFSET 4 4256 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4257 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4258 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4259 4260 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4261 { 4262 struct rtw89_btc *btc = &rtwdev->btc; 4263 const struct rtw89_btc_ver *ver = btc->ver; 4264 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4265 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4266 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4267 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4268 struct sk_buff *skb; 4269 u32 len; 4270 u8 offset = 0; 4271 u8 *cmd; 4272 int ret; 4273 int i; 4274 4275 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4276 4277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4278 if (!skb) { 4279 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4280 return -ENOMEM; 4281 } 4282 skb_put(skb, len); 4283 cmd = skb->data; 4284 4285 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4286 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4287 4288 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4289 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4290 4291 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4292 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4293 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4294 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4295 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4296 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4297 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4298 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4299 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4300 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4301 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4302 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4303 4304 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4305 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4306 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4307 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4308 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4309 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4310 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4311 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4312 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4313 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4314 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4315 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4316 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4317 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4318 } 4319 4320 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4321 H2C_CAT_OUTSRC, BTFC_SET, 4322 SET_DRV_INFO, 0, 0, 4323 len); 4324 4325 ret = rtw89_h2c_tx(rtwdev, skb, false); 4326 if (ret) { 4327 rtw89_err(rtwdev, "failed to send h2c\n"); 4328 goto fail; 4329 } 4330 4331 return 0; 4332 fail: 4333 dev_kfree_skb_any(skb); 4334 4335 return ret; 4336 } 4337 4338 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4339 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4340 4341 int 
rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4342 { 4343 struct rtw89_btc *btc = &rtwdev->btc; 4344 const struct rtw89_btc_ver *ver = btc->ver; 4345 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4346 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4347 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4348 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4349 struct sk_buff *skb; 4350 u32 len; 4351 u8 *cmd, offset; 4352 int ret; 4353 int i; 4354 4355 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4356 4357 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4358 if (!skb) { 4359 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4360 return -ENOMEM; 4361 } 4362 skb_put(skb, len); 4363 cmd = skb->data; 4364 4365 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4366 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4367 4368 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4369 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4370 4371 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4372 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4373 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4374 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4375 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4376 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4377 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4378 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4379 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4380 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4381 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4382 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4383 4384 offset = PORT_DATA_OFFSET; 4385 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4386 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4387 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4388 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4389 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4390 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4391 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4392 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4393 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4394 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4395 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4396 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4397 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4398 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4399 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4400 } 4401 4402 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4403 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4404 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4405 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4406 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4407 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4408 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4409 4410 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4411 H2C_CAT_OUTSRC, BTFC_SET, 4412 SET_DRV_INFO, 0, 0, 4413 len); 4414 4415 ret = rtw89_h2c_tx(rtwdev, skb, false); 
4416 if (ret) { 4417 rtw89_err(rtwdev, "failed to send h2c\n"); 4418 goto fail; 4419 } 4420 4421 return 0; 4422 fail: 4423 dev_kfree_skb_any(skb); 4424 4425 return ret; 4426 } 4427 4428 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4429 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4430 4431 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4432 { 4433 struct rtw89_btc *btc = &rtwdev->btc; 4434 const struct rtw89_btc_ver *ver = btc->ver; 4435 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4436 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4437 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4438 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4439 struct sk_buff *skb; 4440 u32 len; 4441 u8 *cmd, offset; 4442 int ret; 4443 int i; 4444 4445 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4446 4447 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4448 if (!skb) { 4449 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4450 return -ENOMEM; 4451 } 4452 skb_put(skb, len); 4453 cmd = skb->data; 4454 4455 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4456 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4457 4458 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4459 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4460 4461 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4462 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4463 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4464 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4465 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4466 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4467 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4468 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4469 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4470 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4471 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4472 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4473 4474 offset = PORT_DATA_OFFSET; 4475 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4476 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4477 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4478 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4479 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4480 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4481 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4482 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4483 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4484 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4485 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4486 } 4487 4488 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4489 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4490 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4491 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4492 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4493 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4494 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4495 4496 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4497 H2C_CAT_OUTSRC, BTFC_SET, 4498 
SET_DRV_INFO, 0, 0, 4499 len); 4500 4501 ret = rtw89_h2c_tx(rtwdev, skb, false); 4502 if (ret) { 4503 rtw89_err(rtwdev, "failed to send h2c\n"); 4504 goto fail; 4505 } 4506 4507 return 0; 4508 fail: 4509 dev_kfree_skb_any(skb); 4510 4511 return ret; 4512 } 4513 4514 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4515 { 4516 struct rtw89_btc *btc = &rtwdev->btc; 4517 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4518 struct rtw89_h2c_cxrole_v7 *h2c; 4519 u32 len = sizeof(*h2c); 4520 struct sk_buff *skb; 4521 int ret; 4522 4523 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4524 if (!skb) { 4525 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4526 return -ENOMEM; 4527 } 4528 skb_put(skb, len); 4529 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4530 4531 h2c->hdr.type = type; 4532 h2c->hdr.ver = btc->ver->fwlrole; 4533 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4534 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4535 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4536 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4537 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4538 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4539 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4540 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4541 4542 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4543 H2C_CAT_OUTSRC, BTFC_SET, 4544 SET_DRV_INFO, 0, 0, 4545 len); 4546 4547 ret = rtw89_h2c_tx(rtwdev, skb, false); 4548 if (ret) { 4549 rtw89_err(rtwdev, "failed to send h2c\n"); 4550 goto fail; 4551 } 4552 4553 return 0; 4554 fail: 4555 dev_kfree_skb_any(skb); 4556 4557 return ret; 4558 } 4559 4560 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4561 { 4562 struct rtw89_btc *btc = &rtwdev->btc; 4563 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4564 struct rtw89_h2c_cxrole_v8 *h2c; 4565 u32 len = sizeof(*h2c); 4566 struct sk_buff *skb; 4567 int ret; 4568 4569 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4570 if (!skb) { 4571 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4572 return -ENOMEM; 4573 } 4574 skb_put(skb, len); 4575 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4576 4577 h2c->hdr.type = type; 4578 h2c->hdr.ver = btc->ver->fwlrole; 4579 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4580 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4581 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4582 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4583 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4584 4585 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4586 H2C_CAT_OUTSRC, BTFC_SET, 4587 SET_DRV_INFO, 0, 0, 4588 len); 4589 4590 ret = rtw89_h2c_tx(rtwdev, skb, false); 4591 if (ret) { 4592 rtw89_err(rtwdev, "failed to send h2c\n"); 4593 goto fail; 4594 } 4595 4596 return 0; 4597 fail: 4598 dev_kfree_skb_any(skb); 4599 4600 return ret; 4601 } 4602 4603 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4604 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4605 { 4606 struct rtw89_btc *btc = &rtwdev->btc; 4607 const struct rtw89_btc_ver *ver = btc->ver; 4608 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4609 struct sk_buff *skb; 4610 u8 *cmd; 4611 int ret; 4612 4613 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4614 if (!skb) { 4615 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4616 return -ENOMEM; 4617 } 4618 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4619 cmd = skb->data; 
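	/* Every cxdrv info H2C starts with the common CXHDR (type plus
	 * payload length, not counting the header itself), followed by the
	 * type-specific payload; here trace_step is only filled when the
	 * fcxctrl version is 0.
	 */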
4620 4621 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4622 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4623 4624 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4625 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4626 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4627 if (ver->fcxctrl == 0) 4628 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4629 4630 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4631 H2C_CAT_OUTSRC, BTFC_SET, 4632 SET_DRV_INFO, 0, 0, 4633 H2C_LEN_CXDRVINFO_CTRL); 4634 4635 ret = rtw89_h2c_tx(rtwdev, skb, false); 4636 if (ret) { 4637 rtw89_err(rtwdev, "failed to send h2c\n"); 4638 goto fail; 4639 } 4640 4641 return 0; 4642 fail: 4643 dev_kfree_skb_any(skb); 4644 4645 return ret; 4646 } 4647 4648 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4649 { 4650 struct rtw89_btc *btc = &rtwdev->btc; 4651 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4652 struct rtw89_h2c_cxctrl_v7 *h2c; 4653 u32 len = sizeof(*h2c); 4654 struct sk_buff *skb; 4655 int ret; 4656 4657 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4658 if (!skb) { 4659 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4660 return -ENOMEM; 4661 } 4662 skb_put(skb, len); 4663 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4664 4665 h2c->hdr.type = type; 4666 h2c->hdr.ver = btc->ver->fcxctrl; 4667 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4668 h2c->ctrl = *ctrl; 4669 4670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4671 H2C_CAT_OUTSRC, BTFC_SET, 4672 SET_DRV_INFO, 0, 0, len); 4673 4674 ret = rtw89_h2c_tx(rtwdev, skb, false); 4675 if (ret) { 4676 rtw89_err(rtwdev, "failed to send h2c\n"); 4677 goto fail; 4678 } 4679 4680 return 0; 4681 fail: 4682 dev_kfree_skb_any(skb); 4683 4684 return ret; 4685 } 4686 4687 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4688 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4689 { 4690 struct rtw89_btc *btc = &rtwdev->btc; 4691 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4692 struct sk_buff *skb; 4693 u8 *cmd; 4694 int ret; 4695 4696 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4697 if (!skb) { 4698 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4699 return -ENOMEM; 4700 } 4701 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4702 cmd = skb->data; 4703 4704 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4705 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4706 4707 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4708 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4709 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4710 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4711 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4712 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4713 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4714 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4715 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4716 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4717 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4718 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4719 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4720 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4721 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4722 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4723 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4724 4725 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4726 H2C_CAT_OUTSRC, BTFC_SET, 4727 SET_DRV_INFO, 0, 0, 4728 
H2C_LEN_CXDRVINFO_TRX); 4729 4730 ret = rtw89_h2c_tx(rtwdev, skb, false); 4731 if (ret) { 4732 rtw89_err(rtwdev, "failed to send h2c\n"); 4733 goto fail; 4734 } 4735 4736 return 0; 4737 fail: 4738 dev_kfree_skb_any(skb); 4739 4740 return ret; 4741 } 4742 4743 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4744 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4745 { 4746 struct rtw89_btc *btc = &rtwdev->btc; 4747 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4748 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4749 struct sk_buff *skb; 4750 u8 *cmd; 4751 int ret; 4752 4753 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4754 if (!skb) { 4755 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4756 return -ENOMEM; 4757 } 4758 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4759 cmd = skb->data; 4760 4761 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4762 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4763 4764 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4765 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4766 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4767 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4768 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4769 4770 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4771 H2C_CAT_OUTSRC, BTFC_SET, 4772 SET_DRV_INFO, 0, 0, 4773 H2C_LEN_CXDRVINFO_RFK); 4774 4775 ret = rtw89_h2c_tx(rtwdev, skb, false); 4776 if (ret) { 4777 rtw89_err(rtwdev, "failed to send h2c\n"); 4778 goto fail; 4779 } 4780 4781 return 0; 4782 fail: 4783 dev_kfree_skb_any(skb); 4784 4785 return ret; 4786 } 4787 4788 #define H2C_LEN_PKT_OFLD 4 4789 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4790 { 4791 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4792 struct sk_buff *skb; 4793 unsigned int cond; 4794 u8 *cmd; 4795 int ret; 4796 4797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4798 if (!skb) { 4799 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4800 return -ENOMEM; 4801 } 4802 skb_put(skb, H2C_LEN_PKT_OFLD); 4803 cmd = skb->data; 4804 4805 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4806 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4807 4808 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4809 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4810 H2C_FUNC_PACKET_OFLD, 1, 1, 4811 H2C_LEN_PKT_OFLD); 4812 4813 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4814 4815 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4816 if (ret < 0) { 4817 rtw89_debug(rtwdev, RTW89_DBG_FW, 4818 "failed to del pkt ofld: id %d, ret %d\n", 4819 id, ret); 4820 return ret; 4821 } 4822 4823 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4824 return 0; 4825 } 4826 4827 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4828 struct sk_buff *skb_ofld) 4829 { 4830 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4831 struct sk_buff *skb; 4832 unsigned int cond; 4833 u8 *cmd; 4834 u8 alloc_id; 4835 int ret; 4836 4837 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4838 RTW89_MAX_PKT_OFLD_NUM); 4839 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4840 return -ENOSPC; 4841 4842 *id = alloc_id; 4843 4844 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4845 if (!skb) { 4846 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4847 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4848 return -ENOMEM; 4849 } 4850 skb_put(skb, 
H2C_LEN_PKT_OFLD); 4851 cmd = skb->data; 4852 4853 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4854 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4855 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4856 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4857 4858 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4859 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4860 H2C_FUNC_PACKET_OFLD, 1, 1, 4861 H2C_LEN_PKT_OFLD + skb_ofld->len); 4862 4863 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4864 4865 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4866 if (ret < 0) { 4867 rtw89_debug(rtwdev, RTW89_DBG_FW, 4868 "failed to add pkt ofld: id %d, ret %d\n", 4869 alloc_id, ret); 4870 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4871 return ret; 4872 } 4873 4874 return 0; 4875 } 4876 4877 static 4878 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4879 struct list_head *chan_list) 4880 { 4881 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4882 struct rtw89_h2c_chinfo_elem *elem; 4883 struct rtw89_mac_chinfo *ch_info; 4884 struct rtw89_h2c_chinfo *h2c; 4885 struct sk_buff *skb; 4886 unsigned int cond; 4887 int skb_len; 4888 int ret; 4889 4890 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4891 4892 skb_len = struct_size(h2c, elem, ch_num); 4893 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4894 if (!skb) { 4895 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4896 return -ENOMEM; 4897 } 4898 skb_put(skb, sizeof(*h2c)); 4899 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4900 4901 h2c->ch_num = ch_num; 4902 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4903 4904 list_for_each_entry(ch_info, chan_list, list) { 4905 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 4906 4907 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 4908 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 4909 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 4910 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 4911 4912 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 4913 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 4914 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 4915 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 4916 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 4917 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 4918 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 4919 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 4920 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 4921 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 4922 4923 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 4924 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 4925 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 4926 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 4927 4928 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 4929 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 4930 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 4931 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 4932 } 4933 4934 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4935 H2C_CAT_MAC, 
H2C_CL_MAC_FW_OFLD, 4936 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4937 4938 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4939 4940 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4941 if (ret) { 4942 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4943 return ret; 4944 } 4945 4946 return 0; 4947 } 4948 4949 static 4950 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 4951 struct list_head *chan_list, 4952 struct rtw89_vif_link *rtwvif_link) 4953 { 4954 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4955 struct rtw89_h2c_chinfo_elem_be *elem; 4956 struct rtw89_mac_chinfo_be *ch_info; 4957 struct rtw89_h2c_chinfo *h2c; 4958 struct sk_buff *skb; 4959 unsigned int cond; 4960 int skb_len; 4961 int ret; 4962 4963 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4964 4965 skb_len = struct_size(h2c, elem, ch_num); 4966 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4967 if (!skb) { 4968 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4969 return -ENOMEM; 4970 } 4971 4972 skb_put(skb, sizeof(*h2c)); 4973 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4974 4975 h2c->ch_num = ch_num; 4976 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4977 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 4978 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 4979 4980 list_for_each_entry(ch_info, chan_list, list) { 4981 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 4982 4983 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 4984 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4985 le32_encode_bits(ch_info->central_ch, 4986 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 4987 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 4988 4989 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 4990 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 4991 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 4992 le32_encode_bits(ch_info->pause_data, 4993 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 4994 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 4995 le32_encode_bits(ch_info->rand_seq_num, 4996 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 4997 le32_encode_bits(ch_info->notify_action, 4998 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 4999 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5000 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5001 le32_encode_bits(ch_info->leave_crit, 5002 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5003 le32_encode_bits(ch_info->chkpt_timer, 5004 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5005 5006 elem->w2 = le32_encode_bits(ch_info->leave_time, 5007 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5008 le32_encode_bits(ch_info->leave_th, 5009 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5010 le32_encode_bits(ch_info->tx_pkt_ctrl, 5011 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5012 5013 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5014 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5015 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5016 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5017 5018 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5019 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5020 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5021 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5022 5023 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5024 le32_encode_bits(ch_info->fw_probe0_ssids, 5025 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5026 5027 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5028 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5029 le32_encode_bits(ch_info->fw_probe0_bssids, 5030 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5031 } 5032 5033 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5034 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5035 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5036 5037 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5038 5039 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5040 if (ret) { 5041 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5042 return ret; 5043 } 5044 5045 return 0; 5046 } 5047 5048 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5049 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5050 struct rtw89_scan_option *option, 5051 struct rtw89_vif_link *rtwvif_link, 5052 bool wowlan) 5053 { 5054 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5055 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5056 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5057 struct rtw89_h2c_scanofld *h2c; 5058 u32 len = sizeof(*h2c); 5059 struct sk_buff *skb; 5060 unsigned int cond; 5061 u64 tsf = 0; 5062 int ret; 5063 5064 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5065 if (!skb) { 5066 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5067 return -ENOMEM; 5068 } 5069 skb_put(skb, len); 5070 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5071 5072 if (option->delay) { 5073 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5074 if (ret) { 5075 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5076 scan_mode = RTW89_SCAN_IMMEDIATE; 5077 } else { 5078 scan_mode = RTW89_SCAN_DELAY; 5079 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5080 } 5081 } 5082 5083 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5084 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5085 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5086 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 5087 5088 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5089 le32_encode_bits(option->target_ch_mode, 5090 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5091 
le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5092 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5093 5094 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5095 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5096 5097 if (option->target_ch_mode) { 5098 h2c->w1 |= le32_encode_bits(op->band_width, 5099 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5100 le32_encode_bits(op->primary_channel, 5101 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5102 le32_encode_bits(op->channel, 5103 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5104 h2c->w0 |= le32_encode_bits(op->band_type, 5105 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5106 } 5107 5108 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5109 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5110 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5111 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5112 5113 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5114 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5115 H2C_FUNC_SCANOFLD, 1, 1, 5116 len); 5117 5118 if (option->enable) 5119 cond = RTW89_SCANOFLD_WAIT_COND_START; 5120 else 5121 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5122 5123 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5124 if (ret) { 5125 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5126 return ret; 5127 } 5128 5129 return 0; 5130 } 5131 5132 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5133 struct rtw89_scan_option *option) 5134 { 5135 struct ieee80211_supported_band *sband; 5136 struct ieee80211_channel *chan; 5137 u8 i, idx; 5138 5139 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5140 if (!sband) { 5141 option->prohib_chan = U64_MAX; 5142 return; 5143 } 5144 5145 for (i = 0; i < sband->n_channels; i++) { 5146 chan = &sband->channels[i]; 5147 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5148 idx = (chan->hw_value - 1) / 4; 5149 option->prohib_chan |= BIT(idx); 5150 } 5151 } 5152 } 5153 5154 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5155 struct rtw89_scan_option *option, 5156 struct rtw89_vif_link *rtwvif_link, 5157 bool wowlan) 5158 { 5159 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5160 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5161 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5162 struct cfg80211_scan_request *req = rtwvif->scan_req; 5163 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5164 struct rtw89_chan *op = &scan_info->op_chan; 5165 struct rtw89_h2c_scanofld_be_opch *opch; 5166 struct rtw89_pktofld_info *pkt_info; 5167 struct rtw89_h2c_scanofld_be *h2c; 5168 struct sk_buff *skb; 5169 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5170 u8 opch_size = sizeof(*opch) * option->num_opch; 5171 u8 probe_id[NUM_NL80211_BANDS]; 5172 u8 cfg_len = sizeof(*h2c); 5173 unsigned int cond; 5174 void *ptr; 5175 int ret; 5176 u32 len; 5177 u8 i; 5178 5179 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5180 5181 len = cfg_len + macc_role_size + opch_size; 5182 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5183 if (!skb) { 5184 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5185 return -ENOMEM; 5186 } 5187 5188 skb_put(skb, len); 5189 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5190 ptr = skb->data; 5191 5192 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5193 5194 if (!wowlan) { 5195 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5196 if (pkt_info->wildcard_6ghz) { 5197 /* Provide wildcard as template */ 5198 
probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5199 break; 5200 } 5201 } 5202 } 5203 5204 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5205 le32_encode_bits(option->scan_mode, 5206 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5207 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5208 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5209 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5210 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5211 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5212 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5213 5214 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5215 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5216 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5217 5218 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5219 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5220 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5221 5222 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5223 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5224 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5225 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5226 5227 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5228 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5229 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5230 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5231 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5232 5233 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5234 5235 h2c->w6 = le32_encode_bits(option->prohib_chan, 5236 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5237 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5238 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5239 if (!wowlan && req->no_cck) { 5240 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5241 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5242 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5243 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5244 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5245 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5246 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5247 } 5248 5249 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5250 cfg_len = offsetofend(typeof(*h2c), w8); 5251 goto flex_member; 5252 } 5253 5254 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5255 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5256 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5257 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5258 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5259 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5260 5261 flex_member: 5262 ptr += cfg_len; 5263 5264 for (i = 0; i < option->num_macc_role; i++) { 5265 macc_role = ptr; 5266 macc_role->w0 = 5267 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5268 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5269 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5270 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5271 ptr += sizeof(*macc_role); 5272 } 5273 5274 for (i = 0; i < option->num_opch; i++) { 5275 opch = ptr; 5276 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5277 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5278 
le32_encode_bits(option->band, 5279 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5280 le32_encode_bits(rtwvif_link->port, 5281 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5282 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5283 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5284 le32_encode_bits(true, 5285 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5286 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5287 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5288 5289 opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME, 5290 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 5291 le32_encode_bits(op->band_type, 5292 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5293 le32_encode_bits(op->band_width, 5294 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5295 le32_encode_bits(0x3, 5296 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5297 le32_encode_bits(op->primary_channel, 5298 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5299 le32_encode_bits(op->channel, 5300 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5301 5302 opch->w2 = le32_encode_bits(0, 5303 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5304 le32_encode_bits(0, 5305 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5306 le32_encode_bits(2, 5307 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5308 5309 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5310 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5311 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5312 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5313 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5314 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5315 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5316 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5317 ptr += sizeof(*opch); 5318 } 5319 5320 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5321 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5322 H2C_FUNC_SCANOFLD_BE, 1, 1, 5323 len); 5324 5325 if (option->enable) 5326 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5327 else 5328 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5329 5330 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5331 if (ret) { 5332 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5333 return ret; 5334 } 5335 5336 return 0; 5337 } 5338 5339 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5340 struct rtw89_fw_h2c_rf_reg_info *info, 5341 u16 len, u8 page) 5342 { 5343 struct sk_buff *skb; 5344 u8 class = info->rf_path == RF_PATH_A ? 
5345 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5346 int ret; 5347 5348 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5349 if (!skb) { 5350 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5351 return -ENOMEM; 5352 } 5353 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5354 5355 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5356 H2C_CAT_OUTSRC, class, page, 0, 0, 5357 len); 5358 5359 ret = rtw89_h2c_tx(rtwdev, skb, false); 5360 if (ret) { 5361 rtw89_err(rtwdev, "failed to send h2c\n"); 5362 goto fail; 5363 } 5364 5365 return 0; 5366 fail: 5367 dev_kfree_skb_any(skb); 5368 5369 return ret; 5370 } 5371 5372 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5373 { 5374 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5375 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5376 struct sk_buff *skb; 5377 int ret; 5378 u8 idx; 5379 5380 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5381 if (!skb) { 5382 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5383 return -ENOMEM; 5384 } 5385 skb_put(skb, sizeof(*mccch)); 5386 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5387 5388 idx = rfk_mcc->table_idx; 5389 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5390 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5391 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5392 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5393 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5394 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5395 5396 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5397 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5398 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5399 sizeof(*mccch)); 5400 5401 ret = rtw89_h2c_tx(rtwdev, skb, false); 5402 if (ret) { 5403 rtw89_err(rtwdev, "failed to send h2c\n"); 5404 goto fail; 5405 } 5406 5407 return 0; 5408 fail: 5409 dev_kfree_skb_any(skb); 5410 5411 return ret; 5412 } 5413 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5414 5415 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5416 enum rtw89_phy_idx phy_idx) 5417 { 5418 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5419 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5420 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5421 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5422 u32 len = sizeof(*h2c); 5423 struct sk_buff *skb; 5424 u8 ver = U8_MAX; 5425 u8 tbl, path; 5426 u32 val32; 5427 int ret; 5428 5429 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5430 len = sizeof(*h2c_v0); 5431 ver = 0; 5432 } 5433 5434 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5435 if (!skb) { 5436 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5437 return -ENOMEM; 5438 } 5439 skb_put(skb, len); 5440 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5441 5442 h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5443 5444 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5445 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5446 5447 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5448 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5449 h2c->common.dbcc.ch[path][tbl] = 5450 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5451 h2c->common.dbcc.band[path][tbl] = 5452 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5453 } 5454 } 5455 5456 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5457 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5458 5459 h2c->common.tbl.cur_ch[path] = 5460 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5461 
h2c->common.tbl.cur_band[path] = 5462 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5463 } 5464 5465 h2c->common.phy_idx = cpu_to_le32(phy_idx); 5466 5467 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5468 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5469 5470 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5471 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5472 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5473 5474 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5475 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5476 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5477 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5478 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5479 h2c_v0->rfmod0 = cpu_to_le32(val32); 5480 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5481 h2c_v0->rfmod1 = cpu_to_le32(val32); 5482 5483 if (rtw89_is_mlo_1_1(rtwdev)) 5484 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5485 5486 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5487 5488 goto done; 5489 } 5490 5491 if (rtw89_is_mlo_1_1(rtwdev)) 5492 h2c->mlo_1_1 = cpu_to_le32(1); 5493 done: 5494 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5495 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5496 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5497 len); 5498 5499 ret = rtw89_h2c_tx(rtwdev, skb, false); 5500 if (ret) { 5501 rtw89_err(rtwdev, "failed to send h2c\n"); 5502 goto fail; 5503 } 5504 5505 return 0; 5506 fail: 5507 dev_kfree_skb_any(skb); 5508 5509 return ret; 5510 } 5511 5512 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5513 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5514 { 5515 struct rtw89_hal *hal = &rtwdev->hal; 5516 struct rtw89_h2c_rf_tssi *h2c; 5517 u32 len = sizeof(*h2c); 5518 struct sk_buff *skb; 5519 int ret; 5520 5521 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5522 if (!skb) { 5523 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5524 return -ENOMEM; 5525 } 5526 skb_put(skb, len); 5527 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5528 5529 h2c->len = cpu_to_le16(len); 5530 h2c->phy = phy_idx; 5531 h2c->ch = chan->channel; 5532 h2c->bw = chan->band_width; 5533 h2c->band = chan->band_type; 5534 h2c->hwtx_en = true; 5535 h2c->cv = hal->cv; 5536 h2c->tssi_mode = tssi_mode; 5537 5538 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5539 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5540 5541 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5542 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5543 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5544 5545 ret = rtw89_h2c_tx(rtwdev, skb, false); 5546 if (ret) { 5547 rtw89_err(rtwdev, "failed to send h2c\n"); 5548 goto fail; 5549 } 5550 5551 return 0; 5552 fail: 5553 dev_kfree_skb_any(skb); 5554 5555 return ret; 5556 } 5557 5558 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5559 const struct rtw89_chan *chan) 5560 { 5561 struct rtw89_h2c_rf_iqk *h2c; 5562 u32 len = sizeof(*h2c); 5563 struct sk_buff *skb; 5564 int ret; 5565 5566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5567 if (!skb) { 5568 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5569 return -ENOMEM; 5570 } 5571 skb_put(skb, len); 5572 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5573 5574 h2c->phy_idx = cpu_to_le32(phy_idx); 5575 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5576 5577 rtw89_h2c_pkt_set_hdr(rtwdev, skb, 
FWCMD_TYPE_H2C, 5578 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5579 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5580 5581 ret = rtw89_h2c_tx(rtwdev, skb, false); 5582 if (ret) { 5583 rtw89_err(rtwdev, "failed to send h2c\n"); 5584 goto fail; 5585 } 5586 5587 return 0; 5588 fail: 5589 dev_kfree_skb_any(skb); 5590 5591 return ret; 5592 } 5593 5594 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5595 const struct rtw89_chan *chan) 5596 { 5597 struct rtw89_h2c_rf_dpk *h2c; 5598 u32 len = sizeof(*h2c); 5599 struct sk_buff *skb; 5600 int ret; 5601 5602 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5603 if (!skb) { 5604 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5605 return -ENOMEM; 5606 } 5607 skb_put(skb, len); 5608 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5609 5610 h2c->len = len; 5611 h2c->phy = phy_idx; 5612 h2c->dpk_enable = true; 5613 h2c->kpath = RF_AB; 5614 h2c->cur_band = chan->band_type; 5615 h2c->cur_bw = chan->band_width; 5616 h2c->cur_ch = chan->channel; 5617 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5618 5619 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5620 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5621 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5622 5623 ret = rtw89_h2c_tx(rtwdev, skb, false); 5624 if (ret) { 5625 rtw89_err(rtwdev, "failed to send h2c\n"); 5626 goto fail; 5627 } 5628 5629 return 0; 5630 fail: 5631 dev_kfree_skb_any(skb); 5632 5633 return ret; 5634 } 5635 5636 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5637 const struct rtw89_chan *chan) 5638 { 5639 struct rtw89_hal *hal = &rtwdev->hal; 5640 struct rtw89_h2c_rf_txgapk *h2c; 5641 u32 len = sizeof(*h2c); 5642 struct sk_buff *skb; 5643 int ret; 5644 5645 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5646 if (!skb) { 5647 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5648 return -ENOMEM; 5649 } 5650 skb_put(skb, len); 5651 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5652 5653 h2c->len = len; 5654 h2c->ktype = 2; 5655 h2c->phy = phy_idx; 5656 h2c->kpath = RF_AB; 5657 h2c->band = chan->band_type; 5658 h2c->bw = chan->band_width; 5659 h2c->ch = chan->channel; 5660 h2c->cv = hal->cv; 5661 5662 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5663 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5664 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5665 5666 ret = rtw89_h2c_tx(rtwdev, skb, false); 5667 if (ret) { 5668 rtw89_err(rtwdev, "failed to send h2c\n"); 5669 goto fail; 5670 } 5671 5672 return 0; 5673 fail: 5674 dev_kfree_skb_any(skb); 5675 5676 return ret; 5677 } 5678 5679 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5680 const struct rtw89_chan *chan) 5681 { 5682 struct rtw89_h2c_rf_dack *h2c; 5683 u32 len = sizeof(*h2c); 5684 struct sk_buff *skb; 5685 int ret; 5686 5687 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5688 if (!skb) { 5689 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5690 return -ENOMEM; 5691 } 5692 skb_put(skb, len); 5693 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5694 5695 h2c->len = cpu_to_le32(len); 5696 h2c->phy = cpu_to_le32(phy_idx); 5697 h2c->type = cpu_to_le32(0); 5698 5699 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5700 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5701 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5702 5703 ret = rtw89_h2c_tx(rtwdev, skb, false); 5704 if (ret) { 5705 rtw89_err(rtwdev, "failed to send h2c\n"); 5706 goto fail; 5707 } 5708 5709 return 0; 5710 fail: 5711 dev_kfree_skb_any(skb); 5712 5713 
return ret; 5714 } 5715 5716 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5717 const struct rtw89_chan *chan, bool is_chl_k) 5718 { 5719 struct rtw89_h2c_rf_rxdck_v0 *v0; 5720 struct rtw89_h2c_rf_rxdck *h2c; 5721 u32 len = sizeof(*h2c); 5722 struct sk_buff *skb; 5723 int ver = -1; 5724 int ret; 5725 5726 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 5727 len = sizeof(*v0); 5728 ver = 0; 5729 } 5730 5731 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5732 if (!skb) { 5733 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5734 return -ENOMEM; 5735 } 5736 skb_put(skb, len); 5737 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 5738 5739 v0->len = len; 5740 v0->phy = phy_idx; 5741 v0->is_afe = false; 5742 v0->kpath = RF_AB; 5743 v0->cur_band = chan->band_type; 5744 v0->cur_bw = chan->band_width; 5745 v0->cur_ch = chan->channel; 5746 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5747 5748 if (ver == 0) 5749 goto hdr; 5750 5751 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5752 h2c->is_chl_k = is_chl_k; 5753 5754 hdr: 5755 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5756 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5757 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5758 5759 ret = rtw89_h2c_tx(rtwdev, skb, false); 5760 if (ret) { 5761 rtw89_err(rtwdev, "failed to send h2c\n"); 5762 goto fail; 5763 } 5764 5765 return 0; 5766 fail: 5767 dev_kfree_skb_any(skb); 5768 5769 return ret; 5770 } 5771 5772 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5773 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5774 bool rack, bool dack) 5775 { 5776 struct sk_buff *skb; 5777 int ret; 5778 5779 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5780 if (!skb) { 5781 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5782 return -ENOMEM; 5783 } 5784 skb_put_data(skb, buf, len); 5785 5786 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5787 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5788 len); 5789 5790 ret = rtw89_h2c_tx(rtwdev, skb, false); 5791 if (ret) { 5792 rtw89_err(rtwdev, "failed to send h2c\n"); 5793 goto fail; 5794 } 5795 5796 return 0; 5797 fail: 5798 dev_kfree_skb_any(skb); 5799 5800 return ret; 5801 } 5802 5803 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5804 { 5805 struct sk_buff *skb; 5806 int ret; 5807 5808 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5809 if (!skb) { 5810 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5811 return -ENOMEM; 5812 } 5813 skb_put_data(skb, buf, len); 5814 5815 ret = rtw89_h2c_tx(rtwdev, skb, false); 5816 if (ret) { 5817 rtw89_err(rtwdev, "failed to send h2c\n"); 5818 goto fail; 5819 } 5820 5821 return 0; 5822 fail: 5823 dev_kfree_skb_any(skb); 5824 5825 return ret; 5826 } 5827 5828 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5829 { 5830 struct rtw89_early_h2c *early_h2c; 5831 5832 lockdep_assert_held(&rtwdev->mutex); 5833 5834 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5835 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5836 } 5837 } 5838 5839 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5840 { 5841 struct rtw89_early_h2c *early_h2c, *tmp; 5842 5843 mutex_lock(&rtwdev->mutex); 5844 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5845 list_del(&early_h2c->list); 5846 kfree(early_h2c->h2c); 5847 kfree(early_h2c); 5848 } 5849 mutex_unlock(&rtwdev->mutex); 5850 } 5851 5852 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5853 { 5854 
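/* Decode the C2H header once and cache category/class/func/len in the skb control buffer, so rtw89_fw_c2h_chk_atomic() and rtw89_fw_c2h_cmd_handle() below can reuse the fields without re-parsing the header. */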
const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5855 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5856 5857 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5858 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5859 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5860 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 5861 } 5862 5863 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 5864 struct sk_buff *c2h) 5865 { 5866 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5867 u8 category = attr->category; 5868 u8 class = attr->class; 5869 u8 func = attr->func; 5870 5871 switch (category) { 5872 default: 5873 return false; 5874 case RTW89_C2H_CAT_MAC: 5875 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 5876 case RTW89_C2H_CAT_OUTSRC: 5877 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 5878 } 5879 } 5880 5881 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 5882 { 5883 rtw89_fw_c2h_parse_attr(c2h); 5884 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 5885 goto enqueue; 5886 5887 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 5888 dev_kfree_skb_any(c2h); 5889 return; 5890 5891 enqueue: 5892 skb_queue_tail(&rtwdev->c2h_queue, c2h); 5893 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 5894 } 5895 5896 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 5897 struct sk_buff *skb) 5898 { 5899 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 5900 u8 category = attr->category; 5901 u8 class = attr->class; 5902 u8 func = attr->func; 5903 u16 len = attr->len; 5904 bool dump = true; 5905 5906 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5907 return; 5908 5909 switch (category) { 5910 case RTW89_C2H_CAT_TEST: 5911 break; 5912 case RTW89_C2H_CAT_MAC: 5913 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 5914 if (class == RTW89_MAC_C2H_CLASS_INFO && 5915 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 5916 dump = false; 5917 break; 5918 case RTW89_C2H_CAT_OUTSRC: 5919 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 5920 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 5921 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 5922 else 5923 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 5924 break; 5925 } 5926 5927 if (dump) 5928 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 5929 } 5930 5931 void rtw89_fw_c2h_work(struct work_struct *work) 5932 { 5933 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5934 c2h_work); 5935 struct sk_buff *skb, *tmp; 5936 5937 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 5938 skb_unlink(skb, &rtwdev->c2h_queue); 5939 mutex_lock(&rtwdev->mutex); 5940 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 5941 mutex_unlock(&rtwdev->mutex); 5942 dev_kfree_skb_any(skb); 5943 } 5944 } 5945 5946 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 5947 struct rtw89_mac_h2c_info *info) 5948 { 5949 const struct rtw89_chip_info *chip = rtwdev->chip; 5950 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5951 const u32 *h2c_reg = chip->h2c_regs; 5952 u8 i, val, len; 5953 int ret; 5954 5955 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 5956 rtwdev, chip->h2c_ctrl_reg); 5957 if (ret) { 5958 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 5959 return ret; 5960 } 5961 5962 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 5963 sizeof(info->u.h2creg[0])); 5964 5965 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 5966 
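/* Note: len computed above is a count of 32-bit h2creg words and already includes RTW89_H2CREG_HDR_LEN, which is the unit written into the LEN field below. */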
u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 5967 5968 for (i = 0; i < RTW89_H2CREG_MAX; i++) 5969 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 5970 5971 fw_info->h2c_counter++; 5972 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 5973 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 5974 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 5975 5976 return 0; 5977 } 5978 5979 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 5980 struct rtw89_mac_c2h_info *info) 5981 { 5982 const struct rtw89_chip_info *chip = rtwdev->chip; 5983 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5984 const u32 *c2h_reg = chip->c2h_regs; 5985 u32 ret; 5986 u8 i, val; 5987 5988 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 5989 5990 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 5991 RTW89_C2H_TIMEOUT, false, rtwdev, 5992 chip->c2h_ctrl_reg); 5993 if (ret) { 5994 rtw89_warn(rtwdev, "c2h reg timeout\n"); 5995 return ret; 5996 } 5997 5998 for (i = 0; i < RTW89_C2HREG_MAX; i++) 5999 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6000 6001 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6002 6003 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6004 info->content_len = 6005 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6006 RTW89_C2HREG_HDR_LEN; 6007 6008 fw_info->c2h_counter++; 6009 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6010 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6011 6012 return 0; 6013 } 6014 6015 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6016 struct rtw89_mac_h2c_info *h2c_info, 6017 struct rtw89_mac_c2h_info *c2h_info) 6018 { 6019 u32 ret; 6020 6021 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6022 lockdep_assert_held(&rtwdev->mutex); 6023 6024 if (!h2c_info && !c2h_info) 6025 return -EINVAL; 6026 6027 if (!h2c_info) 6028 goto recv_c2h; 6029 6030 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6031 if (ret) 6032 return ret; 6033 6034 recv_c2h: 6035 if (!c2h_info) 6036 return 0; 6037 6038 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6039 if (ret) 6040 return ret; 6041 6042 return 0; 6043 } 6044 6045 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6046 { 6047 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6048 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6049 return; 6050 } 6051 6052 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6053 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6054 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6055 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6056 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6057 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6058 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6059 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6060 6061 rtw89_fw_prog_cnt_dump(rtwdev); 6062 } 6063 6064 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6065 { 6066 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6067 struct rtw89_pktofld_info *info, *tmp; 6068 u8 idx; 6069 6070 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6071 if (!(rtwdev->chip->support_bands & BIT(idx))) 6072 continue; 6073 6074 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6075 if (test_bit(info->id, rtwdev->pkt_offload)) 6076 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6077 list_del(&info->list); 6078 kfree(info); 6079 } 6080 } 6081 } 6082 6083 static bool 
rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6084 struct cfg80211_scan_request *req, 6085 struct rtw89_pktofld_info *info, 6086 enum nl80211_band band, u8 ssid_idx) 6087 { 6088 if (band != NL80211_BAND_6GHZ) 6089 return false; 6090 6091 if (req->ssids[ssid_idx].ssid_len) { 6092 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6093 req->ssids[ssid_idx].ssid_len); 6094 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6095 return false; 6096 } else { 6097 info->wildcard_6ghz = true; 6098 return true; 6099 } 6100 } 6101 6102 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6103 struct rtw89_vif_link *rtwvif_link, 6104 struct sk_buff *skb, u8 ssid_idx) 6105 { 6106 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6107 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6108 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6109 struct cfg80211_scan_request *req = rtwvif->scan_req; 6110 struct rtw89_pktofld_info *info; 6111 struct sk_buff *new; 6112 int ret = 0; 6113 u8 band; 6114 6115 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6116 if (!(rtwdev->chip->support_bands & BIT(band))) 6117 continue; 6118 6119 new = skb_copy(skb, GFP_KERNEL); 6120 if (!new) { 6121 ret = -ENOMEM; 6122 goto out; 6123 } 6124 skb_put_data(new, ies->ies[band], ies->len[band]); 6125 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6126 6127 info = kzalloc(sizeof(*info), GFP_KERNEL); 6128 if (!info) { 6129 ret = -ENOMEM; 6130 kfree_skb(new); 6131 goto out; 6132 } 6133 6134 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6135 6136 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6137 if (ret) { 6138 kfree_skb(new); 6139 kfree(info); 6140 goto out; 6141 } 6142 6143 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6144 kfree_skb(new); 6145 } 6146 out: 6147 return ret; 6148 } 6149 6150 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6151 struct rtw89_vif_link *rtwvif_link) 6152 { 6153 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6154 struct cfg80211_scan_request *req = rtwvif->scan_req; 6155 struct sk_buff *skb; 6156 u8 num = req->n_ssids, i; 6157 int ret; 6158 6159 for (i = 0; i < num; i++) { 6160 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6161 req->ssids[i].ssid, 6162 req->ssids[i].ssid_len, 6163 req->ie_len); 6164 if (!skb) 6165 return -ENOMEM; 6166 6167 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6168 kfree_skb(skb); 6169 6170 if (ret) 6171 return ret; 6172 } 6173 6174 return 0; 6175 } 6176 6177 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6178 struct ieee80211_scan_ies *ies, 6179 struct cfg80211_scan_request *req, 6180 struct rtw89_mac_chinfo *ch_info) 6181 { 6182 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6183 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6184 struct cfg80211_scan_6ghz_params *params; 6185 struct rtw89_pktofld_info *info, *tmp; 6186 struct ieee80211_hdr *hdr; 6187 struct sk_buff *skb; 6188 bool found; 6189 int ret = 0; 6190 u8 i; 6191 6192 if (!req->n_6ghz_params) 6193 return 0; 6194 6195 for (i = 0; i < req->n_6ghz_params; i++) { 6196 params = &req->scan_6ghz_params[i]; 6197 6198 if (req->channels[params->channel_idx]->hw_value != 6199 ch_info->pri_ch) 6200 continue; 6201 6202 found = false; 6203 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6204 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6205 found = true; 6206 break; 6207 } 6208 } 6209 if (found) 6210 continue; 
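/* No probe request has been offloaded for this RNR-reported BSSID yet: build a directed probe request (addr3 is set to the BSSID below) and register it with the firmware packet-offload list. */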
6211 6212 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6213 NULL, 0, req->ie_len); 6214 if (!skb) 6215 return -ENOMEM; 6216 6217 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6218 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6219 hdr = (struct ieee80211_hdr *)skb->data; 6220 ether_addr_copy(hdr->addr3, params->bssid); 6221 6222 info = kzalloc(sizeof(*info), GFP_KERNEL); 6223 if (!info) { 6224 ret = -ENOMEM; 6225 kfree_skb(skb); 6226 goto out; 6227 } 6228 6229 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6230 if (ret) { 6231 kfree_skb(skb); 6232 kfree(info); 6233 goto out; 6234 } 6235 6236 ether_addr_copy(info->bssid, params->bssid); 6237 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6238 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6239 6240 ch_info->tx_pkt = true; 6241 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6242 6243 kfree_skb(skb); 6244 } 6245 6246 out: 6247 return ret; 6248 } 6249 6250 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6251 int chan_type, int ssid_num, 6252 struct rtw89_mac_chinfo *ch_info) 6253 { 6254 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6255 struct rtw89_pktofld_info *info; 6256 u8 probe_count = 0; 6257 6258 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6259 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6260 ch_info->bw = RTW89_SCAN_WIDTH; 6261 ch_info->tx_pkt = true; 6262 ch_info->cfg_tx_pwr = false; 6263 ch_info->tx_pwr_idx = 0; 6264 ch_info->tx_null = false; 6265 ch_info->pause_data = false; 6266 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6267 6268 if (ssid_num) { 6269 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6270 if (info->channel_6ghz && 6271 ch_info->pri_ch != info->channel_6ghz) 6272 continue; 6273 else if (info->channel_6ghz && probe_count != 0) 6274 ch_info->period += RTW89_CHANNEL_TIME_6G; 6275 6276 if (info->wildcard_6ghz) 6277 continue; 6278 6279 ch_info->pkt_id[probe_count++] = info->id; 6280 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6281 break; 6282 } 6283 ch_info->num_pkt = probe_count; 6284 } 6285 6286 switch (chan_type) { 6287 case RTW89_CHAN_DFS: 6288 if (ch_info->ch_band != RTW89_BAND_6G) 6289 ch_info->period = max_t(u8, ch_info->period, 6290 RTW89_DFS_CHAN_TIME); 6291 ch_info->dwell_time = RTW89_DWELL_TIME; 6292 break; 6293 case RTW89_CHAN_ACTIVE: 6294 break; 6295 default: 6296 rtw89_err(rtwdev, "Channel type out of bound\n"); 6297 } 6298 } 6299 6300 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6301 int ssid_num, 6302 struct rtw89_mac_chinfo *ch_info) 6303 { 6304 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6305 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6306 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6307 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6308 struct cfg80211_scan_request *req = rtwvif->scan_req; 6309 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6310 struct rtw89_pktofld_info *info; 6311 u8 band, probe_count = 0; 6312 int ret; 6313 6314 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6315 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6316 ch_info->bw = RTW89_SCAN_WIDTH; 6317 ch_info->tx_pkt = true; 6318 ch_info->cfg_tx_pwr = false; 6319 ch_info->tx_pwr_idx = 0; 6320 ch_info->tx_null = false; 6321 ch_info->pause_data = false; 6322 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6323 6324 if (ch_info->ch_band == RTW89_BAND_6G) { 6325 if ((ssid_num 
== 1 && req->ssids[0].ssid_len == 0) || 6326 !ch_info->is_psc) { 6327 ch_info->tx_pkt = false; 6328 if (!req->duration_mandatory) 6329 ch_info->period -= RTW89_DWELL_TIME_6G; 6330 } 6331 } 6332 6333 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6334 if (ret) 6335 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6336 6337 if (ssid_num) { 6338 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6339 6340 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6341 if (info->channel_6ghz && 6342 ch_info->pri_ch != info->channel_6ghz) 6343 continue; 6344 else if (info->channel_6ghz && probe_count != 0) 6345 ch_info->period += RTW89_CHANNEL_TIME_6G; 6346 6347 if (info->wildcard_6ghz) 6348 continue; 6349 6350 ch_info->pkt_id[probe_count++] = info->id; 6351 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6352 break; 6353 } 6354 ch_info->num_pkt = probe_count; 6355 } 6356 6357 switch (chan_type) { 6358 case RTW89_CHAN_OPERATE: 6359 ch_info->central_ch = op->channel; 6360 ch_info->pri_ch = op->primary_channel; 6361 ch_info->ch_band = op->band_type; 6362 ch_info->bw = op->band_width; 6363 ch_info->tx_null = true; 6364 ch_info->num_pkt = 0; 6365 break; 6366 case RTW89_CHAN_DFS: 6367 if (ch_info->ch_band != RTW89_BAND_6G) 6368 ch_info->period = max_t(u8, ch_info->period, 6369 RTW89_DFS_CHAN_TIME); 6370 ch_info->dwell_time = RTW89_DWELL_TIME; 6371 ch_info->pause_data = true; 6372 break; 6373 case RTW89_CHAN_ACTIVE: 6374 ch_info->pause_data = true; 6375 break; 6376 default: 6377 rtw89_err(rtwdev, "Channel type out of bound\n"); 6378 } 6379 } 6380 6381 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6382 int ssid_num, 6383 struct rtw89_mac_chinfo_be *ch_info) 6384 { 6385 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6386 struct rtw89_pktofld_info *info; 6387 u8 probe_count = 0, i; 6388 6389 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6390 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6391 ch_info->bw = RTW89_SCAN_WIDTH; 6392 ch_info->tx_null = false; 6393 ch_info->pause_data = false; 6394 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6395 6396 if (ssid_num) { 6397 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6398 ch_info->pkt_id[probe_count++] = info->id; 6399 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6400 break; 6401 } 6402 } 6403 6404 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6405 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6406 6407 switch (chan_type) { 6408 case RTW89_CHAN_DFS: 6409 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6410 ch_info->dwell_time = RTW89_DWELL_TIME; 6411 break; 6412 case RTW89_CHAN_ACTIVE: 6413 break; 6414 default: 6415 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6416 break; 6417 } 6418 } 6419 6420 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6421 int ssid_num, 6422 struct rtw89_mac_chinfo_be *ch_info) 6423 { 6424 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6425 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6426 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6427 struct cfg80211_scan_request *req = rtwvif->scan_req; 6428 struct rtw89_pktofld_info *info; 6429 u8 band, probe_count = 0, i; 6430 6431 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6432 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6433 ch_info->bw = RTW89_SCAN_WIDTH; 6434 ch_info->tx_null = false; 6435 ch_info->pause_data = false; 6436 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6437 6438 if (ssid_num) { 6439 band = 
rtw89_hw_to_nl80211_band(ch_info->ch_band); 6440 6441 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6442 if (info->channel_6ghz && 6443 ch_info->pri_ch != info->channel_6ghz) 6444 continue; 6445 6446 if (info->wildcard_6ghz) 6447 continue; 6448 6449 ch_info->pkt_id[probe_count++] = info->id; 6450 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6451 break; 6452 } 6453 } 6454 6455 if (ch_info->ch_band == RTW89_BAND_6G) { 6456 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6457 !ch_info->is_psc) { 6458 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6459 if (!req->duration_mandatory) 6460 ch_info->period -= RTW89_DWELL_TIME_6G; 6461 } 6462 } 6463 6464 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6465 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6466 6467 switch (chan_type) { 6468 case RTW89_CHAN_DFS: 6469 if (ch_info->ch_band != RTW89_BAND_6G) 6470 ch_info->period = 6471 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6472 ch_info->dwell_time = RTW89_DWELL_TIME; 6473 ch_info->pause_data = true; 6474 break; 6475 case RTW89_CHAN_ACTIVE: 6476 ch_info->pause_data = true; 6477 break; 6478 default: 6479 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6480 break; 6481 } 6482 } 6483 6484 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6485 struct rtw89_vif_link *rtwvif_link) 6486 { 6487 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6488 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6489 struct rtw89_mac_chinfo *ch_info, *tmp; 6490 struct ieee80211_channel *channel; 6491 struct list_head chan_list; 6492 int list_len; 6493 enum rtw89_chan_type type; 6494 int ret = 0; 6495 u32 idx; 6496 6497 INIT_LIST_HEAD(&chan_list); 6498 for (idx = 0, list_len = 0; 6499 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6500 idx++, list_len++) { 6501 channel = nd_config->channels[idx]; 6502 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6503 if (!ch_info) { 6504 ret = -ENOMEM; 6505 goto out; 6506 } 6507 6508 ch_info->period = RTW89_CHANNEL_TIME; 6509 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6510 ch_info->central_ch = channel->hw_value; 6511 ch_info->pri_ch = channel->hw_value; 6512 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6513 6514 if (channel->flags & 6515 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6516 type = RTW89_CHAN_DFS; 6517 else 6518 type = RTW89_CHAN_ACTIVE; 6519 6520 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6521 list_add_tail(&ch_info->list, &chan_list); 6522 } 6523 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6524 6525 out: 6526 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6527 list_del(&ch_info->list); 6528 kfree(ch_info); 6529 } 6530 6531 return ret; 6532 } 6533 6534 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6535 struct rtw89_vif_link *rtwvif_link, bool connected) 6536 { 6537 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6538 struct cfg80211_scan_request *req = rtwvif->scan_req; 6539 struct rtw89_mac_chinfo *ch_info, *tmp; 6540 struct ieee80211_channel *channel; 6541 struct list_head chan_list; 6542 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6543 int list_len, off_chan_time = 0; 6544 enum rtw89_chan_type type; 6545 int ret = 0; 6546 u32 idx; 6547 6548 INIT_LIST_HEAD(&chan_list); 6549 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6550 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6551 idx++, list_len++) { 6552 channel = req->channels[idx]; 6553 ch_info = 
kzalloc(sizeof(*ch_info), GFP_KERNEL); 6554 if (!ch_info) { 6555 ret = -ENOMEM; 6556 goto out; 6557 } 6558 6559 if (req->duration) 6560 ch_info->period = req->duration; 6561 else if (channel->band == NL80211_BAND_6GHZ) 6562 ch_info->period = RTW89_CHANNEL_TIME_6G + 6563 RTW89_DWELL_TIME_6G; 6564 else 6565 ch_info->period = RTW89_CHANNEL_TIME; 6566 6567 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6568 ch_info->central_ch = channel->hw_value; 6569 ch_info->pri_ch = channel->hw_value; 6570 ch_info->rand_seq_num = random_seq; 6571 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6572 6573 if (channel->flags & 6574 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6575 type = RTW89_CHAN_DFS; 6576 else 6577 type = RTW89_CHAN_ACTIVE; 6578 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6579 6580 if (connected && 6581 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6582 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6583 if (!tmp) { 6584 ret = -ENOMEM; 6585 kfree(ch_info); 6586 goto out; 6587 } 6588 6589 type = RTW89_CHAN_OPERATE; 6590 tmp->period = req->duration_mandatory ? 6591 req->duration : RTW89_CHANNEL_TIME; 6592 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6593 list_add_tail(&tmp->list, &chan_list); 6594 off_chan_time = 0; 6595 list_len++; 6596 } 6597 list_add_tail(&ch_info->list, &chan_list); 6598 off_chan_time += ch_info->period; 6599 } 6600 rtwdev->scan_info.last_chan_idx = idx; 6601 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6602 6603 out: 6604 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6605 list_del(&ch_info->list); 6606 kfree(ch_info); 6607 } 6608 6609 return ret; 6610 } 6611 6612 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6613 struct rtw89_vif_link *rtwvif_link) 6614 { 6615 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6616 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6617 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6618 struct ieee80211_channel *channel; 6619 struct list_head chan_list; 6620 enum rtw89_chan_type type; 6621 int list_len, ret; 6622 u32 idx; 6623 6624 INIT_LIST_HEAD(&chan_list); 6625 6626 for (idx = 0, list_len = 0; 6627 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6628 idx++, list_len++) { 6629 channel = nd_config->channels[idx]; 6630 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6631 if (!ch_info) { 6632 ret = -ENOMEM; 6633 goto out; 6634 } 6635 6636 ch_info->period = RTW89_CHANNEL_TIME; 6637 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6638 ch_info->central_ch = channel->hw_value; 6639 ch_info->pri_ch = channel->hw_value; 6640 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6641 6642 if (channel->flags & 6643 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6644 type = RTW89_CHAN_DFS; 6645 else 6646 type = RTW89_CHAN_ACTIVE; 6647 6648 rtw89_pno_scan_add_chan_be(rtwdev, type, 6649 nd_config->n_match_sets, ch_info); 6650 list_add_tail(&ch_info->list, &chan_list); 6651 } 6652 6653 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6654 rtwvif_link); 6655 6656 out: 6657 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6658 list_del(&ch_info->list); 6659 kfree(ch_info); 6660 } 6661 6662 return ret; 6663 } 6664 6665 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6666 struct rtw89_vif_link *rtwvif_link, bool connected) 6667 { 6668 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6669 struct cfg80211_scan_request *req = rtwvif->scan_req; 6670 struct rtw89_mac_chinfo_be 
*ch_info, *tmp; 6671 struct ieee80211_channel *channel; 6672 struct list_head chan_list; 6673 enum rtw89_chan_type type; 6674 int list_len, ret; 6675 bool random_seq; 6676 u32 idx; 6677 6678 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6679 INIT_LIST_HEAD(&chan_list); 6680 6681 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6682 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6683 idx++, list_len++) { 6684 channel = req->channels[idx]; 6685 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6686 if (!ch_info) { 6687 ret = -ENOMEM; 6688 goto out; 6689 } 6690 6691 if (req->duration) 6692 ch_info->period = req->duration; 6693 else if (channel->band == NL80211_BAND_6GHZ) 6694 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6695 else 6696 ch_info->period = RTW89_CHANNEL_TIME; 6697 6698 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6699 ch_info->central_ch = channel->hw_value; 6700 ch_info->pri_ch = channel->hw_value; 6701 ch_info->rand_seq_num = random_seq; 6702 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6703 6704 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6705 type = RTW89_CHAN_DFS; 6706 else 6707 type = RTW89_CHAN_ACTIVE; 6708 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6709 6710 list_add_tail(&ch_info->list, &chan_list); 6711 } 6712 6713 rtwdev->scan_info.last_chan_idx = idx; 6714 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6715 rtwvif_link); 6716 6717 out: 6718 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6719 list_del(&ch_info->list); 6720 kfree(ch_info); 6721 } 6722 6723 return ret; 6724 } 6725 6726 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6727 struct rtw89_vif_link *rtwvif_link, bool connected) 6728 { 6729 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6730 int ret; 6731 6732 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 6733 if (ret) { 6734 rtw89_err(rtwdev, "Update probe request failed\n"); 6735 goto out; 6736 } 6737 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 6738 out: 6739 return ret; 6740 } 6741 6742 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 6743 struct rtw89_vif_link *rtwvif_link, 6744 struct ieee80211_scan_request *scan_req) 6745 { 6746 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6747 struct cfg80211_scan_request *req = &scan_req->req; 6748 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 6749 rtwvif_link->chanctx_idx); 6750 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6751 u32 rx_fltr = rtwdev->hal.rx_fltr; 6752 u8 mac_addr[ETH_ALEN]; 6753 u32 reg; 6754 6755 /* clone op and keep it during scan */ 6756 rtwdev->scan_info.op_chan = *chan; 6757 6758 rtwdev->scan_info.scanning_vif = rtwvif_link; 6759 rtwdev->scan_info.last_chan_idx = 0; 6760 rtwdev->scan_info.abort = false; 6761 rtwvif->scan_ies = &scan_req->ies; 6762 rtwvif->scan_req = req; 6763 ieee80211_stop_queues(rtwdev->hw); 6764 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 6765 6766 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6767 get_random_mask_addr(mac_addr, req->mac_addr, 6768 req->mac_addr_mask); 6769 else 6770 ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 6771 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 6772 6773 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6774 rx_fltr &= ~B_AX_A_BC; 6775 rx_fltr &= ~B_AX_A_A1_MATCH; 6776 6777 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 6778 rtw89_write32_mask(rtwdev, reg, 
B_AX_RX_FLTR_CFG_MASK, rx_fltr); 6779 6780 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6781 } 6782 6783 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 6784 struct rtw89_vif_link *rtwvif_link, 6785 bool aborted) 6786 { 6787 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6788 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6789 struct cfg80211_scan_info info = { 6790 .aborted = aborted, 6791 }; 6792 struct rtw89_vif *rtwvif; 6793 u32 reg; 6794 6795 if (!rtwvif_link) 6796 return; 6797 6798 rtw89_chanctx_proceed(rtwdev); 6799 6800 rtwvif = rtwvif_link->rtwvif; 6801 6802 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 6803 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 6804 6805 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 6806 ieee80211_scan_completed(rtwdev->hw, &info); 6807 ieee80211_wake_queues(rtwdev->hw); 6808 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 6809 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6810 6811 rtw89_release_pkt_list(rtwdev); 6812 rtwvif->scan_req = NULL; 6813 rtwvif->scan_ies = NULL; 6814 scan_info->last_chan_idx = 0; 6815 scan_info->scanning_vif = NULL; 6816 scan_info->abort = false; 6817 } 6818 6819 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 6820 struct rtw89_vif_link *rtwvif_link) 6821 { 6822 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6823 int ret; 6824 6825 scan_info->abort = true; 6826 6827 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 6828 if (ret) 6829 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 6830 6831 /* Indicate ieee80211_scan_completed() before returning, which is safe 6832 * because scan abort command always waits for completion of 6833 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 6834 * work properly. 6835 */ 6836 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 6837 } 6838 6839 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 6840 { 6841 struct rtw89_vif_link *rtwvif_link; 6842 struct rtw89_vif *rtwvif; 6843 unsigned int link_id; 6844 6845 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 6846 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 6847 /* This variable implies connected or during attempt to connect */ 6848 if (!is_zero_ether_addr(rtwvif_link->bssid)) 6849 return true; 6850 } 6851 } 6852 6853 return false; 6854 } 6855 6856 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 6857 struct rtw89_vif_link *rtwvif_link, 6858 bool enable) 6859 { 6860 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6861 struct rtw89_scan_option opt = {0}; 6862 bool connected; 6863 int ret = 0; 6864 6865 if (!rtwvif_link) 6866 return -EINVAL; 6867 6868 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 6869 opt.enable = enable; 6870 opt.target_ch_mode = connected; 6871 if (enable) { 6872 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 6873 if (ret) 6874 goto out; 6875 } 6876 6877 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 6878 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 6879 opt.scan_mode = RTW89_SCAN_MODE_SA; 6880 opt.band = rtwvif_link->mac_idx; 6881 opt.num_macc_role = 0; 6882 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 6883 opt.num_opch = connected ? 1 : 0; 6884 opt.opch_end = connected ? 
0 : RTW89_CHAN_INVALID; 6885 } 6886 6887 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 6888 out: 6889 return ret; 6890 } 6891 6892 #define H2C_FW_CPU_EXCEPTION_LEN 4 6893 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 6894 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 6895 { 6896 struct sk_buff *skb; 6897 int ret; 6898 6899 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 6900 if (!skb) { 6901 rtw89_err(rtwdev, 6902 "failed to alloc skb for fw cpu exception\n"); 6903 return -ENOMEM; 6904 } 6905 6906 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 6907 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 6908 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 6909 6910 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6911 H2C_CAT_TEST, 6912 H2C_CL_FW_STATUS_TEST, 6913 H2C_FUNC_CPU_EXCEPTION, 0, 0, 6914 H2C_FW_CPU_EXCEPTION_LEN); 6915 6916 ret = rtw89_h2c_tx(rtwdev, skb, false); 6917 if (ret) { 6918 rtw89_err(rtwdev, "failed to send h2c\n"); 6919 goto fail; 6920 } 6921 6922 return 0; 6923 6924 fail: 6925 dev_kfree_skb_any(skb); 6926 return ret; 6927 } 6928 6929 #define H2C_PKT_DROP_LEN 24 6930 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 6931 const struct rtw89_pkt_drop_params *params) 6932 { 6933 struct sk_buff *skb; 6934 int ret; 6935 6936 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 6937 if (!skb) { 6938 rtw89_err(rtwdev, 6939 "failed to alloc skb for packet drop\n"); 6940 return -ENOMEM; 6941 } 6942 6943 switch (params->sel) { 6944 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 6945 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 6946 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 6947 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 6948 case RTW89_PKT_DROP_SEL_BAND_ONCE: 6949 break; 6950 default: 6951 rtw89_debug(rtwdev, RTW89_DBG_FW, 6952 "H2C of pkt drop might not fully support sel: %d yet\n", 6953 params->sel); 6954 break; 6955 } 6956 6957 skb_put(skb, H2C_PKT_DROP_LEN); 6958 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 6959 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 6960 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 6961 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 6962 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 6963 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 6964 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 6965 params->macid_band_sel[0]); 6966 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 6967 params->macid_band_sel[1]); 6968 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 6969 params->macid_band_sel[2]); 6970 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 6971 params->macid_band_sel[3]); 6972 6973 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6974 H2C_CAT_MAC, 6975 H2C_CL_MAC_FW_OFLD, 6976 H2C_FUNC_PKT_DROP, 0, 0, 6977 H2C_PKT_DROP_LEN); 6978 6979 ret = rtw89_h2c_tx(rtwdev, skb, false); 6980 if (ret) { 6981 rtw89_err(rtwdev, "failed to send h2c\n"); 6982 goto fail; 6983 } 6984 6985 return 0; 6986 6987 fail: 6988 dev_kfree_skb_any(skb); 6989 return ret; 6990 } 6991 6992 #define H2C_KEEP_ALIVE_LEN 4 6993 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 6994 bool enable) 6995 { 6996 struct sk_buff *skb; 6997 u8 pkt_id = 0; 6998 int ret; 6999 7000 if (enable) { 7001 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7002 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7003 &pkt_id); 7004 if (ret) 7005 return -EPERM; 7006 } 7007 7008 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7009 if (!skb) { 
7010 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7011 return -ENOMEM; 7012 } 7013 7014 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7015 7016 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7017 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7018 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7019 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7020 7021 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7022 H2C_CAT_MAC, 7023 H2C_CL_MAC_WOW, 7024 H2C_FUNC_KEEP_ALIVE, 0, 1, 7025 H2C_KEEP_ALIVE_LEN); 7026 7027 ret = rtw89_h2c_tx(rtwdev, skb, false); 7028 if (ret) { 7029 rtw89_err(rtwdev, "failed to send h2c\n"); 7030 goto fail; 7031 } 7032 7033 return 0; 7034 7035 fail: 7036 dev_kfree_skb_any(skb); 7037 7038 return ret; 7039 } 7040 7041 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7042 bool enable) 7043 { 7044 struct rtw89_h2c_arp_offload *h2c; 7045 u32 len = sizeof(*h2c); 7046 struct sk_buff *skb; 7047 u8 pkt_id = 0; 7048 int ret; 7049 7050 if (enable) { 7051 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7052 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7053 &pkt_id); 7054 if (ret) 7055 return ret; 7056 } 7057 7058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7059 if (!skb) { 7060 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7061 return -ENOMEM; 7062 } 7063 7064 skb_put(skb, len); 7065 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7066 7067 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7068 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7069 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7070 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7071 7072 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7073 H2C_CAT_MAC, 7074 H2C_CL_MAC_WOW, 7075 H2C_FUNC_ARP_OFLD, 0, 1, 7076 len); 7077 7078 ret = rtw89_h2c_tx(rtwdev, skb, false); 7079 if (ret) { 7080 rtw89_err(rtwdev, "failed to send h2c\n"); 7081 goto fail; 7082 } 7083 7084 return 0; 7085 7086 fail: 7087 dev_kfree_skb_any(skb); 7088 7089 return ret; 7090 } 7091 7092 #define H2C_DISCONNECT_DETECT_LEN 8 7093 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 7094 struct rtw89_vif_link *rtwvif_link, bool enable) 7095 { 7096 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7097 struct sk_buff *skb; 7098 u8 macid = rtwvif_link->mac_id; 7099 int ret; 7100 7101 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 7102 if (!skb) { 7103 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7104 return -ENOMEM; 7105 } 7106 7107 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 7108 7109 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 7110 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 7111 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 7112 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 7113 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 7114 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 7115 } 7116 7117 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7118 H2C_CAT_MAC, 7119 H2C_CL_MAC_WOW, 7120 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 7121 H2C_DISCONNECT_DETECT_LEN); 7122 7123 ret = rtw89_h2c_tx(rtwdev, skb, false); 7124 if (ret) { 7125 rtw89_err(rtwdev, "failed to send h2c\n"); 7126 goto fail; 7127 } 7128 7129 return 0; 7130 7131 fail: 7132 dev_kfree_skb_any(skb); 7133 7134 return ret; 7135 } 7136 7137 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7138 bool enable) 7139 { 7140 
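/* NLO (network list offload) setup for net-detect/PNO: when enabled, the scheduled-scan match-set SSIDs are copied into the H2C so firmware can match them during WoWLAN. */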
struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7141 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7142 struct rtw89_h2c_cfg_nlo *h2c; 7143 u32 len = sizeof(*h2c); 7144 struct sk_buff *skb; 7145 int ret, i; 7146 7147 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7148 if (!skb) { 7149 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 7150 return -ENOMEM; 7151 } 7152 7153 skb_put(skb, len); 7154 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 7155 7156 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 7157 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 7158 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 7159 7160 if (enable) { 7161 h2c->nlo_cnt = nd_config->n_match_sets; 7162 for (i = 0 ; i < nd_config->n_match_sets; i++) { 7163 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 7164 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 7165 nd_config->match_sets[i].ssid.ssid_len); 7166 } 7167 } 7168 7169 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7170 H2C_CAT_MAC, 7171 H2C_CL_MAC_WOW, 7172 H2C_FUNC_NLO, 0, 1, 7173 len); 7174 7175 ret = rtw89_h2c_tx(rtwdev, skb, false); 7176 if (ret) { 7177 rtw89_err(rtwdev, "failed to send h2c\n"); 7178 goto fail; 7179 } 7180 7181 return 0; 7182 7183 fail: 7184 dev_kfree_skb_any(skb); 7185 return ret; 7186 } 7187 7188 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7189 bool enable) 7190 { 7191 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7192 struct rtw89_h2c_wow_global *h2c; 7193 u8 macid = rtwvif_link->mac_id; 7194 u32 len = sizeof(*h2c); 7195 struct sk_buff *skb; 7196 int ret; 7197 7198 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7199 if (!skb) { 7200 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 7201 return -ENOMEM; 7202 } 7203 7204 skb_put(skb, len); 7205 h2c = (struct rtw89_h2c_wow_global *)skb->data; 7206 7207 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 7208 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7209 le32_encode_bits(rtw_wow->ptk_alg, 7210 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7211 le32_encode_bits(rtw_wow->gtk_alg, 7212 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7213 h2c->key_info = rtw_wow->key_info; 7214 7215 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7216 H2C_CAT_MAC, 7217 H2C_CL_MAC_WOW, 7218 H2C_FUNC_WOW_GLOBAL, 0, 1, 7219 len); 7220 7221 ret = rtw89_h2c_tx(rtwdev, skb, false); 7222 if (ret) { 7223 rtw89_err(rtwdev, "failed to send h2c\n"); 7224 goto fail; 7225 } 7226 7227 return 0; 7228 7229 fail: 7230 dev_kfree_skb_any(skb); 7231 7232 return ret; 7233 } 7234 7235 #define H2C_WAKEUP_CTRL_LEN 4 7236 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7237 struct rtw89_vif_link *rtwvif_link, 7238 bool enable) 7239 { 7240 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7241 struct sk_buff *skb; 7242 u8 macid = rtwvif_link->mac_id; 7243 int ret; 7244 7245 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7246 if (!skb) { 7247 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7248 return -ENOMEM; 7249 } 7250 7251 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7252 7253 if (rtw_wow->pattern_cnt) 7254 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7255 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7256 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7257 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7258 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 
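/* Wakeup triggers are opt-in: pattern match, magic packet and deauth detection are only enabled above when the corresponding WoW pattern/flag was configured. */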
7259 7260 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7261 7262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7263 H2C_CAT_MAC, 7264 H2C_CL_MAC_WOW, 7265 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7266 H2C_WAKEUP_CTRL_LEN); 7267 7268 ret = rtw89_h2c_tx(rtwdev, skb, false); 7269 if (ret) { 7270 rtw89_err(rtwdev, "failed to send h2c\n"); 7271 goto fail; 7272 } 7273 7274 return 0; 7275 7276 fail: 7277 dev_kfree_skb_any(skb); 7278 7279 return ret; 7280 } 7281 7282 #define H2C_WOW_CAM_UPD_LEN 24 7283 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7284 struct rtw89_wow_cam_info *cam_info) 7285 { 7286 struct sk_buff *skb; 7287 int ret; 7288 7289 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7290 if (!skb) { 7291 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7292 return -ENOMEM; 7293 } 7294 7295 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7296 7297 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7298 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7299 if (cam_info->valid) { 7300 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7301 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7302 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7303 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7304 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7305 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7306 cam_info->negative_pattern_match); 7307 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7308 cam_info->skip_mac_hdr); 7309 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7310 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7311 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7312 } 7313 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7314 7315 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7316 H2C_CAT_MAC, 7317 H2C_CL_MAC_WOW, 7318 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7319 H2C_WOW_CAM_UPD_LEN); 7320 7321 ret = rtw89_h2c_tx(rtwdev, skb, false); 7322 if (ret) { 7323 rtw89_err(rtwdev, "failed to send h2c\n"); 7324 goto fail; 7325 } 7326 7327 return 0; 7328 fail: 7329 dev_kfree_skb_any(skb); 7330 7331 return ret; 7332 } 7333 7334 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7335 struct rtw89_vif_link *rtwvif_link, 7336 bool enable) 7337 { 7338 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7339 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7340 struct rtw89_h2c_wow_gtk_ofld *h2c; 7341 u8 macid = rtwvif_link->mac_id; 7342 u32 len = sizeof(*h2c); 7343 u8 pkt_id_sa_query = 0; 7344 struct sk_buff *skb; 7345 u8 pkt_id_eapol = 0; 7346 int ret; 7347 7348 if (!rtw_wow->gtk_alg) 7349 return 0; 7350 7351 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7352 if (!skb) { 7353 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7354 return -ENOMEM; 7355 } 7356 7357 skb_put(skb, len); 7358 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7359 7360 if (!enable) 7361 goto hdr; 7362 7363 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7364 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7365 &pkt_id_eapol); 7366 if (ret) 7367 goto fail; 7368 7369 if (gtk_info->igtk_keyid) { 7370 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7371 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7372 &pkt_id_sa_query); 7373 if (ret) 7374 goto fail; 7375 } 7376 7377 /* not support TKIP yet */ 7378 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7379 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7380 le32_encode_bits(gtk_info->igtk_keyid ? 
1 : 0, 7381 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7382 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7383 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7384 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7385 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7386 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7387 h2c->gtk_info = rtw_wow->gtk_info; 7388 7389 hdr: 7390 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7391 H2C_CAT_MAC, 7392 H2C_CL_MAC_WOW, 7393 H2C_FUNC_GTK_OFLD, 0, 1, 7394 len); 7395 7396 ret = rtw89_h2c_tx(rtwdev, skb, false); 7397 if (ret) { 7398 rtw89_err(rtwdev, "failed to send h2c\n"); 7399 goto fail; 7400 } 7401 return 0; 7402 fail: 7403 dev_kfree_skb_any(skb); 7404 7405 return ret; 7406 } 7407 7408 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7409 bool enable) 7410 { 7411 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7412 struct rtw89_h2c_fwips *h2c; 7413 u32 len = sizeof(*h2c); 7414 struct sk_buff *skb; 7415 7416 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7417 if (!skb) { 7418 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7419 return -ENOMEM; 7420 } 7421 skb_put(skb, len); 7422 h2c = (struct rtw89_h2c_fwips *)skb->data; 7423 7424 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7425 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7426 7427 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7428 H2C_CAT_MAC, 7429 H2C_CL_MAC_PS, 7430 H2C_FUNC_IPS_CFG, 0, 1, 7431 len); 7432 7433 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7434 } 7435 7436 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7437 { 7438 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7439 struct rtw89_h2c_wow_aoac *h2c; 7440 u32 len = sizeof(*h2c); 7441 struct sk_buff *skb; 7442 7443 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7444 if (!skb) { 7445 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7446 return -ENOMEM; 7447 } 7448 7449 skb_put(skb, len); 7450 7451 /* This H2C only notifies firmware to generate an AOAC report C2H; 7452 * no other parameters are needed. 7453 */ 7454 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7455 H2C_CAT_MAC, 7456 H2C_CL_MAC_WOW, 7457 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7458 len); 7459 7460 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 7461 } 7462 7463 /* Return < 0 if a failure happens while waiting for the condition. 7464 * Return 0 when waiting for the condition succeeds. 7465 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7466 * where 1 means during SER.
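 * Note: the skb is consumed on every path below; it is freed here when rtw89_h2c_tx() fails, otherwise ownership passes to the TX path.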
7467 */ 7468 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7469 struct rtw89_wait_info *wait, unsigned int cond) 7470 { 7471 int ret; 7472 7473 ret = rtw89_h2c_tx(rtwdev, skb, false); 7474 if (ret) { 7475 rtw89_err(rtwdev, "failed to send h2c\n"); 7476 dev_kfree_skb_any(skb); 7477 return -EBUSY; 7478 } 7479 7480 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7481 return 1; 7482 7483 return rtw89_wait_for_cond(wait, cond); 7484 } 7485 7486 #define H2C_ADD_MCC_LEN 16 7487 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7488 const struct rtw89_fw_mcc_add_req *p) 7489 { 7490 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7491 struct sk_buff *skb; 7492 unsigned int cond; 7493 7494 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7495 if (!skb) { 7496 rtw89_err(rtwdev, 7497 "failed to alloc skb for add mcc\n"); 7498 return -ENOMEM; 7499 } 7500 7501 skb_put(skb, H2C_ADD_MCC_LEN); 7502 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7503 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7504 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7505 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7506 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7507 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7508 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7509 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7510 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7511 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7512 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7513 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7514 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7515 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7516 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7517 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7518 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7519 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7520 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7521 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7522 7523 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7524 H2C_CAT_MAC, 7525 H2C_CL_MCC, 7526 H2C_FUNC_ADD_MCC, 0, 0, 7527 H2C_ADD_MCC_LEN); 7528 7529 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7530 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7531 } 7532 7533 #define H2C_START_MCC_LEN 12 7534 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7535 const struct rtw89_fw_mcc_start_req *p) 7536 { 7537 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7538 struct sk_buff *skb; 7539 unsigned int cond; 7540 7541 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7542 if (!skb) { 7543 rtw89_err(rtwdev, 7544 "failed to alloc skb for start mcc\n"); 7545 return -ENOMEM; 7546 } 7547 7548 skb_put(skb, H2C_START_MCC_LEN); 7549 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7550 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7551 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7552 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7553 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7554 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7555 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 7556 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7557 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7558 7559 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7560 H2C_CAT_MAC, 7561 H2C_CL_MCC, 7562 H2C_FUNC_START_MCC, 0, 0, 7563 H2C_START_MCC_LEN); 7564 7565 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7566 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7567 } 7568 7569 #define H2C_STOP_MCC_LEN 4 7570 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7571 bool prev_groups) 7572 { 7573 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7574 struct sk_buff *skb; 7575 unsigned int cond; 7576 7577 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7578 if (!skb) { 7579 rtw89_err(rtwdev, 7580 "failed to alloc skb for stop mcc\n"); 7581 return -ENOMEM; 7582 } 7583 7584 skb_put(skb, H2C_STOP_MCC_LEN); 7585 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7586 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7587 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7588 7589 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7590 H2C_CAT_MAC, 7591 H2C_CL_MCC, 7592 H2C_FUNC_STOP_MCC, 0, 0, 7593 H2C_STOP_MCC_LEN); 7594 7595 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7596 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7597 } 7598 7599 #define H2C_DEL_MCC_GROUP_LEN 4 7600 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7601 bool prev_groups) 7602 { 7603 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7604 struct sk_buff *skb; 7605 unsigned int cond; 7606 7607 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7608 if (!skb) { 7609 rtw89_err(rtwdev, 7610 "failed to alloc skb for del mcc group\n"); 7611 return -ENOMEM; 7612 } 7613 7614 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7615 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7616 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7617 7618 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7619 H2C_CAT_MAC, 7620 H2C_CL_MCC, 7621 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7622 H2C_DEL_MCC_GROUP_LEN); 7623 7624 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7625 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7626 } 7627 7628 #define H2C_RESET_MCC_GROUP_LEN 4 7629 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7630 { 7631 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7632 struct sk_buff *skb; 7633 unsigned int cond; 7634 7635 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7636 if (!skb) { 7637 rtw89_err(rtwdev, 7638 "failed to alloc skb for reset mcc group\n"); 7639 return -ENOMEM; 7640 } 7641 7642 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7643 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7644 7645 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7646 H2C_CAT_MAC, 7647 H2C_CL_MCC, 7648 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7649 H2C_RESET_MCC_GROUP_LEN); 7650 7651 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7652 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7653 } 7654 7655 #define H2C_MCC_REQ_TSF_LEN 4 7656 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7657 const struct rtw89_fw_mcc_tsf_req *req, 7658 struct rtw89_mac_mcc_tsf_rpt *rpt) 7659 { 7660 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7661 struct rtw89_mac_mcc_tsf_rpt *tmp; 7662 struct sk_buff *skb; 7663 unsigned int cond; 7664 int ret; 7665 7666 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 7667 if (!skb) { 7668 rtw89_err(rtwdev, 7669 "failed to alloc skb for mcc req tsf\n"); 7670 return -ENOMEM; 7671 } 7672 7673 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7674 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7675 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7676 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7677 7678 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7679 H2C_CAT_MAC, 7680 H2C_CL_MCC, 7681 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7682 H2C_MCC_REQ_TSF_LEN); 7683 7684 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7685 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7686 if (ret) 7687 return ret; 7688 7689 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7690 *rpt = *tmp; 7691 7692 return 0; 7693 } 7694 7695 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7696 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7697 u8 *bitmap) 7698 { 7699 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7700 struct sk_buff *skb; 7701 unsigned int cond; 7702 u8 map_len; 7703 u8 h2c_len; 7704 7705 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7706 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7707 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7708 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7709 if (!skb) { 7710 rtw89_err(rtwdev, 7711 "failed to alloc skb for mcc macid bitmap\n"); 7712 return -ENOMEM; 7713 } 7714 7715 skb_put(skb, h2c_len); 7716 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7717 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7718 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7719 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7720 7721 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7722 H2C_CAT_MAC, 7723 H2C_CL_MCC, 7724 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7725 h2c_len); 7726 7727 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7728 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7729 } 7730 7731 #define H2C_MCC_SYNC_LEN 4 7732 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7733 u8 target, u8 offset) 7734 { 7735 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7736 struct sk_buff *skb; 7737 unsigned int cond; 7738 7739 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7740 if (!skb) { 7741 rtw89_err(rtwdev, 7742 "failed to alloc skb for mcc sync\n"); 7743 return -ENOMEM; 7744 } 7745 7746 skb_put(skb, H2C_MCC_SYNC_LEN); 7747 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7748 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7749 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7750 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7751 7752 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7753 H2C_CAT_MAC, 7754 H2C_CL_MCC, 7755 H2C_FUNC_MCC_SYNC, 0, 0, 7756 H2C_MCC_SYNC_LEN); 7757 7758 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 7759 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7760 } 7761 7762 #define H2C_MCC_SET_DURATION_LEN 20 7763 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7764 const struct rtw89_fw_mcc_duration *p) 7765 { 7766 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7767 struct sk_buff *skb; 7768 unsigned int cond; 7769 7770 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7771 if (!skb) { 7772 rtw89_err(rtwdev, 7773 "failed to alloc skb for mcc set duration\n"); 7774 return -ENOMEM; 7775 } 7776 7777 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
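/* Fill the fixed 20-byte set-duration command (H2C_MCC_SET_DURATION_LEN):
 * group and macid selectors, the start TSF as low/high 32-bit words, and the
 * per-macid durations (duration_x/duration_y).
 */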
7778 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 7779 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 7780 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 7781 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 7782 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 7783 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 7784 p->start_tsf_low); 7785 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 7786 p->start_tsf_high); 7787 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 7788 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 7789 7790 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7791 H2C_CAT_MAC, 7792 H2C_CL_MCC, 7793 H2C_FUNC_MCC_SET_DURATION, 0, 0, 7794 H2C_MCC_SET_DURATION_LEN); 7795 7796 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 7797 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7798 } 7799 7800 static 7801 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 7802 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 7803 struct rtw89_h2c_mrc_add_slot *slot_h2c) 7804 { 7805 bool fill_h2c = !!slot_h2c; 7806 unsigned int i; 7807 7808 if (!fill_h2c) 7809 goto calc_len; 7810 7811 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 7812 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 7813 le32_encode_bits(slot_arg->courtesy_en, 7814 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 7815 le32_encode_bits(slot_arg->role_num, 7816 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 7817 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 7818 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 7819 le32_encode_bits(slot_arg->courtesy_target, 7820 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 7821 7822 for (i = 0; i < slot_arg->role_num; i++) { 7823 slot_h2c->roles[i].w0 = 7824 le32_encode_bits(slot_arg->roles[i].macid, 7825 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 7826 le32_encode_bits(slot_arg->roles[i].role_type, 7827 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 7828 le32_encode_bits(slot_arg->roles[i].is_master, 7829 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 7830 le32_encode_bits(slot_arg->roles[i].en_tx_null, 7831 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 7832 le32_encode_bits(false, 7833 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 7834 le32_encode_bits(false, 7835 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 7836 slot_h2c->roles[i].w1 = 7837 le32_encode_bits(slot_arg->roles[i].central_ch, 7838 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 7839 le32_encode_bits(slot_arg->roles[i].primary_ch, 7840 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 7841 le32_encode_bits(slot_arg->roles[i].bw, 7842 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 7843 le32_encode_bits(slot_arg->roles[i].band, 7844 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 7845 le32_encode_bits(slot_arg->roles[i].null_early, 7846 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 7847 le32_encode_bits(false, 7848 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 7849 le32_encode_bits(true, 7850 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 7851 slot_h2c->roles[i].macid_main_bitmap = 7852 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 7853 slot_h2c->roles[i].macid_paired_bitmap = 7854 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 7855 } 7856 7857 calc_len: 7858 return struct_size(slot_h2c, roles, slot_arg->role_num); 7859 } 7860 7861 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 7862 const struct rtw89_fw_mrc_add_arg *arg) 7863 { 7864 struct rtw89_h2c_mrc_add *h2c_head; 7865 struct sk_buff *skb; 7866 unsigned int i; 7867 void *tmp; 7868 u32 
len; 7869 int ret; 7870 7871 len = sizeof(*h2c_head); 7872 for (i = 0; i < arg->slot_num; i++) 7873 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 7874 7875 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7876 if (!skb) { 7877 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 7878 return -ENOMEM; 7879 } 7880 7881 skb_put(skb, len); 7882 tmp = skb->data; 7883 7884 h2c_head = tmp; 7885 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 7886 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 7887 le32_encode_bits(arg->sch_type, 7888 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 7889 le32_encode_bits(arg->slot_num, 7890 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 7891 le32_encode_bits(arg->btc_in_sch, 7892 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 7893 7894 tmp += sizeof(*h2c_head); 7895 for (i = 0; i < arg->slot_num; i++) 7896 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 7897 7898 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7899 H2C_CAT_MAC, 7900 H2C_CL_MRC, 7901 H2C_FUNC_ADD_MRC, 0, 0, 7902 len); 7903 7904 ret = rtw89_h2c_tx(rtwdev, skb, false); 7905 if (ret) { 7906 rtw89_err(rtwdev, "failed to send h2c\n"); 7907 dev_kfree_skb_any(skb); 7908 return -EBUSY; 7909 } 7910 7911 return 0; 7912 } 7913 7914 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 7915 const struct rtw89_fw_mrc_start_arg *arg) 7916 { 7917 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7918 struct rtw89_h2c_mrc_start *h2c; 7919 u32 len = sizeof(*h2c); 7920 struct sk_buff *skb; 7921 unsigned int cond; 7922 7923 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7924 if (!skb) { 7925 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 7926 return -ENOMEM; 7927 } 7928 7929 skb_put(skb, len); 7930 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 7931 7932 h2c->w0 = le32_encode_bits(arg->sch_idx, 7933 RTW89_H2C_MRC_START_W0_SCH_IDX) | 7934 le32_encode_bits(arg->old_sch_idx, 7935 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 7936 le32_encode_bits(arg->action, 7937 RTW89_H2C_MRC_START_W0_ACTION); 7938 7939 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 7940 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 7941 7942 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7943 H2C_CAT_MAC, 7944 H2C_CL_MRC, 7945 H2C_FUNC_START_MRC, 0, 0, 7946 len); 7947 7948 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 7949 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7950 } 7951 7952 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 7953 { 7954 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7955 struct rtw89_h2c_mrc_del *h2c; 7956 u32 len = sizeof(*h2c); 7957 struct sk_buff *skb; 7958 unsigned int cond; 7959 7960 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7961 if (!skb) { 7962 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 7963 return -ENOMEM; 7964 } 7965 7966 skb_put(skb, len); 7967 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 7968 7969 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 7970 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 7971 7972 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7973 H2C_CAT_MAC, 7974 H2C_CL_MRC, 7975 H2C_FUNC_DEL_MRC, 0, 0, 7976 len); 7977 7978 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 7979 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7980 } 7981 7982 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 7983 const struct rtw89_fw_mrc_req_tsf_arg *arg, 7984 struct rtw89_mac_mrc_tsf_rpt *rpt) 7985 { 7986 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7987 struct 
rtw89_h2c_mrc_req_tsf *h2c; 7988 struct rtw89_mac_mrc_tsf_rpt *tmp; 7989 struct sk_buff *skb; 7990 unsigned int i; 7991 u32 len; 7992 int ret; 7993 7994 len = struct_size(h2c, infos, arg->num); 7995 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7996 if (!skb) { 7997 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 7998 return -ENOMEM; 7999 } 8000 8001 skb_put(skb, len); 8002 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8003 8004 h2c->req_tsf_num = arg->num; 8005 for (i = 0; i < arg->num; i++) 8006 h2c->infos[i] = 8007 u8_encode_bits(arg->infos[i].band, 8008 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8009 u8_encode_bits(arg->infos[i].port, 8010 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8011 8012 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8013 H2C_CAT_MAC, 8014 H2C_CL_MRC, 8015 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8016 len); 8017 8018 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8019 if (ret) 8020 return ret; 8021 8022 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8023 *rpt = *tmp; 8024 8025 return 0; 8026 } 8027 8028 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8029 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8030 { 8031 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8032 u32 len = sizeof(*h2c); 8033 struct sk_buff *skb; 8034 int ret; 8035 8036 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8037 if (!skb) { 8038 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8039 return -ENOMEM; 8040 } 8041 8042 skb_put(skb, len); 8043 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8044 8045 h2c->w0 = le32_encode_bits(arg->sch_idx, 8046 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8047 le32_encode_bits(arg->action, 8048 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8049 le32_encode_bits(arg->macid, 8050 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8051 h2c->w1 = le32_encode_bits(arg->client_macid, 8052 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8053 8054 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8055 H2C_CAT_MAC, 8056 H2C_CL_MRC, 8057 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8058 len); 8059 8060 ret = rtw89_h2c_tx(rtwdev, skb, false); 8061 if (ret) { 8062 rtw89_err(rtwdev, "failed to send h2c\n"); 8063 dev_kfree_skb_any(skb); 8064 return -EBUSY; 8065 } 8066 8067 return 0; 8068 } 8069 8070 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8071 const struct rtw89_fw_mrc_sync_arg *arg) 8072 { 8073 struct rtw89_h2c_mrc_sync *h2c; 8074 u32 len = sizeof(*h2c); 8075 struct sk_buff *skb; 8076 int ret; 8077 8078 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8079 if (!skb) { 8080 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 8081 return -ENOMEM; 8082 } 8083 8084 skb_put(skb, len); 8085 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8086 8087 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8088 le32_encode_bits(arg->src.port, 8089 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8090 le32_encode_bits(arg->src.band, 8091 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8092 le32_encode_bits(arg->dest.port, 8093 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8094 le32_encode_bits(arg->dest.band, 8095 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8096 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8097 8098 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8099 H2C_CAT_MAC, 8100 H2C_CL_MRC, 8101 H2C_FUNC_MRC_SYNC, 0, 0, 8102 len); 8103 8104 ret = rtw89_h2c_tx(rtwdev, skb, false); 8105 if (ret) { 8106 rtw89_err(rtwdev, "failed to send h2c\n"); 8107 dev_kfree_skb_any(skb); 8108 return -EBUSY; 8109 } 8110 8111 return 0; 8112 } 8113 8114 int 
rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8115 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8116 { 8117 struct rtw89_h2c_mrc_upd_duration *h2c; 8118 struct sk_buff *skb; 8119 unsigned int i; 8120 u32 len; 8121 int ret; 8122 8123 len = struct_size(h2c, slots, arg->slot_num); 8124 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8125 if (!skb) { 8126 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8127 return -ENOMEM; 8128 } 8129 8130 skb_put(skb, len); 8131 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8132 8133 h2c->w0 = le32_encode_bits(arg->sch_idx, 8134 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8135 le32_encode_bits(arg->slot_num, 8136 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8137 le32_encode_bits(false, 8138 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8139 8140 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8141 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8142 8143 for (i = 0; i < arg->slot_num; i++) { 8144 h2c->slots[i] = 8145 le32_encode_bits(arg->slots[i].slot_idx, 8146 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8147 le32_encode_bits(arg->slots[i].duration, 8148 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8149 } 8150 8151 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8152 H2C_CAT_MAC, 8153 H2C_CL_MRC, 8154 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8155 len); 8156 8157 ret = rtw89_h2c_tx(rtwdev, skb, false); 8158 if (ret) { 8159 rtw89_err(rtwdev, "failed to send h2c\n"); 8160 dev_kfree_skb_any(skb); 8161 return -EBUSY; 8162 } 8163 8164 return 0; 8165 } 8166 8167 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8168 { 8169 static const u8 zeros[U8_MAX] = {}; 8170 8171 return memcmp(ext_ptr, zeros, ext_len) == 0; 8172 } 8173 8174 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8175 ({ \ 8176 u8 __var_sz = sizeof(*(e)); \ 8177 bool __accept; \ 8178 if (__var_sz >= (ent_sz)) \ 8179 __accept = true; \ 8180 else \ 8181 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8182 (ent_sz) - __var_sz);\ 8183 __accept; \ 8184 }) 8185 8186 static bool 8187 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8188 const void *cursor, 8189 const struct rtw89_txpwr_conf *conf) 8190 { 8191 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8192 return false; 8193 8194 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8195 return false; 8196 8197 switch (e->rs) { 8198 case RTW89_RS_CCK: 8199 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8200 return false; 8201 break; 8202 case RTW89_RS_OFDM: 8203 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8204 return false; 8205 break; 8206 case RTW89_RS_MCS: 8207 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8208 e->nss >= RTW89_NSS_NUM || 8209 e->ofdma >= RTW89_OFDMA_NUM) 8210 return false; 8211 break; 8212 case RTW89_RS_HEDCM: 8213 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8214 e->nss >= RTW89_NSS_HEDCM_NUM || 8215 e->ofdma >= RTW89_OFDMA_NUM) 8216 return false; 8217 break; 8218 case RTW89_RS_OFFSET: 8219 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8220 return false; 8221 break; 8222 default: 8223 return false; 8224 } 8225 8226 return true; 8227 } 8228 8229 static 8230 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8231 const struct rtw89_txpwr_table *tbl) 8232 { 8233 const struct rtw89_txpwr_conf *conf = tbl->data; 8234 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8235 struct rtw89_txpwr_byrate *byr_head; 8236 struct rtw89_rate_desc desc = {}; 8237 const void *cursor; 8238 u32 data; 8239 s8 *byr; 8240 int i; 8241 8242 
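/* Walk each by-rate entry in the firmware TX-power blob, skip entries that
 * fail validation, and unpack the packed rate values (one byte per rate,
 * least-significant byte first) into rtwdev->byr[band][bw] via
 * rtw89_phy_raw_byr_seek().
 */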
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8243 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8244 continue; 8245 8246 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8247 data = le32_to_cpu(entry.data); 8248 desc.ofdma = entry.ofdma; 8249 desc.nss = entry.nss; 8250 desc.rs = entry.rs; 8251 8252 for (i = 0; i < entry.len; i++, data >>= 8) { 8253 desc.idx = entry.shf + i; 8254 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8255 *byr = data & 0xff; 8256 } 8257 } 8258 } 8259 8260 static bool 8261 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8262 const void *cursor, 8263 const struct rtw89_txpwr_conf *conf) 8264 { 8265 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8266 return false; 8267 8268 if (e->bw >= RTW89_2G_BW_NUM) 8269 return false; 8270 if (e->nt >= RTW89_NTX_NUM) 8271 return false; 8272 if (e->rs >= RTW89_RS_LMT_NUM) 8273 return false; 8274 if (e->bf >= RTW89_BF_NUM) 8275 return false; 8276 if (e->regd >= RTW89_REGD_NUM) 8277 return false; 8278 if (e->ch_idx >= RTW89_2G_CH_NUM) 8279 return false; 8280 8281 return true; 8282 } 8283 8284 static 8285 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8286 { 8287 const struct rtw89_txpwr_conf *conf = &data->conf; 8288 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8289 const void *cursor; 8290 8291 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8292 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8293 continue; 8294 8295 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8296 [entry.ch_idx] = entry.v; 8297 } 8298 } 8299 8300 static bool 8301 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8302 const void *cursor, 8303 const struct rtw89_txpwr_conf *conf) 8304 { 8305 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8306 return false; 8307 8308 if (e->bw >= RTW89_5G_BW_NUM) 8309 return false; 8310 if (e->nt >= RTW89_NTX_NUM) 8311 return false; 8312 if (e->rs >= RTW89_RS_LMT_NUM) 8313 return false; 8314 if (e->bf >= RTW89_BF_NUM) 8315 return false; 8316 if (e->regd >= RTW89_REGD_NUM) 8317 return false; 8318 if (e->ch_idx >= RTW89_5G_CH_NUM) 8319 return false; 8320 8321 return true; 8322 } 8323 8324 static 8325 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8326 { 8327 const struct rtw89_txpwr_conf *conf = &data->conf; 8328 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8329 const void *cursor; 8330 8331 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8332 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8333 continue; 8334 8335 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8336 [entry.ch_idx] = entry.v; 8337 } 8338 } 8339 8340 static bool 8341 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8342 const void *cursor, 8343 const struct rtw89_txpwr_conf *conf) 8344 { 8345 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8346 return false; 8347 8348 if (e->bw >= RTW89_6G_BW_NUM) 8349 return false; 8350 if (e->nt >= RTW89_NTX_NUM) 8351 return false; 8352 if (e->rs >= RTW89_RS_LMT_NUM) 8353 return false; 8354 if (e->bf >= RTW89_BF_NUM) 8355 return false; 8356 if (e->regd >= RTW89_REGD_NUM) 8357 return false; 8358 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8359 return false; 8360 if (e->ch_idx >= RTW89_6G_CH_NUM) 8361 return false; 8362 8363 return true; 8364 } 8365 8366 static 8367 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8368 { 8369 const struct 
rtw89_txpwr_conf *conf = &data->conf; 8370 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8371 const void *cursor; 8372 8373 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8374 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8375 continue; 8376 8377 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8378 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8379 } 8380 } 8381 8382 static bool 8383 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8384 const void *cursor, 8385 const struct rtw89_txpwr_conf *conf) 8386 { 8387 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8388 return false; 8389 8390 if (e->ru >= RTW89_RU_NUM) 8391 return false; 8392 if (e->nt >= RTW89_NTX_NUM) 8393 return false; 8394 if (e->regd >= RTW89_REGD_NUM) 8395 return false; 8396 if (e->ch_idx >= RTW89_2G_CH_NUM) 8397 return false; 8398 8399 return true; 8400 } 8401 8402 static 8403 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8404 { 8405 const struct rtw89_txpwr_conf *conf = &data->conf; 8406 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8407 const void *cursor; 8408 8409 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8410 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 8411 continue; 8412 8413 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8414 } 8415 } 8416 8417 static bool 8418 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8419 const void *cursor, 8420 const struct rtw89_txpwr_conf *conf) 8421 { 8422 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8423 return false; 8424 8425 if (e->ru >= RTW89_RU_NUM) 8426 return false; 8427 if (e->nt >= RTW89_NTX_NUM) 8428 return false; 8429 if (e->regd >= RTW89_REGD_NUM) 8430 return false; 8431 if (e->ch_idx >= RTW89_5G_CH_NUM) 8432 return false; 8433 8434 return true; 8435 } 8436 8437 static 8438 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8439 { 8440 const struct rtw89_txpwr_conf *conf = &data->conf; 8441 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8442 const void *cursor; 8443 8444 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8445 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8446 continue; 8447 8448 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8449 } 8450 } 8451 8452 static bool 8453 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8454 const void *cursor, 8455 const struct rtw89_txpwr_conf *conf) 8456 { 8457 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8458 return false; 8459 8460 if (e->ru >= RTW89_RU_NUM) 8461 return false; 8462 if (e->nt >= RTW89_NTX_NUM) 8463 return false; 8464 if (e->regd >= RTW89_REGD_NUM) 8465 return false; 8466 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8467 return false; 8468 if (e->ch_idx >= RTW89_6G_CH_NUM) 8469 return false; 8470 8471 return true; 8472 } 8473 8474 static 8475 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8476 { 8477 const struct rtw89_txpwr_conf *conf = &data->conf; 8478 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8479 const void *cursor; 8480 8481 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8482 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8483 continue; 8484 8485 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8486 [entry.ch_idx] = entry.v; 8487 } 8488 } 8489 8490 static bool 8491 fw_tx_shape_lmt_entry_valid(const 
struct rtw89_fw_tx_shape_lmt_entry *e, 8492 const void *cursor, 8493 const struct rtw89_txpwr_conf *conf) 8494 { 8495 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8496 return false; 8497 8498 if (e->band >= RTW89_BAND_NUM) 8499 return false; 8500 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8501 return false; 8502 if (e->regd >= RTW89_REGD_NUM) 8503 return false; 8504 8505 return true; 8506 } 8507 8508 static 8509 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8510 { 8511 const struct rtw89_txpwr_conf *conf = &data->conf; 8512 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8513 const void *cursor; 8514 8515 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8516 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8517 continue; 8518 8519 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8520 } 8521 } 8522 8523 static bool 8524 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8525 const void *cursor, 8526 const struct rtw89_txpwr_conf *conf) 8527 { 8528 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8529 return false; 8530 8531 if (e->band >= RTW89_BAND_NUM) 8532 return false; 8533 if (e->regd >= RTW89_REGD_NUM) 8534 return false; 8535 8536 return true; 8537 } 8538 8539 static 8540 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8541 { 8542 const struct rtw89_txpwr_conf *conf = &data->conf; 8543 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8544 const void *cursor; 8545 8546 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8547 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8548 continue; 8549 8550 data->v[entry.band][entry.regd] = entry.v; 8551 } 8552 } 8553 8554 const struct rtw89_rfe_parms * 8555 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8556 const struct rtw89_rfe_parms *init) 8557 { 8558 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8559 struct rtw89_rfe_parms *parms; 8560 8561 if (!rfe_data) 8562 return init; 8563 8564 parms = &rfe_data->rfe_parms; 8565 if (init) 8566 *parms = *init; 8567 8568 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8569 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8570 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8571 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8572 parms->byr_tbl = &rfe_data->byrate.tbl; 8573 } 8574 8575 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8576 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8577 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8578 } 8579 8580 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8581 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8582 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8583 } 8584 8585 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8586 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8587 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8588 } 8589 8590 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8591 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8592 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8593 } 8594 8595 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8596 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8597 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8598 } 8599 8600 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8601 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8602 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8603 } 8604 8605 if 
(rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8606 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8607 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8608 } 8609 8610 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8611 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8612 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8613 } 8614 8615 return parms; 8616 } 8617
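
/* Hedged usage sketch for rtw89_load_rfe_data_from_fw(); the caller location
 * and the name "dflt_parms" below are assumptions for illustration, not taken
 * from this file. Setup code would do something like
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, dflt_parms);
 *
 * so that firmware-provided TX-power tables which pass rtw89_txpwr_conf_valid()
 * override the built-in defaults table by table, while tables the firmware
 * does not provide keep the values copied from @init.
 */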