1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 struct rtw89_eapol_2_of_2 { 19 u8 gtkbody[14]; 20 u8 key_des_ver; 21 u8 rsvd[92]; 22 } __packed; 23 24 struct rtw89_sa_query { 25 u8 category; 26 u8 action; 27 } __packed; 28 29 struct rtw89_arp_rsp { 30 u8 llc_hdr[sizeof(rfc1042_header)]; 31 __be16 llc_type; 32 struct arphdr arp_hdr; 33 u8 sender_hw[ETH_ALEN]; 34 __be32 sender_ip; 35 u8 target_hw[ETH_ALEN]; 36 __be32 target_ip; 37 } __packed; 38 39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 40 41 union rtw89_fw_element_arg { 42 size_t offset; 43 enum rtw89_rf_path rf_path; 44 enum rtw89_fw_type fw_type; 45 }; 46 47 struct rtw89_fw_element_handler { 48 int (*fn)(struct rtw89_dev *rtwdev, 49 const struct rtw89_fw_element_hdr *elm, 50 const union rtw89_fw_element_arg arg); 51 const union rtw89_fw_element_arg arg; 52 const char *name; 53 }; 54 55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 56 struct sk_buff *skb); 57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 58 struct rtw89_wait_info *wait, unsigned int cond); 59 static int __parse_security_section(struct rtw89_dev *rtwdev, 60 struct rtw89_fw_bin_info *info, 61 struct rtw89_fw_hdr_section_info *section_info, 62 #if defined(__linux__) 63 const void *content, 64 #elif defined(__FreeBSD__) 65 const u8 *content, 66 #endif 67 u32 *mssc_len); 68 69 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 70 bool header) 71 { 72 struct sk_buff *skb; 73 u32 header_len = 0; 74 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 75 76 if (header) 77 header_len = H2C_HEADER_LEN; 78 79 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 80 if (!skb) 81 return NULL; 82 skb_reserve(skb, header_len + h2c_desc_size); 83 memset(skb->data, 0, len); 84 85 return skb; 86 } 87 88 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 89 { 90 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 91 } 92 93 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 94 { 95 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 96 } 97 98 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 99 { 100 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 101 u8 val; 102 int ret; 103 104 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 105 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 106 1, FWDL_WAIT_CNT, false, rtwdev, type); 107 if (ret) { 108 switch (val) { 109 case RTW89_FWDL_CHECKSUM_FAIL: 110 rtw89_err(rtwdev, "fw checksum fail\n"); 111 return -EINVAL; 112 113 case RTW89_FWDL_SECURITY_FAIL: 114 rtw89_err(rtwdev, "fw security fail\n"); 115 return -EINVAL; 116 117 case RTW89_FWDL_CV_NOT_MATCH: 118 rtw89_err(rtwdev, "fw cv not match\n"); 119 return -EINVAL; 120 121 default: 122 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 123 return -EBUSY; 124 } 125 } 126 127 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 128 129 return 0; 130 } 131 132 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 133 struct rtw89_fw_bin_info *info) 134 { 135 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 136 const struct 
rtw89_chip_info *chip = rtwdev->chip; 137 struct rtw89_fw_hdr_section_info *section_info; 138 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 139 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 140 const struct rtw89_fw_hdr_section *section; 141 const u8 *fw_end = fw + len; 142 const u8 *bin; 143 u32 base_hdr_len; 144 u32 mssc_len; 145 int ret; 146 u32 i; 147 148 if (!info) 149 return -EINVAL; 150 151 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 152 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 153 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 154 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 155 156 if (info->dynamic_hdr_en) { 157 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 158 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 159 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 160 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 161 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 162 return -EINVAL; 163 } 164 } else { 165 info->hdr_len = base_hdr_len; 166 info->dynamic_hdr_len = 0; 167 } 168 169 bin = fw + info->hdr_len; 170 171 /* jump to section header */ 172 section_info = info->section_info; 173 for (i = 0; i < info->section_num; i++) { 174 section = &fw_hdr->sections[i]; 175 section_info->type = 176 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 177 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 178 179 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 180 section_info->len += FWDL_SECTION_CHKSUM_LEN; 181 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 182 section_info->dladdr = 183 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 184 section_info->addr = bin; 185 186 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 187 section_info->mssc = 188 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 189 190 ret = __parse_security_section(rtwdev, info, section_info, 191 bin, &mssc_len); 192 if (ret) 193 return ret; 194 195 if (sec->secure_boot && chip->chip_id == RTL8852B) 196 section_info->len_override = 960; 197 } else { 198 section_info->mssc = 0; 199 mssc_len = 0; 200 } 201 202 rtw89_debug(rtwdev, RTW89_DBG_FW, 203 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 204 i, section_info->type, section_info->len, 205 section_info->mssc, mssc_len, bin - fw); 206 rtw89_debug(rtwdev, RTW89_DBG_FW, 207 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 208 section_info->ignore, section_info->key_addr, 209 section_info->key_addr ? 
210 section_info->key_addr - section_info->addr : 0, 211 section_info->key_len, section_info->key_idx); 212 213 bin += section_info->len + mssc_len; 214 section_info++; 215 } 216 217 if (fw_end != bin) { 218 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 219 return -EINVAL; 220 } 221 222 return 0; 223 } 224 225 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 226 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 227 u32 rmp_tbl_size, u32 *key_idx) 228 { 229 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 230 u32 sel_byte_idx; 231 u32 mss_sel_idx; 232 u8 sel_bit_idx; 233 int i; 234 235 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 236 if (!mss_hdr->defen) 237 return -ENOENT; 238 239 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 240 sec->mss_key_num; 241 } else { 242 if (mss_hdr->defen) 243 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 244 else 245 mss_sel_idx = 0; 246 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 247 le16_to_cpu(mss_hdr->msscust_max) + 248 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 249 sec->mss_key_num; 250 } 251 252 sel_byte_idx = mss_sel_idx >> 3; 253 sel_bit_idx = mss_sel_idx & 0x7; 254 255 if (sel_byte_idx >= rmp_tbl_size) 256 return -EFAULT; 257 258 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 259 return -ENOENT; 260 261 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 262 263 for (i = 0; i < sel_byte_idx; i++) 264 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 265 266 return 0; 267 } 268 269 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 270 struct rtw89_fw_bin_info *info, 271 struct rtw89_fw_hdr_section_info *section_info, 272 #if defined(__linux__) 273 const void *content, 274 #elif defined(__FreeBSD__) 275 const u8 *content, 276 #endif 277 u32 *mssc_len) 278 { 279 #if defined(__linux__) 280 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 281 const union rtw89_fw_section_mssc_content *section_content = content; 282 #elif defined(__FreeBSD__) 283 const struct rtw89_fw_mss_pool_hdr *mss_hdr = (const void *)(content + section_info->len); 284 const union rtw89_fw_section_mssc_content *section_content = (const void *)content; 285 #endif 286 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 287 u32 rmp_tbl_size; 288 u32 key_sign_len; 289 u32 real_key_idx; 290 u32 sb_sel_ver; 291 int ret; 292 293 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 294 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 295 return -ENOENT; 296 } 297 298 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 299 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 300 le16_to_cpu(mss_hdr->msscust_max) * 301 mss_hdr->mssdev_max) >> 3; 302 if (mss_hdr->defen) 303 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 304 } else { 305 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 306 mss_hdr->rmpfmt); 307 return -EINVAL; 308 } 309 310 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 311 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 312 rmp_tbl_size, (int)sizeof(*mss_hdr), 313 le32_to_cpu(mss_hdr->key_raw_offset)); 314 return -EINVAL; 315 } 316 317 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 318 if (!key_sign_len) 319 key_sign_len = 512; 320 321 if (info->dsp_checksum) 322 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 323 324 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 325 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 
326 327 if (!sec->secure_boot) 328 goto out; 329 330 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 331 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 332 goto ignore; 333 334 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 335 if (ret) 336 goto ignore; 337 338 section_info->key_addr = content + section_info->len + 339 le32_to_cpu(mss_hdr->key_raw_offset) + 340 key_sign_len * real_key_idx; 341 section_info->key_len = key_sign_len; 342 section_info->key_idx = real_key_idx; 343 344 out: 345 if (info->secure_section_exist) { 346 section_info->ignore = true; 347 return 0; 348 } 349 350 info->secure_section_exist = true; 351 352 return 0; 353 354 ignore: 355 section_info->ignore = true; 356 357 return 0; 358 } 359 360 static int __parse_security_section(struct rtw89_dev *rtwdev, 361 struct rtw89_fw_bin_info *info, 362 struct rtw89_fw_hdr_section_info *section_info, 363 #if defined(__linux__) 364 const void *content, 365 #elif defined(__FreeBSD__) 366 const u8 *content, 367 #endif 368 u32 *mssc_len) 369 { 370 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 371 int ret; 372 373 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 374 ret = __parse_formatted_mssc(rtwdev, info, section_info, 375 content, mssc_len); 376 if (ret) 377 return -EINVAL; 378 } else { 379 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 380 if (info->dsp_checksum) 381 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 382 383 if (sec->secure_boot) { 384 if (sec->mss_idx >= section_info->mssc) 385 return -EFAULT; 386 section_info->key_addr = content + section_info->len + 387 sec->mss_idx * FWDL_SECURITY_SIGLEN; 388 section_info->key_len = FWDL_SECURITY_SIGLEN; 389 } 390 391 info->secure_section_exist = true; 392 } 393 394 return 0; 395 } 396 397 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 398 struct rtw89_fw_bin_info *info) 399 { 400 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 401 struct rtw89_fw_hdr_section_info *section_info; 402 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 403 const struct rtw89_fw_hdr_section_v1 *section; 404 const u8 *fw_end = fw + len; 405 const u8 *bin; 406 u32 base_hdr_len; 407 u32 mssc_len; 408 int ret; 409 u32 i; 410 411 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 412 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 413 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 414 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 415 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 416 417 if (info->dynamic_hdr_en) { 418 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 419 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 420 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 421 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 422 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 423 return -EINVAL; 424 } 425 } else { 426 info->hdr_len = base_hdr_len; 427 info->dynamic_hdr_len = 0; 428 } 429 430 bin = fw + info->hdr_len; 431 432 /* jump to section header */ 433 section_info = info->section_info; 434 for (i = 0; i < info->section_num; i++) { 435 section = &fw_hdr->sections[i]; 436 437 section_info->type = 438 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 439 section_info->len = 440 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 441 if (le32_get_bits(section->w1, 
FWSECTION_HDR_V1_W1_CHECKSUM)) 442 section_info->len += FWDL_SECTION_CHKSUM_LEN; 443 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 444 section_info->dladdr = 445 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 446 section_info->addr = bin; 447 448 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 449 section_info->mssc = 450 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 451 452 ret = __parse_security_section(rtwdev, info, section_info, 453 bin, &mssc_len); 454 if (ret) 455 return ret; 456 } else { 457 section_info->mssc = 0; 458 mssc_len = 0; 459 } 460 461 rtw89_debug(rtwdev, RTW89_DBG_FW, 462 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 463 i, section_info->type, section_info->len, 464 section_info->mssc, mssc_len, bin - fw); 465 rtw89_debug(rtwdev, RTW89_DBG_FW, 466 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 467 section_info->ignore, section_info->key_addr, 468 section_info->key_addr ? 469 section_info->key_addr - section_info->addr : 0, 470 section_info->key_len, section_info->key_idx); 471 472 bin += section_info->len + mssc_len; 473 section_info++; 474 } 475 476 if (fw_end != bin) { 477 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 478 return -EINVAL; 479 } 480 481 if (!info->secure_section_exist) 482 rtw89_warn(rtwdev, "no firmware secure section\n"); 483 484 return 0; 485 } 486 487 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 488 const struct rtw89_fw_suit *fw_suit, 489 struct rtw89_fw_bin_info *info) 490 { 491 const u8 *fw = fw_suit->data; 492 u32 len = fw_suit->size; 493 494 if (!fw || !len) { 495 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 496 return -ENOENT; 497 } 498 499 switch (fw_suit->hdr_ver) { 500 case 0: 501 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 502 case 1: 503 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 504 default: 505 return -ENOENT; 506 } 507 } 508 509 static 510 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 511 struct rtw89_fw_suit *fw_suit, bool nowarn) 512 { 513 struct rtw89_fw_info *fw_info = &rtwdev->fw; 514 const struct firmware *firmware = fw_info->req.firmware; 515 const u8 *mfw = firmware->data; 516 u32 mfw_len = firmware->size; 517 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 518 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 519 int i; 520 521 if (mfw_hdr->sig != RTW89_MFW_SIG) { 522 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 523 /* legacy firmware support normal type only */ 524 if (type != RTW89_FW_NORMAL) 525 return -EINVAL; 526 fw_suit->data = mfw; 527 fw_suit->size = mfw_len; 528 return 0; 529 } 530 531 for (i = 0; i < mfw_hdr->fw_nr; i++) { 532 tmp = &mfw_hdr->info[i]; 533 if (tmp->type != type) 534 continue; 535 536 if (type == RTW89_FW_LOGFMT) { 537 mfw_info = tmp; 538 goto found; 539 } 540 541 /* Version order of WiFi firmware in firmware file are not in order, 542 * pass all firmware to find the equal or less but closest version. 
543 */ 544 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 545 if (!mfw_info || mfw_info->cv < tmp->cv) 546 mfw_info = tmp; 547 } 548 } 549 550 if (mfw_info) 551 goto found; 552 553 if (!nowarn) 554 rtw89_err(rtwdev, "no suitable firmware found\n"); 555 return -ENOENT; 556 557 found: 558 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 559 fw_suit->size = le32_to_cpu(mfw_info->size); 560 return 0; 561 } 562 563 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 564 { 565 struct rtw89_fw_info *fw_info = &rtwdev->fw; 566 const struct firmware *firmware = fw_info->req.firmware; 567 const struct rtw89_mfw_hdr *mfw_hdr = 568 (const struct rtw89_mfw_hdr *)firmware->data; 569 const struct rtw89_mfw_info *mfw_info; 570 u32 size; 571 572 if (mfw_hdr->sig != RTW89_MFW_SIG) { 573 rtw89_warn(rtwdev, "not mfw format\n"); 574 return 0; 575 } 576 577 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 578 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 579 580 return size; 581 } 582 583 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 584 struct rtw89_fw_suit *fw_suit, 585 const struct rtw89_fw_hdr *hdr) 586 { 587 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 588 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 589 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 590 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 591 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 592 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 593 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 594 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 595 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 596 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 597 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 598 } 599 600 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 601 struct rtw89_fw_suit *fw_suit, 602 const struct rtw89_fw_hdr_v1 *hdr) 603 { 604 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 605 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 606 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 607 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 608 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 609 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 610 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 611 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 612 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 613 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 614 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 615 } 616 617 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 618 enum rtw89_fw_type type, 619 struct rtw89_fw_suit *fw_suit) 620 { 621 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 622 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 623 624 if (type == RTW89_FW_LOGFMT) 625 return 0; 626 627 fw_suit->type = type; 628 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 629 630 switch (fw_suit->hdr_ver) { 631 case 0: 632 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 633 break; 634 case 1: 635 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 636 break; 637 default: 638 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 639 
fw_suit->hdr_ver); 640 return -ENOENT; 641 } 642 643 rtw89_info(rtwdev, 644 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 645 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 646 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 647 648 return 0; 649 } 650 651 static 652 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 653 bool nowarn) 654 { 655 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 656 int ret; 657 658 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 659 if (ret) 660 return ret; 661 662 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 663 } 664 665 static 666 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 667 const struct rtw89_fw_element_hdr *elm, 668 const union rtw89_fw_element_arg arg) 669 { 670 #if defined(__linux__) 671 enum rtw89_fw_type type = arg.fw_type; 672 #elif defined(__FreeBSD__) 673 const enum rtw89_fw_type type = arg.fw_type; 674 #endif 675 struct rtw89_hal *hal = &rtwdev->hal; 676 struct rtw89_fw_suit *fw_suit; 677 678 /* Version of BB MCU is in decreasing order in firmware file, so take 679 * first equal or less version, which is equal or less but closest version. 680 */ 681 if (hal->cv < elm->u.bbmcu.cv) 682 return 1; /* ignore this element */ 683 684 fw_suit = rtw89_fw_suit_get(rtwdev, type); 685 if (fw_suit->data) 686 return 1; /* ignore this element (a firmware is taken already) */ 687 688 fw_suit->data = elm->u.bbmcu.contents; 689 fw_suit->size = le32_to_cpu(elm->size); 690 691 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 692 } 693 694 #define __DEF_FW_FEAT_COND(__cond, __op) \ 695 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 696 { \ 697 return suit_ver_code __op comp_ver_code; \ 698 } 699 700 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 701 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 702 __DEF_FW_FEAT_COND(lt, <); /* less than */ 703 704 struct __fw_feat_cfg { 705 enum rtw89_core_chip_id chip_id; 706 enum rtw89_fw_feature feature; 707 u32 ver_code; 708 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 709 }; 710 711 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 712 { \ 713 .chip_id = _chip, \ 714 .feature = RTW89_FW_FEATURE_ ## _feat, \ 715 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 716 .cond = __fw_feat_cond_ ## _cond, \ 717 } 718 719 static const struct __fw_feat_cfg fw_feat_tbl[] = { 720 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 721 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 722 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 723 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 724 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 725 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 726 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 727 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 728 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 729 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 730 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 731 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 732 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 733 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 734 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 735 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 736 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 737 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 
738 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 739 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 740 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 741 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 742 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 743 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 744 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 745 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 746 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 747 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 748 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 749 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 750 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 751 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 752 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 753 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 754 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 755 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 756 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 757 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 758 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 759 }; 760 761 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 762 const struct rtw89_chip_info *chip, 763 u32 ver_code) 764 { 765 int i; 766 767 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 768 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 769 770 if (chip->chip_id != ent->chip_id) 771 continue; 772 773 if (ent->cond(ver_code, ent->ver_code)) 774 RTW89_SET_FW_FEATURE(ent->feature, fw); 775 } 776 } 777 778 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 779 { 780 const struct rtw89_chip_info *chip = rtwdev->chip; 781 const struct rtw89_fw_suit *fw_suit; 782 u32 suit_ver_code; 783 784 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 785 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 786 787 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 788 } 789 790 const struct firmware * 791 rtw89_early_fw_feature_recognize(struct device *device, 792 const struct rtw89_chip_info *chip, 793 struct rtw89_fw_info *early_fw, 794 int *used_fw_format) 795 { 796 const struct firmware *firmware; 797 char fw_name[64]; 798 int fw_format; 799 u32 ver_code; 800 int ret; 801 802 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 803 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 804 chip->fw_basename, fw_format); 805 806 ret = request_firmware(&firmware, fw_name, device); 807 if (!ret) { 808 dev_info(device, "loaded firmware %s\n", fw_name); 809 *used_fw_format = fw_format; 810 break; 811 } 812 } 813 814 if (ret) { 815 dev_err(device, "failed to early request firmware: %d\n", ret); 816 return NULL; 817 } 818 819 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 820 821 if (!ver_code) 822 goto out; 823 824 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 825 826 out: 827 return firmware; 828 } 829 830 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 831 { 832 const struct rtw89_chip_variant *variant = rtwdev->variant; 833 const struct rtw89_fw_suit *fw_suit; 834 u32 suit_ver_code; 835 836 if (!variant) 837 return 0; 838 839 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 840 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 841 842 if (variant->fw_min_ver_code > suit_ver_code) { 843 rtw89_err(rtwdev, "minimum 
required firmware version is 0x%x\n",
			  variant->fw_min_ver_code);
		return -ENOENT;
	}

	return 0;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	ret = rtw89_fw_validate_ver_required(rtwdev);
	if (ret)
		return ret;

	/* It still works if the wowlan firmware is missing. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works if the log format file is missing. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

#if defined(__linux__)
	conf = (void *)rtwdev->rfe_data + offset;
#elif defined(__FreeBSD__)
	conf = (void *)((u8 *)rtwdev->rfe_data + offset);
#endif

	/* if multiple entries match, the last one wins */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matched, accept the default */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		/* each matching type consumes 'subband' consecutive delta
		 * tables: 4 for 6 GHz, 3 for 5 GHz, 1 for 2 GHz
		 */
		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore this */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 1090 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 1091 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 1092 { .rf_path = RF_PATH_A }, "radio A"}, 1093 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1094 { .rf_path = RF_PATH_B }, NULL}, 1095 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1096 { .rf_path = RF_PATH_C }, NULL}, 1097 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1098 { .rf_path = RF_PATH_D }, NULL}, 1099 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1100 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1101 rtw89_fw_recognize_txpwr_from_elm, 1102 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1103 }, 1104 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1105 rtw89_fw_recognize_txpwr_from_elm, 1106 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1107 }, 1108 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1109 rtw89_fw_recognize_txpwr_from_elm, 1110 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1111 }, 1112 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1113 rtw89_fw_recognize_txpwr_from_elm, 1114 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1115 }, 1116 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1117 rtw89_fw_recognize_txpwr_from_elm, 1118 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1119 }, 1120 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1121 rtw89_fw_recognize_txpwr_from_elm, 1122 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1123 }, 1124 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1125 rtw89_fw_recognize_txpwr_from_elm, 1126 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1127 }, 1128 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1129 rtw89_fw_recognize_txpwr_from_elm, 1130 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1131 }, 1132 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1133 rtw89_fw_recognize_txpwr_from_elm, 1134 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1135 }, 1136 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1137 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1138 }, 1139 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1140 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1141 }, 1142 }; 1143 1144 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1145 { 1146 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1147 const struct firmware *firmware = fw_info->req.firmware; 1148 const struct rtw89_chip_info *chip = rtwdev->chip; 1149 u32 unrecognized_elements = chip->needed_fw_elms; 1150 const struct rtw89_fw_element_handler *handler; 1151 const struct rtw89_fw_element_hdr *hdr; 1152 u32 elm_size; 1153 u32 elem_id; 1154 u32 offset; 1155 int ret; 1156 1157 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1158 1159 offset = rtw89_mfw_get_size(rtwdev); 1160 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1161 if (offset == 0) 1162 return -EINVAL; 1163 1164 while (offset + sizeof(*hdr) < firmware->size) { 1165 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1166 1167 elm_size = le32_to_cpu(hdr->size); 1168 if (offset + elm_size >= firmware->size) { 1169 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1170 break; 1171 } 1172 1173 elem_id = le32_to_cpu(hdr->id); 1174 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1175 goto next; 1176 1177 handler = 
&__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section *section;
	int i;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_W7_PART_SIZE);

	for (i = 0; i < info->section_num; i++) {
		section_info = &info->section_info[i];

		if (!section_info->len_override)
			continue;

		section = &fw_hdr->sections[i];
		le32p_replace_bits(&section->w1, section_info->len_override,
				   FWSECTION_HDR_W1_SEC_SIZE);
	}

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return
(info->section_num - dst_sec_idx) * sizeof(*section); 1302 } 1303 1304 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1305 const struct rtw89_fw_suit *fw_suit, 1306 struct rtw89_fw_bin_info *info) 1307 { 1308 u32 len = info->hdr_len - info->dynamic_hdr_len; 1309 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1310 const u8 *fw = fw_suit->data; 1311 struct rtw89_fw_hdr *fw_hdr; 1312 struct sk_buff *skb; 1313 u32 truncated; 1314 u32 ret = 0; 1315 1316 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1317 if (!skb) { 1318 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1319 return -ENOMEM; 1320 } 1321 1322 skb_put_data(skb, fw, len); 1323 1324 switch (fw_suit->hdr_ver) { 1325 case 0: 1326 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1327 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1328 break; 1329 case 1: 1330 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1331 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1332 break; 1333 default: 1334 ret = -EOPNOTSUPP; 1335 goto fail; 1336 } 1337 1338 if (truncated) { 1339 len -= truncated; 1340 skb_trim(skb, len); 1341 } 1342 1343 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1344 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1345 H2C_FUNC_MAC_FWHDR_DL, len); 1346 1347 ret = rtw89_h2c_tx(rtwdev, skb, false); 1348 if (ret) { 1349 rtw89_err(rtwdev, "failed to send h2c\n"); 1350 ret = -1; 1351 goto fail; 1352 } 1353 1354 return 0; 1355 fail: 1356 dev_kfree_skb_any(skb); 1357 1358 return ret; 1359 } 1360 1361 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1362 const struct rtw89_fw_suit *fw_suit, 1363 struct rtw89_fw_bin_info *info) 1364 { 1365 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1366 int ret; 1367 1368 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1369 if (ret) { 1370 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1371 return ret; 1372 } 1373 1374 ret = mac->fwdl_check_path_ready(rtwdev, false); 1375 if (ret) { 1376 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1377 return ret; 1378 } 1379 1380 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1381 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1382 1383 return 0; 1384 } 1385 1386 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1387 struct rtw89_fw_hdr_section_info *info) 1388 { 1389 struct sk_buff *skb; 1390 const u8 *section = info->addr; 1391 u32 residue_len = info->len; 1392 bool copy_key = false; 1393 u32 pkt_len; 1394 int ret; 1395 1396 if (info->ignore) 1397 return 0; 1398 1399 if (info->len_override) { 1400 if (info->len_override > info->len) 1401 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1402 info->len_override, info->len); 1403 else 1404 residue_len = info->len_override; 1405 } 1406 1407 if (info->key_addr && info->key_len) { 1408 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1409 rtw89_warn(rtwdev, 1410 "ignore to copy key data because of len %d, %d, %d, %d\n", 1411 info->len, FWDL_SECTION_PER_PKT_LEN, 1412 info->key_len, residue_len); 1413 else 1414 copy_key = true; 1415 } 1416 1417 while (residue_len) { 1418 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1419 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1420 else 1421 pkt_len = residue_len; 1422 1423 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1424 if (!skb) { 1425 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1426 return -ENOMEM; 1427 } 1428 skb_put_data(skb, section, pkt_len); 1429 1430 if (copy_key) 1431 memcpy(skb->data + pkt_len - info->key_len, 1432 info->key_addr, 
info->key_len); 1433 1434 ret = rtw89_h2c_tx(rtwdev, skb, true); 1435 if (ret) { 1436 rtw89_err(rtwdev, "failed to send h2c\n"); 1437 ret = -1; 1438 goto fail; 1439 } 1440 1441 section += pkt_len; 1442 residue_len -= pkt_len; 1443 } 1444 1445 return 0; 1446 fail: 1447 dev_kfree_skb_any(skb); 1448 1449 return ret; 1450 } 1451 1452 static enum rtw89_fwdl_check_type 1453 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1454 const struct rtw89_fw_suit *fw_suit) 1455 { 1456 switch (fw_suit->type) { 1457 case RTW89_FW_BBMCU0: 1458 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1459 case RTW89_FW_BBMCU1: 1460 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1461 default: 1462 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1463 } 1464 } 1465 1466 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1467 const struct rtw89_fw_suit *fw_suit, 1468 struct rtw89_fw_bin_info *info) 1469 { 1470 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1471 const struct rtw89_chip_info *chip = rtwdev->chip; 1472 enum rtw89_fwdl_check_type chk_type; 1473 u8 section_num = info->section_num; 1474 int ret; 1475 1476 while (section_num--) { 1477 ret = __rtw89_fw_download_main(rtwdev, section_info); 1478 if (ret) 1479 return ret; 1480 section_info++; 1481 } 1482 1483 if (chip->chip_gen == RTW89_CHIP_AX) 1484 return 0; 1485 1486 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1487 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1488 if (ret) { 1489 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1490 fw_suit->type); 1491 return ret; 1492 } 1493 1494 return 0; 1495 } 1496 1497 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1498 { 1499 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1500 u32 addr = R_AX_DBG_PORT_SEL; 1501 u32 val32; 1502 u16 index; 1503 1504 if (chip_gen == RTW89_CHIP_BE) { 1505 addr = R_BE_WLCPU_PORT_PC; 1506 goto dump; 1507 } 1508 1509 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1510 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1511 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1512 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1513 1514 dump: 1515 for (index = 0; index < 15; index++) { 1516 val32 = rtw89_read32(rtwdev, addr); 1517 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1518 #if defined(__linux__) 1519 fsleep(10); 1520 #elif defined(__FreeBSD__) 1521 /* Seems we are called from a context we cannot sleep. 
*/ 1522 udelay(10); 1523 #endif 1524 } 1525 } 1526 1527 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1528 { 1529 u32 val32; 1530 1531 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1532 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1533 1534 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1535 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1536 1537 rtw89_fw_prog_cnt_dump(rtwdev); 1538 } 1539 1540 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1541 struct rtw89_fw_suit *fw_suit) 1542 { 1543 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1544 struct rtw89_fw_bin_info info = {}; 1545 int ret; 1546 1547 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1548 if (ret) { 1549 rtw89_err(rtwdev, "parse fw header fail\n"); 1550 return ret; 1551 } 1552 1553 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1554 1555 if (rtwdev->chip->chip_id == RTL8922A && 1556 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1557 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1558 1559 ret = mac->fwdl_check_path_ready(rtwdev, true); 1560 if (ret) { 1561 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1562 return ret; 1563 } 1564 1565 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1566 if (ret) 1567 return ret; 1568 1569 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1570 if (ret) 1571 return ret; 1572 1573 return 0; 1574 } 1575 1576 static 1577 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1578 bool include_bb) 1579 { 1580 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1581 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1582 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1583 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1584 int ret; 1585 int i; 1586 1587 mac->disable_cpu(rtwdev); 1588 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1589 if (ret) 1590 return ret; 1591 1592 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1593 if (ret) 1594 goto fwdl_err; 1595 1596 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1597 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1598 1599 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1600 if (ret) 1601 goto fwdl_err; 1602 } 1603 1604 fw_info->h2c_seq = 0; 1605 fw_info->rec_seq = 0; 1606 fw_info->h2c_counter = 0; 1607 fw_info->c2h_counter = 0; 1608 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1609 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1610 1611 mdelay(5); 1612 1613 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1614 if (ret) { 1615 rtw89_warn(rtwdev, "download firmware fail\n"); 1616 goto fwdl_err; 1617 } 1618 1619 return ret; 1620 1621 fwdl_err: 1622 rtw89_fw_dl_fail_dump(rtwdev); 1623 return ret; 1624 } 1625 1626 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1627 bool include_bb) 1628 { 1629 int retry; 1630 int ret; 1631 1632 for (retry = 0; retry < 5; retry++) { 1633 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1634 if (!ret) 1635 return 0; 1636 } 1637 1638 return ret; 1639 } 1640 1641 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1642 { 1643 struct rtw89_fw_info *fw = &rtwdev->fw; 1644 1645 wait_for_completion(&fw->req.completion); 1646 if (!fw->req.firmware) 1647 return -EINVAL; 1648 1649 return 0; 1650 } 1651 1652 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1653 struct rtw89_fw_req_info *req, 1654 const char *fw_name, bool nowarn) 1655 { 1656 int ret; 1657 1658 if (req->firmware) { 1659 
rtw89_debug(rtwdev, RTW89_DBG_FW, 1660 "full firmware has been early requested\n"); 1661 complete_all(&req->completion); 1662 return 0; 1663 } 1664 1665 if (nowarn) 1666 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1667 else 1668 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1669 1670 complete_all(&req->completion); 1671 1672 return ret; 1673 } 1674 1675 void rtw89_load_firmware_work(struct work_struct *work) 1676 { 1677 struct rtw89_dev *rtwdev = 1678 container_of(work, struct rtw89_dev, load_firmware_work); 1679 const struct rtw89_chip_info *chip = rtwdev->chip; 1680 char fw_name[64]; 1681 1682 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1683 chip->fw_basename, rtwdev->fw.fw_format); 1684 1685 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1686 } 1687 1688 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1689 { 1690 if (!tbl) 1691 return; 1692 1693 kfree(tbl->regs); 1694 kfree(tbl); 1695 } 1696 1697 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1698 { 1699 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1700 int i; 1701 1702 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1703 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1704 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1705 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1706 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1707 1708 kfree(elm_info->txpwr_trk); 1709 kfree(elm_info->rfk_log_fmt); 1710 } 1711 1712 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1713 { 1714 struct rtw89_fw_info *fw = &rtwdev->fw; 1715 1716 cancel_work_sync(&rtwdev->load_firmware_work); 1717 1718 if (fw->req.firmware) { 1719 release_firmware(fw->req.firmware); 1720 1721 /* assign NULL back in case rtw89_free_ieee80211_hw() 1722 * try to release the same one again. 
1723 */ 1724 fw->req.firmware = NULL; 1725 } 1726 1727 kfree(fw->log.fmts); 1728 rtw89_unload_firmware_elements(rtwdev); 1729 } 1730 1731 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1732 { 1733 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1734 u32 i; 1735 1736 if (fmt_id > fw_log->last_fmt_id) 1737 return 0; 1738 1739 for (i = 0; i < fw_log->fmt_count; i++) { 1740 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1741 return i; 1742 } 1743 return 0; 1744 } 1745 1746 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1747 { 1748 struct rtw89_fw_log *log = &rtwdev->fw.log; 1749 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1750 struct rtw89_fw_suit *suit = &log->suit; 1751 #if defined(__linux__) 1752 const void *fmts_ptr, *fmts_end_ptr; 1753 #elif defined(__FreeBSD__) 1754 const u8 *fmts_ptr, *fmts_end_ptr; 1755 #endif 1756 u32 fmt_count; 1757 int i; 1758 1759 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1760 fmt_count = le32_to_cpu(suit_hdr->count); 1761 log->fmt_ids = suit_hdr->ids; 1762 #if defined(__linux__) 1763 fmts_ptr = &suit_hdr->ids[fmt_count]; 1764 #elif defined(__FreeBSD__) 1765 fmts_ptr = (const u8 *)&suit_hdr->ids[fmt_count]; 1766 #endif 1767 fmts_end_ptr = suit->data + suit->size; 1768 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1769 if (!log->fmts) 1770 return -ENOMEM; 1771 1772 for (i = 0; i < fmt_count; i++) { 1773 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1774 if (!fmts_ptr) 1775 break; 1776 1777 (*log->fmts)[i] = fmts_ptr; 1778 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1779 log->fmt_count++; 1780 fmts_ptr += strlen(fmts_ptr); 1781 } 1782 1783 return 0; 1784 } 1785 1786 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1787 { 1788 struct rtw89_fw_log *log = &rtwdev->fw.log; 1789 struct rtw89_fw_suit *suit = &log->suit; 1790 1791 if (!suit || !suit->data) { 1792 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1793 return -EINVAL; 1794 } 1795 if (log->fmts) 1796 return 0; 1797 1798 return rtw89_fw_log_create_fmts_dict(rtwdev); 1799 } 1800 1801 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1802 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1803 u32 fmt_idx, u8 para_int, bool raw_data) 1804 { 1805 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1806 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1807 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1808 int i; 1809 1810 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1811 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1812 log_fmt->argc); 1813 return; 1814 } 1815 1816 if (para_int) 1817 for (i = 0 ; i < log_fmt->argc; i++) 1818 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1819 1820 if (raw_data) { 1821 if (para_int) 1822 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1823 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1824 para_int, log_fmt->argc, (int)sizeof(args), args); 1825 else 1826 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1827 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1828 para_int, log_fmt->argc, log_fmt->u.raw); 1829 } else { 1830 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1831 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1832 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1833 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1834 args[0xf]); 1835 } 1836 1837 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1838 } 1839 1840 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1841 { 1842 const 
struct rtw89_fw_c2h_log_fmt *log_fmt; 1843 u8 para_int; 1844 u32 fmt_idx; 1845 1846 if (len < RTW89_C2H_HEADER_LEN) { 1847 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1848 return; 1849 } 1850 1851 buf += RTW89_C2H_HEADER_LEN; 1852 len -= RTW89_C2H_HEADER_LEN; 1853 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1854 1855 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1856 goto plain_log; 1857 1858 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1859 goto plain_log; 1860 1861 if (!rtwdev->fw.log.fmts) 1862 return; 1863 1864 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1865 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1866 1867 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1868 rtw89_info(rtwdev, "C2H log: %s%s", 1869 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1870 else if (fmt_idx != 0 && para_int) 1871 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1872 else 1873 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1874 return; 1875 1876 plain_log: 1877 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1878 1879 } 1880 1881 #define H2C_CAM_LEN 60 1882 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 1883 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 1884 { 1885 struct sk_buff *skb; 1886 int ret; 1887 1888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1889 if (!skb) { 1890 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1891 return -ENOMEM; 1892 } 1893 skb_put(skb, H2C_CAM_LEN); 1894 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 1895 skb->data); 1896 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 1897 1898 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1899 H2C_CAT_MAC, 1900 H2C_CL_MAC_ADDR_CAM_UPDATE, 1901 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1902 H2C_CAM_LEN); 1903 1904 ret = rtw89_h2c_tx(rtwdev, skb, false); 1905 if (ret) { 1906 rtw89_err(rtwdev, "failed to send h2c\n"); 1907 goto fail; 1908 } 1909 1910 return 0; 1911 fail: 1912 dev_kfree_skb_any(skb); 1913 1914 return ret; 1915 } 1916 1917 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1918 struct rtw89_vif_link *rtwvif_link, 1919 struct rtw89_sta_link *rtwsta_link) 1920 { 1921 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 1922 u32 len = sizeof(*h2c); 1923 struct sk_buff *skb; 1924 int ret; 1925 1926 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1927 if (!skb) { 1928 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1929 return -ENOMEM; 1930 } 1931 skb_put(skb, len); 1932 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 1933 1934 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 1935 1936 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1937 H2C_CAT_MAC, 1938 H2C_CL_MAC_FR_EXCHG, 1939 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1940 len); 1941 1942 ret = rtw89_h2c_tx(rtwdev, skb, false); 1943 if (ret) { 1944 rtw89_err(rtwdev, "failed to send h2c\n"); 1945 goto fail; 1946 } 1947 1948 return 0; 1949 fail: 1950 dev_kfree_skb_any(skb); 1951 1952 return ret; 1953 } 1954 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1955 1956 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1957 struct rtw89_vif_link *rtwvif_link, 1958 struct rtw89_sta_link *rtwsta_link) 1959 { 1960 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1961 u32 len = sizeof(*h2c); 1962 struct sk_buff *skb; 1963 int ret; 1964 1965 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 
1966 if (!skb) { 1967 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1968 return -ENOMEM; 1969 } 1970 skb_put(skb, len); 1971 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1972 1973 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 1974 1975 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1976 H2C_CAT_MAC, 1977 H2C_CL_MAC_FR_EXCHG, 1978 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1979 len); 1980 1981 ret = rtw89_h2c_tx(rtwdev, skb, false); 1982 if (ret) { 1983 rtw89_err(rtwdev, "failed to send h2c\n"); 1984 goto fail; 1985 } 1986 1987 return 0; 1988 fail: 1989 dev_kfree_skb_any(skb); 1990 1991 return ret; 1992 } 1993 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1994 1995 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1996 struct rtw89_vif_link *rtwvif_link, 1997 struct rtw89_sta_link *rtwsta_link) 1998 { 1999 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2000 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2001 u32 len = sizeof(*h2c); 2002 struct sk_buff *skb; 2003 int ret; 2004 2005 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2006 if (!skb) { 2007 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2008 return -ENOMEM; 2009 } 2010 skb_put(skb, len); 2011 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2012 2013 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2014 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2015 2016 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2017 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2018 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2019 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2020 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2021 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2022 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2023 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2024 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2025 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2026 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2027 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2028 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2029 2030 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2031 H2C_CAT_MAC, 2032 H2C_CL_MAC_FR_EXCHG, 2033 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2034 len); 2035 2036 ret = rtw89_h2c_tx(rtwdev, skb, false); 2037 if (ret) { 2038 rtw89_err(rtwdev, "failed to send h2c\n"); 2039 goto fail; 2040 } 2041 2042 return 0; 2043 fail: 2044 dev_kfree_skb_any(skb); 2045 2046 return ret; 2047 } 2048 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2049 2050 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2051 struct rtw89_vif_link *rtwvif_link, 2052 struct rtw89_sta_link *rtwsta_link, 2053 bool valid, struct ieee80211_ampdu_params *params) 2054 { 2055 const struct rtw89_chip_info *chip = rtwdev->chip; 2056 struct rtw89_h2c_ba_cam *h2c; 2057 u8 macid = rtwsta_link->mac_id; 2058 u32 len = sizeof(*h2c); 2059 struct sk_buff *skb; 2060 u8 entry_idx; 2061 int ret; 2062 2063 ret = valid ? 2064 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2065 &entry_idx) : 2066 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2067 &entry_idx); 2068 if (ret) { 2069 /* it still works even if we don't have static BA CAM, because 2070 * hardware can create dynamic BA CAM automatically. 2071 */ 2072 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2073 "failed to %s entry tid=%d for h2c ba cam\n", 2074 valid ? 
"alloc" : "free", params->tid); 2075 return 0; 2076 } 2077 2078 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2079 if (!skb) { 2080 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2081 return -ENOMEM; 2082 } 2083 skb_put(skb, len); 2084 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2085 2086 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2087 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2088 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2089 else 2090 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2091 if (!valid) 2092 goto end; 2093 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2094 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2095 if (params->buf_size > 64) 2096 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2097 else 2098 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2099 /* If init req is set, hw will set the ssn */ 2100 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2101 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2102 2103 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2104 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2105 le32_encode_bits(rtwvif_link->mac_idx, 2106 RTW89_H2C_BA_CAM_W1_BAND); 2107 } 2108 2109 end: 2110 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2111 H2C_CAT_MAC, 2112 H2C_CL_BA_CAM, 2113 H2C_FUNC_MAC_BA_CAM, 0, 1, 2114 len); 2115 2116 ret = rtw89_h2c_tx(rtwdev, skb, false); 2117 if (ret) { 2118 rtw89_err(rtwdev, "failed to send h2c\n"); 2119 goto fail; 2120 } 2121 2122 return 0; 2123 fail: 2124 dev_kfree_skb_any(skb); 2125 2126 return ret; 2127 } 2128 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2129 2130 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2131 u8 entry_idx, u8 uid) 2132 { 2133 struct rtw89_h2c_ba_cam *h2c; 2134 u32 len = sizeof(*h2c); 2135 struct sk_buff *skb; 2136 int ret; 2137 2138 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2139 if (!skb) { 2140 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2141 return -ENOMEM; 2142 } 2143 skb_put(skb, len); 2144 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2145 2146 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2147 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2148 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2149 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2150 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2151 2152 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2153 H2C_CAT_MAC, 2154 H2C_CL_BA_CAM, 2155 H2C_FUNC_MAC_BA_CAM, 0, 1, 2156 len); 2157 2158 ret = rtw89_h2c_tx(rtwdev, skb, false); 2159 if (ret) { 2160 rtw89_err(rtwdev, "failed to send h2c\n"); 2161 goto fail; 2162 } 2163 2164 return 0; 2165 fail: 2166 dev_kfree_skb_any(skb); 2167 2168 return ret; 2169 } 2170 2171 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2172 { 2173 const struct rtw89_chip_info *chip = rtwdev->chip; 2174 u8 entry_idx = chip->bacam_num; 2175 u8 uid = 0; 2176 int i; 2177 2178 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2179 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2180 entry_idx++; 2181 uid++; 2182 } 2183 } 2184 2185 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2186 struct rtw89_vif_link *rtwvif_link, 2187 struct rtw89_sta_link *rtwsta_link, 2188 bool valid, struct ieee80211_ampdu_params *params) 2189 { 2190 const struct rtw89_chip_info *chip = rtwdev->chip; 2191 struct rtw89_h2c_ba_cam_v1 *h2c; 2192 u8 
macid = rtwsta_link->mac_id; 2193 u32 len = sizeof(*h2c); 2194 struct sk_buff *skb; 2195 u8 entry_idx; 2196 u8 bmap_size; 2197 int ret; 2198 2199 ret = valid ? 2200 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2201 &entry_idx) : 2202 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2203 &entry_idx); 2204 if (ret) { 2205 /* it still works even if we don't have static BA CAM, because 2206 * hardware can create dynamic BA CAM automatically. 2207 */ 2208 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2209 "failed to %s entry tid=%d for h2c ba cam\n", 2210 valid ? "alloc" : "free", params->tid); 2211 return 0; 2212 } 2213 2214 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2215 if (!skb) { 2216 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2217 return -ENOMEM; 2218 } 2219 skb_put(skb, len); 2220 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2221 2222 if (params->buf_size > 512) 2223 bmap_size = 10; 2224 else if (params->buf_size > 256) 2225 bmap_size = 8; 2226 else if (params->buf_size > 64) 2227 bmap_size = 4; 2228 else 2229 bmap_size = 0; 2230 2231 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2232 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2233 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2234 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2235 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2236 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2237 2238 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2239 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2240 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2241 le32_encode_bits(!!rtwvif_link->mac_idx, 2242 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2243 2244 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2245 H2C_CAT_MAC, 2246 H2C_CL_BA_CAM, 2247 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2248 len); 2249 2250 ret = rtw89_h2c_tx(rtwdev, skb, false); 2251 if (ret) { 2252 rtw89_err(rtwdev, "failed to send h2c\n"); 2253 goto fail; 2254 } 2255 2256 return 0; 2257 fail: 2258 dev_kfree_skb_any(skb); 2259 2260 return ret; 2261 } 2262 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2263 2264 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2265 u8 offset, u8 mac_idx) 2266 { 2267 struct rtw89_h2c_ba_cam_init *h2c; 2268 u32 len = sizeof(*h2c); 2269 struct sk_buff *skb; 2270 int ret; 2271 2272 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2273 if (!skb) { 2274 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2275 return -ENOMEM; 2276 } 2277 skb_put(skb, len); 2278 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2279 2280 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2281 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2282 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2283 2284 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2285 H2C_CAT_MAC, 2286 H2C_CL_BA_CAM, 2287 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2288 len); 2289 2290 ret = rtw89_h2c_tx(rtwdev, skb, false); 2291 if (ret) { 2292 rtw89_err(rtwdev, "failed to send h2c\n"); 2293 goto fail; 2294 } 2295 2296 return 0; 2297 fail: 2298 dev_kfree_skb_any(skb); 2299 2300 return ret; 2301 } 2302 2303 #define H2C_LOG_CFG_LEN 12 2304 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2305 { 2306 struct sk_buff *skb; 2307 u32 comp = 0; 2308 int ret; 2309 2310 if (enable) 2311 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2312 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2313 BIT(RTW89_FW_LOG_COMP_SCAN); 2314 2315 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2316 if (!skb) { 2317 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2318 return -ENOMEM; 2319 } 2320 2321 skb_put(skb, H2C_LOG_CFG_LEN); 2322 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2323 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2324 SET_LOG_CFG_COMP(skb->data, comp); 2325 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2326 2327 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2328 H2C_CAT_MAC, 2329 H2C_CL_FW_INFO, 2330 H2C_FUNC_LOG_CFG, 0, 0, 2331 H2C_LOG_CFG_LEN); 2332 2333 ret = rtw89_h2c_tx(rtwdev, skb, false); 2334 if (ret) { 2335 rtw89_err(rtwdev, "failed to send h2c\n"); 2336 goto fail; 2337 } 2338 2339 return 0; 2340 fail: 2341 dev_kfree_skb_any(skb); 2342 2343 return ret; 2344 } 2345 2346 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2347 struct rtw89_vif_link *rtwvif_link) 2348 { 2349 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2350 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2351 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2352 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2353 struct rtw89_eapol_2_of_2 *eapol_pkt; 2354 struct ieee80211_bss_conf *bss_conf; 2355 struct ieee80211_hdr_3addr *hdr; 2356 struct sk_buff *skb; 2357 u8 key_des_ver; 2358 2359 if (rtw_wow->ptk_alg == 3) 2360 key_des_ver = 1; 2361 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2362 key_des_ver = 2; 2363 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2364 key_des_ver = 3; 2365 else 2366 key_des_ver = 0; 2367 2368 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2369 if (!skb) 2370 return NULL; 2371 2372 hdr = skb_put_zero(skb, sizeof(*hdr)); 2373 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2374 IEEE80211_FCTL_TODS | 2375 IEEE80211_FCTL_PROTECTED); 2376 2377 rcu_read_lock(); 2378 2379 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2380 2381 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2382 ether_addr_copy(hdr->addr2, bss_conf->addr); 2383 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2384 2385 rcu_read_unlock(); 2386 2387 skb_put_zero(skb, sec_hdr_len); 2388 2389 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2390 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2391 eapol_pkt->key_des_ver = key_des_ver; 2392 2393 return skb; 2394 } 2395 2396 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2397 struct rtw89_vif_link *rtwvif_link) 2398 { 2399 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2400 struct ieee80211_bss_conf *bss_conf; 2401 struct ieee80211_hdr_3addr *hdr; 2402 struct rtw89_sa_query *sa_query; 2403 struct sk_buff *skb; 2404 2405 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2406 if (!skb) 2407 return NULL; 2408 2409 hdr = skb_put_zero(skb, sizeof(*hdr)); 2410 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2411 IEEE80211_STYPE_ACTION | 2412 IEEE80211_FCTL_PROTECTED); 2413 2414 rcu_read_lock(); 2415 2416 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2417 2418 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2419 ether_addr_copy(hdr->addr2, bss_conf->addr); 2420 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2421 2422 rcu_read_unlock(); 2423 2424 skb_put_zero(skb, sec_hdr_len); 2425 2426 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2427 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2428 
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2429 2430 return skb; 2431 } 2432 2433 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2434 struct rtw89_vif_link *rtwvif_link) 2435 { 2436 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2437 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2438 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2439 struct ieee80211_hdr_3addr *hdr; 2440 struct rtw89_arp_rsp *arp_skb; 2441 struct arphdr *arp_hdr; 2442 struct sk_buff *skb; 2443 __le16 fc; 2444 2445 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2446 if (!skb) 2447 return NULL; 2448 2449 hdr = skb_put_zero(skb, sizeof(*hdr)); 2450 2451 if (rtw_wow->ptk_alg) 2452 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2453 IEEE80211_FCTL_PROTECTED); 2454 else 2455 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2456 2457 hdr->frame_control = fc; 2458 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2459 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2460 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2461 2462 skb_put_zero(skb, sec_hdr_len); 2463 2464 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2465 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2466 arp_skb->llc_type = htons(ETH_P_ARP); 2467 2468 arp_hdr = &arp_skb->arp_hdr; 2469 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2470 arp_hdr->ar_pro = htons(ETH_P_IP); 2471 arp_hdr->ar_hln = ETH_ALEN; 2472 arp_hdr->ar_pln = 4; 2473 arp_hdr->ar_op = htons(ARPOP_REPLY); 2474 2475 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2476 arp_skb->sender_ip = rtwvif->ip_addr; 2477 2478 return skb; 2479 } 2480 2481 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2482 struct rtw89_vif_link *rtwvif_link, 2483 enum rtw89_fw_pkt_ofld_type type, 2484 u8 *id) 2485 { 2486 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2487 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2488 struct rtw89_pktofld_info *info; 2489 struct sk_buff *skb; 2490 int ret; 2491 2492 info = kzalloc(sizeof(*info), GFP_KERNEL); 2493 if (!info) 2494 return -ENOMEM; 2495 2496 switch (type) { 2497 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2498 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2499 break; 2500 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2501 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2502 break; 2503 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2504 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2505 break; 2506 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2507 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2508 break; 2509 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2510 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2511 break; 2512 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2513 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2514 break; 2515 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2516 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2517 break; 2518 default: 2519 goto err; 2520 } 2521 2522 if (!skb) 2523 goto err; 2524 2525 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2526 kfree_skb(skb); 2527 2528 if (ret) 2529 goto err; 2530 2531 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2532 *id = info->id; 2533 return 0; 2534 2535 err: 2536 kfree(info); 2537 return -ENOMEM; 2538 } 2539 2540 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2541 struct rtw89_vif_link *rtwvif_link, 2542 bool notify_fw) 2543 { 2544 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2545 struct rtw89_pktofld_info *info, *tmp; 2546 2547 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2548 if (notify_fw) 2549 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2550 else 2551 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2552 list_del(&info->list); 2553 kfree(info); 2554 } 2555 } 2556 2557 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2558 { 2559 struct rtw89_vif_link *rtwvif_link; 2560 struct rtw89_vif *rtwvif; 2561 unsigned int link_id; 2562 2563 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2564 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2565 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2566 notify_fw); 2567 } 2568 2569 #define H2C_GENERAL_PKT_LEN 6 2570 #define H2C_GENERAL_PKT_ID_UND 0xff 2571 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2572 struct rtw89_vif_link *rtwvif_link, u8 macid) 2573 { 2574 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2575 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2576 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2577 struct sk_buff *skb; 2578 int ret; 2579 2580 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2581 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2582 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2583 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2584 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2585 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2586 2587 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2588 if (!skb) { 2589 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2590 return -ENOMEM; 2591 } 2592 skb_put(skb, H2C_GENERAL_PKT_LEN); 2593 SET_GENERAL_PKT_MACID(skb->data, macid); 2594 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2595 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2596 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2597 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2598 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2599 2600 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2601 H2C_CAT_MAC, 2602 H2C_CL_FW_INFO, 2603 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2604 H2C_GENERAL_PKT_LEN); 2605 2606 ret = rtw89_h2c_tx(rtwdev, skb, false); 2607 if (ret) { 2608 rtw89_err(rtwdev, "failed to send h2c\n"); 2609 goto fail; 2610 } 2611 2612 return 0; 2613 fail: 2614 dev_kfree_skb_any(skb); 2615 2616 return ret; 2617 } 2618 2619 #define H2C_LPS_PARM_LEN 8 2620 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2621 struct rtw89_lps_parm *lps_param) 2622 { 2623 struct sk_buff *skb; 2624 int ret; 2625 2626 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2627 if (!skb) { 2628 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2629 return -ENOMEM; 2630 } 2631 skb_put(skb, H2C_LPS_PARM_LEN); 2632 2633 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2634 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2635 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2636 SET_LPS_PARM_RLBM(skb->data, 1); 2637 SET_LPS_PARM_SMARTPS(skb->data, 1); 2638 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2639 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2640 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2641 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2642 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2643 2644 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2645 H2C_CAT_MAC, 2646 H2C_CL_MAC_PS, 2647 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2648 H2C_LPS_PARM_LEN); 2649 2650 ret = rtw89_h2c_tx(rtwdev, skb, false); 2651 if (ret) { 2652 rtw89_err(rtwdev, "failed to send h2c\n"); 2653 goto fail; 2654 } 2655 2656 return 0; 2657 fail: 2658 dev_kfree_skb_any(skb); 2659 2660 return ret; 2661 } 2662 2663 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2664 { 2665 const struct rtw89_chip_info *chip = rtwdev->chip; 2666 const struct rtw89_chan *chan; 2667 struct rtw89_vif_link *rtwvif_link; 2668 struct rtw89_h2c_lps_ch_info *h2c; 2669 u32 len = sizeof(*h2c); 2670 unsigned int link_id; 2671 struct sk_buff *skb; 2672 bool no_chan = true; 2673 u8 phy_idx; 2674 u32 done; 2675 int ret; 2676 2677 if (chip->chip_gen != RTW89_CHIP_BE) 2678 return 0; 2679 2680 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2681 if (!skb) { 2682 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2683 return -ENOMEM; 2684 } 2685 skb_put(skb, len); 2686 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2687 2688 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2689 phy_idx = rtwvif_link->phy_idx; 2690 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2691 continue; 2692 2693 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2694 no_chan = false; 2695 2696 h2c->info[phy_idx].central_ch = chan->channel; 2697 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2698 h2c->info[phy_idx].band = chan->band_type; 2699 h2c->info[phy_idx].bw = chan->band_width; 2700 } 2701 2702 if (no_chan) { 2703 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2704 ret = -ENOENT; 2705 goto fail; 2706 } 2707 2708 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2709 2710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2711 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2712 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2713 2714 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2715 ret = rtw89_h2c_tx(rtwdev, skb, false); 2716 if (ret) { 2717 rtw89_err(rtwdev, "failed to send h2c\n"); 2718 goto fail; 2719 } 2720 2721 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2722 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2723 if (ret) 2724 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2725 2726 return 0; 2727 fail: 2728 dev_kfree_skb_any(skb); 2729 2730 return ret; 2731 } 2732 2733 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2734 struct rtw89_vif *rtwvif) 2735 { 2736 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2737 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2738 const struct rtw89_chip_info *chip = rtwdev->chip; 2739 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2740 struct rtw89_vif_link *rtwvif_link; 2741 const struct rtw89_chan *chan; 2742 u8 bw_idx = RTW89_BB_BW_20_40; 2743 u32 len = sizeof(*h2c); 2744 unsigned int link_id; 2745 struct sk_buff *skb; 2746 u8 gain_band; 2747 u32 done; 2748 u8 path; 2749 int ret; 2750 int i; 2751 2752 if (chip->chip_gen != RTW89_CHIP_BE) 2753 return 0; 2754 2755 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2756 if (!skb) { 2757 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2758 return -ENOMEM; 2759 } 2760 skb_put(skb, len); 2761 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2762 2763 h2c->fmt_id = 0x1; 2764 2765 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2766 2767 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2768 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2769 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2770 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2771 2772 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2773 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2774 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2775 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2776 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2777 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2778 else 2779 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2780 2781 /* Fill BW20 RX gain table for beacon mode */ 2782 for (i = 0; i < TIA_GAIN_NUM; i++) { 2783 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2784 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2785 } 2786 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2787 gain->lna_gain[gain_band][bw_idx][path], 2788 LNA_GAIN_NUM); 2789 } 2790 2791 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2792 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2793 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2794 2795 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2796 ret = rtw89_h2c_tx(rtwdev, skb, false); 2797 if (ret) { 2798 rtw89_err(rtwdev, "failed to send h2c\n"); 2799 goto fail; 2800 } 2801 2802 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2803 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2804 if (ret) 2805 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2806 2807 return 0; 2808 fail: 2809 dev_kfree_skb_any(skb); 2810 2811 return ret; 2812 } 2813 2814 #define H2C_P2P_ACT_LEN 20 2815 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2816 struct rtw89_vif_link *rtwvif_link, 2817 struct ieee80211_bss_conf *bss_conf, 2818 struct ieee80211_p2p_noa_desc *desc, 2819 u8 act, u8 noa_id) 2820 { 2821 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2822 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2823 struct sk_buff *skb; 2824 u8 *cmd; 2825 int ret; 2826 2827 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2828 if (!skb) { 2829 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2830 return -ENOMEM; 2831 } 2832 skb_put(skb, 
H2C_P2P_ACT_LEN); 2833 cmd = skb->data; 2834 2835 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2836 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2837 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2838 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2839 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2840 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2841 if (desc) { 2842 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2843 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2844 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2845 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2846 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2847 } 2848 2849 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2850 H2C_CAT_MAC, H2C_CL_MAC_PS, 2851 H2C_FUNC_P2P_ACT, 0, 0, 2852 H2C_P2P_ACT_LEN); 2853 2854 ret = rtw89_h2c_tx(rtwdev, skb, false); 2855 if (ret) { 2856 rtw89_err(rtwdev, "failed to send h2c\n"); 2857 goto fail; 2858 } 2859 2860 return 0; 2861 fail: 2862 dev_kfree_skb_any(skb); 2863 2864 return ret; 2865 } 2866 2867 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2868 struct sk_buff *skb) 2869 { 2870 const struct rtw89_chip_info *chip = rtwdev->chip; 2871 struct rtw89_hal *hal = &rtwdev->hal; 2872 u8 ntx_path; 2873 u8 map_b; 2874 2875 if (chip->rf_path_num == 1) { 2876 ntx_path = RF_A; 2877 map_b = 0; 2878 } else { 2879 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2880 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2881 } 2882 2883 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2884 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2885 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2886 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2887 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2888 } 2889 2890 #define H2C_CMC_TBL_LEN 68 2891 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2892 struct rtw89_vif_link *rtwvif_link, 2893 struct rtw89_sta_link *rtwsta_link) 2894 { 2895 const struct rtw89_chip_info *chip = rtwdev->chip; 2896 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2897 struct sk_buff *skb; 2898 int ret; 2899 2900 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2901 if (!skb) { 2902 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2903 return -ENOMEM; 2904 } 2905 skb_put(skb, H2C_CMC_TBL_LEN); 2906 SET_CTRL_INFO_MACID(skb->data, macid); 2907 SET_CTRL_INFO_OPERATION(skb->data, 1); 2908 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2909 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2910 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2911 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2912 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2913 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2914 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2915 } 2916 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2917 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2918 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2919 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2920 2921 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2922 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2923 chip->h2c_cctl_func_id, 0, 1, 2924 H2C_CMC_TBL_LEN); 2925 2926 ret = rtw89_h2c_tx(rtwdev, skb, false); 2927 if (ret) { 2928 rtw89_err(rtwdev, "failed to send h2c\n"); 2929 goto fail; 2930 } 2931 2932 return 0; 2933 fail: 2934 dev_kfree_skb_any(skb); 2935 2936 return ret; 2937 } 2938 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2939 2940 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2941 struct rtw89_vif_link *rtwvif_link, 2942 struct rtw89_sta_link *rtwsta_link) 2943 { 2944 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2945 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2946 u32 len = sizeof(*h2c); 2947 struct sk_buff *skb; 2948 int ret; 2949 2950 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2951 if (!skb) { 2952 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2953 return -ENOMEM; 2954 } 2955 skb_put(skb, len); 2956 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2957 2958 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2959 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2960 2961 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2962 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2963 2964 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2965 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2966 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2967 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2968 2969 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2970 2971 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2972 2973 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2974 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2975 2976 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2977 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2978 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2979 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2980 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2981 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2982 2983 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2984 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2985 2986 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2987 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2988 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2989 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2990 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2991 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2992 2993 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2994 2995 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2996 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2997 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2998 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2999 3000 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3001 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3002 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3003 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3004 3005 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3006 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3007 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3008 len); 3009 3010 ret = rtw89_h2c_tx(rtwdev, skb, false); 3011 if (ret) { 3012 rtw89_err(rtwdev, "failed to send h2c\n"); 3013 goto fail; 3014 } 3015 3016 return 0; 3017 fail: 3018 dev_kfree_skb_any(skb); 3019 3020 return ret; 3021 } 3022 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3023 3024 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3025 struct ieee80211_link_sta *link_sta, 3026 u8 *pads) 3027 { 3028 bool ppe_th; 3029 u8 ppe16, ppe8; 3030 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3031 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3032 u8 ru_bitmap; 3033 u8 n, idx, sh; 3034 u16 ppe; 3035 int i; 3036 3037 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3038 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3039 if (!ppe_th) { 3040 u8 pad; 3041 3042 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3043 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3044 3045 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 3046 pads[i] = pad; 3047 3048 return; 3049 } 3050 3051 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3052 n = hweight8(ru_bitmap); 3053 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3054 3055 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3056 if (!(ru_bitmap & BIT(i))) { 3057 pads[i] = 1; 3058 continue; 3059 } 3060 3061 idx = n >> 3; 3062 sh = n & 7; 3063 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3064 3065 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3066 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3067 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3068 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3069 3070 if (ppe16 != 7 && ppe8 == 7) 3071 pads[i] = RTW89_PE_DURATION_16; 3072 else if (ppe8 != 7) 3073 pads[i] = RTW89_PE_DURATION_8; 3074 else 3075 pads[i] = RTW89_PE_DURATION_0; 3076 } 3077 } 3078 3079 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3080 struct rtw89_vif_link *rtwvif_link, 3081 struct rtw89_sta_link *rtwsta_link) 3082 { 3083 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3084 const struct rtw89_chip_info *chip = rtwdev->chip; 3085 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3086 rtwvif_link->chanctx_idx); 3087 struct ieee80211_link_sta *link_sta; 3088 struct sk_buff *skb; 3089 u8 pads[RTW89_PPE_BW_NUM]; 3090 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3091 u16 lowest_rate; 3092 int ret; 3093 3094 memset(pads, 0, sizeof(pads)); 3095 3096 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3097 if (!skb) { 3098 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3099 return -ENOMEM; 3100 } 3101 3102 rcu_read_lock(); 3103 3104 if (rtwsta_link) 3105 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3106 3107 if (rtwsta_link && link_sta->he_cap.has_he) 3108 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3109 3110 if (vif->p2p) 3111 lowest_rate = RTW89_HW_RATE_OFDM6; 3112 else if (chan->band_type == RTW89_BAND_2G) 3113 lowest_rate = RTW89_HW_RATE_CCK1; 3114 else 3115 lowest_rate = RTW89_HW_RATE_OFDM6; 3116 3117 skb_put(skb, H2C_CMC_TBL_LEN); 3118 SET_CTRL_INFO_MACID(skb->data, mac_id); 3119 SET_CTRL_INFO_OPERATION(skb->data, 1); 3120 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3121 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3122 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3123 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3124 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3125 if (vif->type == NL80211_IFTYPE_STATION) 3126 SET_CMC_TBL_ULDL(skb->data, 1); 3127 else 3128 SET_CMC_TBL_ULDL(skb->data, 0); 3129 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3130 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3131 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3132 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3133 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3134 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3135 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3136 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3137 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3138 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3139 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3140 } 3141 if (rtwsta_link) 3142 
SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3143 link_sta->he_cap.has_he); 3144 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3145 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3146 3147 rcu_read_unlock(); 3148 3149 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3150 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3151 chip->h2c_cctl_func_id, 0, 1, 3152 H2C_CMC_TBL_LEN); 3153 3154 ret = rtw89_h2c_tx(rtwdev, skb, false); 3155 if (ret) { 3156 rtw89_err(rtwdev, "failed to send h2c\n"); 3157 goto fail; 3158 } 3159 3160 return 0; 3161 fail: 3162 dev_kfree_skb_any(skb); 3163 3164 return ret; 3165 } 3166 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3167 3168 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3169 struct ieee80211_link_sta *link_sta, 3170 u8 *pads) 3171 { 3172 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3173 u16 ppe_thres_hdr; 3174 u8 ppe16, ppe8; 3175 u8 n, idx, sh; 3176 u8 ru_bitmap; 3177 bool ppe_th; 3178 u16 ppe; 3179 int i; 3180 3181 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3182 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3183 if (!ppe_th) { 3184 u8 pad; 3185 3186 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3187 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3188 3189 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3190 pads[i] = pad; 3191 3192 return; 3193 } 3194 3195 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3196 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3197 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3198 n = hweight8(ru_bitmap); 3199 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3200 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3201 3202 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3203 if (!(ru_bitmap & BIT(i))) { 3204 pads[i] = 1; 3205 continue; 3206 } 3207 3208 idx = n >> 3; 3209 sh = n & 7; 3210 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3211 3212 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3213 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3214 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3215 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3216 3217 if (ppe16 != 7 && ppe8 == 7) 3218 pads[i] = RTW89_PE_DURATION_16_20; 3219 else if (ppe8 != 7) 3220 pads[i] = RTW89_PE_DURATION_8; 3221 else 3222 pads[i] = RTW89_PE_DURATION_0; 3223 } 3224 } 3225 3226 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3227 struct rtw89_vif_link *rtwvif_link, 3228 struct rtw89_sta_link *rtwsta_link) 3229 { 3230 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3231 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3232 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3233 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3234 struct ieee80211_bss_conf *bss_conf; 3235 struct ieee80211_link_sta *link_sta; 3236 u8 pads[RTW89_PPE_BW_NUM]; 3237 u32 len = sizeof(*h2c); 3238 struct sk_buff *skb; 3239 u16 lowest_rate; 3240 int ret; 3241 3242 memset(pads, 0, sizeof(pads)); 3243 3244 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3245 if (!skb) { 3246 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3247 return -ENOMEM; 3248 } 3249 3250 rcu_read_lock(); 3251 3252 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3253 3254 if (rtwsta_link) { 3255 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3256 3257 if (link_sta->eht_cap.has_eht) 3258 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3259 else if (link_sta->he_cap.has_he) 3260 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3261 } 3262 3263 if (vif->p2p) 3264 lowest_rate = RTW89_HW_RATE_OFDM6; 3265 else if (chan->band_type == RTW89_BAND_2G) 3266 lowest_rate = RTW89_HW_RATE_CCK1; 3267 else 3268 lowest_rate = RTW89_HW_RATE_OFDM6; 3269 3270 skb_put(skb, len); 3271 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3272 3273 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3274 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3275 3276 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3277 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3278 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3279 CCTLINFO_G7_W0_DISDATAFB); 3280 3281 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3282 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3283 3284 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3285 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3286 3287 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3288 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3289 3290 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3291 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3292 3293 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3294 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3295 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3296 } 3297 3298 if (bss_conf->eht_support) { 3299 u16 punct = bss_conf->chanreq.oper.punctured; 3300 3301 h2c->w4 |= le32_encode_bits(~punct, 3302 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3303 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3304 } 3305 3306 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3307 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3308 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3309 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3310 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3311 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3312 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3313 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3314 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3315 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3316 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3317 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3318 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3319 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3320 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3321 3322 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3323 CCTLINFO_G7_W6_ULDL); 3324 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3325 3326 if (rtwsta_link) { 3327 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3328 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3329 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3330 } 3331 3332 rcu_read_unlock(); 3333 3334 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3335 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3336 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3337 len); 3338 3339 ret = rtw89_h2c_tx(rtwdev, skb, false); 3340 if (ret) { 3341 rtw89_err(rtwdev, "failed to send h2c\n"); 3342 goto fail; 3343 } 3344 3345 return 0; 3346 fail: 3347 dev_kfree_skb_any(skb); 3348 3349 return ret; 3350 } 3351 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3352 3353 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3354 struct rtw89_vif_link *rtwvif_link, 3355 struct rtw89_sta_link *rtwsta_link) 3356 { 3357 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3358 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3359 u32 len = sizeof(*h2c); 3360 struct sk_buff *skb; 3361 u16 agg_num = 0; 3362 u8 ba_bmap = 0; 3363 int ret; 3364 u8 tid; 3365 3366 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3367 if (!skb) { 3368 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3369 return -ENOMEM; 3370 } 3371 skb_put(skb, len); 3372 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3373 3374 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3375 if (agg_num == 0) 3376 agg_num = rtwsta->ampdu_params[tid].agg_num; 3377 else 3378 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3379 } 3380 3381 if (agg_num <= 0x20) 3382 ba_bmap = 3; 3383 else if (agg_num > 0x20 && agg_num <= 0x40) 3384 ba_bmap = 0; 3385 else if (agg_num > 0x40 && agg_num <= 0x80) 3386 ba_bmap = 1; 3387 else if (agg_num > 0x80 && agg_num <= 0x100) 3388 ba_bmap = 2; 3389 else if (agg_num > 0x100 && agg_num <= 0x200) 3390 ba_bmap = 4; 3391 else if (agg_num > 0x200 && agg_num <= 0x400) 3392 ba_bmap = 5; 3393 3394 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3395 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3396 3397 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3398 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3399 3400 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3401 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3402 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3403 len); 3404 3405 ret = rtw89_h2c_tx(rtwdev, skb, false); 3406 if (ret) { 3407 rtw89_err(rtwdev, "failed to send h2c\n"); 3408 goto fail; 3409 } 3410 3411 return 0; 3412 fail: 3413 dev_kfree_skb_any(skb); 3414 3415 return ret; 3416 } 3417 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3418 3419 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3420 struct rtw89_sta_link *rtwsta_link) 3421 { 3422 const struct rtw89_chip_info *chip = rtwdev->chip; 3423 struct sk_buff *skb; 3424 int ret; 3425 3426 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3427 if (!skb) { 3428 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3429 return -ENOMEM; 3430 } 3431 skb_put(skb, H2C_CMC_TBL_LEN); 3432 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3433 SET_CTRL_INFO_OPERATION(skb->data, 1); 3434 if (rtwsta_link->cctl_tx_time) { 3435 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3436 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3437 } 3438 if (rtwsta_link->cctl_tx_retry_limit) { 3439 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3440 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3441 } 3442 
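/* Note: the *_SEL bits set above appear to select which limits (AMPDU max time, data tx retry count) the firmware applies from this per-MACID CMAC table update; fields whose selector is left at 0 are presumably left unchanged. */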
3443 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3444 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3445 chip->h2c_cctl_func_id, 0, 1, 3446 H2C_CMC_TBL_LEN); 3447 3448 ret = rtw89_h2c_tx(rtwdev, skb, false); 3449 if (ret) { 3450 rtw89_err(rtwdev, "failed to send h2c\n"); 3451 goto fail; 3452 } 3453 3454 return 0; 3455 fail: 3456 dev_kfree_skb_any(skb); 3457 3458 return ret; 3459 } 3460 3461 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3462 struct rtw89_sta_link *rtwsta_link) 3463 { 3464 const struct rtw89_chip_info *chip = rtwdev->chip; 3465 struct sk_buff *skb; 3466 int ret; 3467 3468 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3469 return 0; 3470 3471 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3472 if (!skb) { 3473 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3474 return -ENOMEM; 3475 } 3476 skb_put(skb, H2C_CMC_TBL_LEN); 3477 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3478 SET_CTRL_INFO_OPERATION(skb->data, 1); 3479 3480 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3481 3482 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3483 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3484 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3485 H2C_CMC_TBL_LEN); 3486 3487 ret = rtw89_h2c_tx(rtwdev, skb, false); 3488 if (ret) { 3489 rtw89_err(rtwdev, "failed to send h2c\n"); 3490 goto fail; 3491 } 3492 3493 return 0; 3494 fail: 3495 dev_kfree_skb_any(skb); 3496 3497 return ret; 3498 } 3499 3500 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3501 struct rtw89_vif_link *rtwvif_link) 3502 { 3503 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3504 rtwvif_link->chanctx_idx); 3505 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3506 struct rtw89_h2c_bcn_upd *h2c; 3507 struct sk_buff *skb_beacon; 3508 struct ieee80211_hdr *hdr; 3509 u32 len = sizeof(*h2c); 3510 struct sk_buff *skb; 3511 int bcn_total_len; 3512 u16 beacon_rate; 3513 u16 tim_offset; 3514 void *noa_data; 3515 u8 noa_len; 3516 int ret; 3517 3518 if (vif->p2p) 3519 beacon_rate = RTW89_HW_RATE_OFDM6; 3520 else if (chan->band_type == RTW89_BAND_2G) 3521 beacon_rate = RTW89_HW_RATE_CCK1; 3522 else 3523 beacon_rate = RTW89_HW_RATE_OFDM6; 3524 3525 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3526 NULL, 0); 3527 if (!skb_beacon) { 3528 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3529 return -ENOMEM; 3530 } 3531 3532 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3533 if (noa_len && 3534 (noa_len <= skb_tailroom(skb_beacon) || 3535 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3536 skb_put_data(skb_beacon, noa_data, noa_len); 3537 } 3538 3539 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3540 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3541 3542 bcn_total_len = len + skb_beacon->len; 3543 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3544 if (!skb) { 3545 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3546 dev_kfree_skb_any(skb_beacon); 3547 return -ENOMEM; 3548 } 3549 skb_put(skb, len); 3550 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3551 3552 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3553 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3554 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3555 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3556 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3557 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3558
le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3559 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3560 3561 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3562 dev_kfree_skb_any(skb_beacon); 3563 3564 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3565 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3566 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3567 bcn_total_len); 3568 3569 ret = rtw89_h2c_tx(rtwdev, skb, false); 3570 if (ret) { 3571 rtw89_err(rtwdev, "failed to send h2c\n"); 3572 dev_kfree_skb_any(skb); 3573 return ret; 3574 } 3575 3576 return 0; 3577 } 3578 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3579 3580 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3581 struct rtw89_vif_link *rtwvif_link) 3582 { 3583 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3584 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3585 struct rtw89_h2c_bcn_upd_be *h2c; 3586 struct sk_buff *skb_beacon; 3587 struct ieee80211_hdr *hdr; 3588 u32 len = sizeof(*h2c); 3589 struct sk_buff *skb; 3590 int bcn_total_len; 3591 u16 beacon_rate; 3592 u16 tim_offset; 3593 void *noa_data; 3594 u8 noa_len; 3595 int ret; 3596 3597 if (vif->p2p) 3598 beacon_rate = RTW89_HW_RATE_OFDM6; 3599 else if (chan->band_type == RTW89_BAND_2G) 3600 beacon_rate = RTW89_HW_RATE_CCK1; 3601 else 3602 beacon_rate = RTW89_HW_RATE_OFDM6; 3603 3604 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3605 NULL, 0); 3606 if (!skb_beacon) { 3607 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3608 return -ENOMEM; 3609 } 3610 3611 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3612 if (noa_len && 3613 (noa_len <= skb_tailroom(skb_beacon) || 3614 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3615 skb_put_data(skb_beacon, noa_data, noa_len); 3616 } 3617 3618 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3619 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3620 3621 bcn_total_len = len + skb_beacon->len; 3622 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3623 if (!skb) { 3624 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3625 dev_kfree_skb_any(skb_beacon); 3626 return -ENOMEM; 3627 } 3628 skb_put(skb, len); 3629 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3630 3631 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3632 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3633 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3634 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3635 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3636 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3637 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3638 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3639 3640 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3641 dev_kfree_skb_any(skb_beacon); 3642 3643 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3644 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3645 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3646 bcn_total_len); 3647 3648 ret = rtw89_h2c_tx(rtwdev, skb, false); 3649 if (ret) { 3650 rtw89_err(rtwdev, "failed to send h2c\n"); 3651 goto fail; 3652 } 3653 3654 return 0; 3655 3656 fail: 3657 dev_kfree_skb_any(skb); 3658 3659 return ret; 3660 } 3661 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3662 3663 #define H2C_ROLE_MAINTAIN_LEN 4 3664 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3665 struct
rtw89_vif_link *rtwvif_link, 3666 struct rtw89_sta_link *rtwsta_link, 3667 enum rtw89_upd_mode upd_mode) 3668 { 3669 struct sk_buff *skb; 3670 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3671 u8 self_role; 3672 int ret; 3673 3674 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3675 if (rtwsta_link) 3676 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3677 else 3678 self_role = rtwvif_link->self_role; 3679 } else { 3680 self_role = rtwvif_link->self_role; 3681 } 3682 3683 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3684 if (!skb) { 3685 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3686 return -ENOMEM; 3687 } 3688 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3689 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3690 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3691 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3692 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3693 3694 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3695 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3696 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3697 H2C_ROLE_MAINTAIN_LEN); 3698 3699 ret = rtw89_h2c_tx(rtwdev, skb, false); 3700 if (ret) { 3701 rtw89_err(rtwdev, "failed to send h2c\n"); 3702 goto fail; 3703 } 3704 3705 return 0; 3706 fail: 3707 dev_kfree_skb_any(skb); 3708 3709 return ret; 3710 } 3711 3712 static enum rtw89_fw_sta_type 3713 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3714 struct rtw89_sta_link *rtwsta_link) 3715 { 3716 struct ieee80211_bss_conf *bss_conf; 3717 struct ieee80211_link_sta *link_sta; 3718 enum rtw89_fw_sta_type type; 3719 3720 rcu_read_lock(); 3721 3722 if (!rtwsta_link) 3723 goto by_vif; 3724 3725 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3726 3727 if (link_sta->eht_cap.has_eht) 3728 type = RTW89_FW_BE_STA; 3729 else if (link_sta->he_cap.has_he) 3730 type = RTW89_FW_AX_STA; 3731 else 3732 type = RTW89_FW_N_AC_STA; 3733 3734 goto out; 3735 3736 by_vif: 3737 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3738 3739 if (bss_conf->eht_support) 3740 type = RTW89_FW_BE_STA; 3741 else if (bss_conf->he_support) 3742 type = RTW89_FW_AX_STA; 3743 else 3744 type = RTW89_FW_N_AC_STA; 3745 3746 out: 3747 rcu_read_unlock(); 3748 3749 return type; 3750 } 3751 3752 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3753 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3754 { 3755 struct sk_buff *skb; 3756 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3757 u8 self_role = rtwvif_link->self_role; 3758 enum rtw89_fw_sta_type sta_type; 3759 u8 net_type = rtwvif_link->net_type; 3760 struct rtw89_h2c_join_v1 *h2c_v1; 3761 struct rtw89_h2c_join *h2c; 3762 u32 len = sizeof(*h2c); 3763 bool format_v1 = false; 3764 int ret; 3765 3766 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3767 len = sizeof(*h2c_v1); 3768 format_v1 = true; 3769 } 3770 3771 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3772 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3773 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3774 } 3775 3776 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3777 if (!skb) { 3778 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3779 return -ENOMEM; 3780 } 3781 skb_put(skb, len); 3782 h2c = (struct rtw89_h2c_join *)skb->data; 3783 3784 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3785 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3786 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3787 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3788 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3789 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3790 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3791 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3792 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3793 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3794 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3795 le32_encode_bits(rtwvif_link->wifi_role, 3796 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3797 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3798 3799 if (!format_v1) 3800 goto done; 3801 3802 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3803 3804 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3805 3806 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3807 h2c_v1->w2 = 0; 3808 3809 done: 3810 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3811 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3812 H2C_FUNC_MAC_JOININFO, 0, 1, 3813 len); 3814 3815 ret = rtw89_h2c_tx(rtwdev, skb, false); 3816 if (ret) { 3817 rtw89_err(rtwdev, "failed to send h2c\n"); 3818 goto fail; 3819 } 3820 3821 return 0; 3822 fail: 3823 dev_kfree_skb_any(skb); 3824 3825 return ret; 3826 } 3827 3828 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3829 { 3830 struct rtw89_h2c_notify_dbcc *h2c; 3831 u32 len = sizeof(*h2c); 3832 struct sk_buff *skb; 3833 int ret; 3834 3835 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3836 if (!skb) { 3837 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3838 return -ENOMEM; 3839 } 3840 skb_put(skb, len); 3841 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3842 3843 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3844 3845 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3846 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3847 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3848 len); 3849 3850 ret = rtw89_h2c_tx(rtwdev, skb, false); 3851 if (ret) { 3852 rtw89_err(rtwdev, "failed to send h2c\n"); 3853 goto fail; 3854 } 3855 3856 return 0; 3857 fail: 3858 dev_kfree_skb_any(skb); 3859 3860 return ret; 3861 } 3862 3863 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3864 bool pause) 3865 { 3866 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3867 struct rtw89_fw_macid_pause_grp *h2c; 3868 __le32 set = cpu_to_le32(BIT(sh)); 3869 u8 h2c_macid_pause_id; 3870 struct sk_buff *skb; 3871 u32 len; 3872 int ret; 3873 3874 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3875 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3876 len = sizeof(*h2c_new); 3877 } else { 3878 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3879 len = sizeof(*h2c); 3880 } 3881 3882 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3883 if (!skb) { 3884 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3885 return -ENOMEM; 3886 } 3887 skb_put(skb, len); 3888 3889 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 
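/* With MACID_PAUSE_SLEEP firmware the pause and sleep bitmaps travel in one
 * combined group: both per-group masks are set unconditionally below, and the
 * pause/sleep bits themselves only when pausing.
 */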
3890 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 3891 3892 h2c_new->n[0].pause_mask_grp[grp] = set; 3893 h2c_new->n[0].sleep_mask_grp[grp] = set; 3894 if (pause) { 3895 h2c_new->n[0].pause_grp[grp] = set; 3896 h2c_new->n[0].sleep_grp[grp] = set; 3897 } 3898 } else { 3899 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3900 3901 h2c->mask_grp[grp] = set; 3902 if (pause) 3903 h2c->pause_grp[grp] = set; 3904 } 3905 3906 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3907 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3908 h2c_macid_pause_id, 1, 0, 3909 len); 3910 3911 ret = rtw89_h2c_tx(rtwdev, skb, false); 3912 if (ret) { 3913 rtw89_err(rtwdev, "failed to send h2c\n"); 3914 goto fail; 3915 } 3916 3917 return 0; 3918 fail: 3919 dev_kfree_skb_any(skb); 3920 3921 return ret; 3922 } 3923 3924 #define H2C_EDCA_LEN 12 3925 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3926 u8 ac, u32 val) 3927 { 3928 struct sk_buff *skb; 3929 int ret; 3930 3931 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3932 if (!skb) { 3933 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3934 return -ENOMEM; 3935 } 3936 skb_put(skb, H2C_EDCA_LEN); 3937 RTW89_SET_EDCA_SEL(skb->data, 0); 3938 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 3939 RTW89_SET_EDCA_WMM(skb->data, 0); 3940 RTW89_SET_EDCA_AC(skb->data, ac); 3941 RTW89_SET_EDCA_PARAM(skb->data, val); 3942 3943 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3944 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3945 H2C_FUNC_USR_EDCA, 0, 1, 3946 H2C_EDCA_LEN); 3947 3948 ret = rtw89_h2c_tx(rtwdev, skb, false); 3949 if (ret) { 3950 rtw89_err(rtwdev, "failed to send h2c\n"); 3951 goto fail; 3952 } 3953 3954 return 0; 3955 fail: 3956 dev_kfree_skb_any(skb); 3957 3958 return ret; 3959 } 3960 3961 #define H2C_TSF32_TOGL_LEN 4 3962 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 3963 struct rtw89_vif_link *rtwvif_link, 3964 bool en) 3965 { 3966 struct sk_buff *skb; 3967 u16 early_us = en ? 
2000 : 0; 3968 u8 *cmd; 3969 int ret; 3970 3971 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3972 if (!skb) { 3973 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3974 return -ENOMEM; 3975 } 3976 skb_put(skb, H2C_TSF32_TOGL_LEN); 3977 cmd = skb->data; 3978 3979 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 3980 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3981 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 3982 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3983 3984 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3985 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3986 H2C_FUNC_TSF32_TOGL, 0, 0, 3987 H2C_TSF32_TOGL_LEN); 3988 3989 ret = rtw89_h2c_tx(rtwdev, skb, false); 3990 if (ret) { 3991 rtw89_err(rtwdev, "failed to send h2c\n"); 3992 goto fail; 3993 } 3994 3995 return 0; 3996 fail: 3997 dev_kfree_skb_any(skb); 3998 3999 return ret; 4000 } 4001 4002 #define H2C_OFLD_CFG_LEN 8 4003 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4004 { 4005 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4006 struct sk_buff *skb; 4007 int ret; 4008 4009 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4010 if (!skb) { 4011 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4012 return -ENOMEM; 4013 } 4014 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4015 4016 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4017 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4018 H2C_FUNC_OFLD_CFG, 0, 1, 4019 H2C_OFLD_CFG_LEN); 4020 4021 ret = rtw89_h2c_tx(rtwdev, skb, false); 4022 if (ret) { 4023 rtw89_err(rtwdev, "failed to send h2c\n"); 4024 goto fail; 4025 } 4026 4027 return 0; 4028 fail: 4029 dev_kfree_skb_any(skb); 4030 4031 return ret; 4032 } 4033 4034 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4035 { 4036 struct rtw89_h2c_tx_duty *h2c; 4037 u32 len = sizeof(*h2c); 4038 struct sk_buff *skb; 4039 u16 pause, active; 4040 int ret; 4041 4042 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4043 if (!skb) { 4044 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4045 return -ENOMEM; 4046 } 4047 4048 skb_put(skb, len); 4049 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4050 4051 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4052 4053 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4054 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4055 } else { 4056 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4057 pause = 100 - active; 4058 4059 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4060 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4061 } 4062 4063 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4064 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4065 H2C_FUNC_TX_DUTY, 0, 0, len); 4066 4067 ret = rtw89_h2c_tx(rtwdev, skb, false); 4068 if (ret) { 4069 rtw89_err(rtwdev, "failed to send h2c\n"); 4070 goto fail; 4071 } 4072 4073 return 0; 4074 fail: 4075 dev_kfree_skb_any(skb); 4076 4077 return ret; 4078 } 4079 4080 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4081 struct rtw89_vif_link *rtwvif_link, 4082 bool connect) 4083 { 4084 struct ieee80211_bss_conf *bss_conf; 4085 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4086 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4087 struct rtw89_h2c_bcnfltr *h2c; 4088 u32 len = sizeof(*h2c); 4089 struct sk_buff *skb; 4090 int ret; 4091 4092 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4093 return -EINVAL; 4094 4095 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4096 return -EINVAL; 
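/* CQM threshold/hysteresis come from the link's bss_conf, read under RCU;
 * zero values keep the RTW89_DEFAULT_CQM_* defaults initialized above.
 */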
4097 4098 rcu_read_lock(); 4099 4100 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4101 4102 if (bss_conf->cqm_rssi_hyst) 4103 hyst = bss_conf->cqm_rssi_hyst; 4104 if (bss_conf->cqm_rssi_thold) 4105 thold = bss_conf->cqm_rssi_thold; 4106 4107 rcu_read_unlock(); 4108 4109 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4110 if (!skb) { 4111 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4112 return -ENOMEM; 4113 } 4114 4115 skb_put(skb, len); 4116 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4117 4118 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4119 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4120 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4121 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4122 RTW89_H2C_BCNFLTR_W0_MODE) | 4123 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 4124 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4125 le32_encode_bits(thold + MAX_RSSI, 4126 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4127 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4128 4129 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4130 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4131 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4132 4133 ret = rtw89_h2c_tx(rtwdev, skb, false); 4134 if (ret) { 4135 rtw89_err(rtwdev, "failed to send h2c\n"); 4136 goto fail; 4137 } 4138 4139 return 0; 4140 fail: 4141 dev_kfree_skb_any(skb); 4142 4143 return ret; 4144 } 4145 4146 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4147 struct rtw89_rx_phy_ppdu *phy_ppdu) 4148 { 4149 struct rtw89_h2c_ofld_rssi *h2c; 4150 u32 len = sizeof(*h2c); 4151 struct sk_buff *skb; 4152 s8 rssi; 4153 int ret; 4154 4155 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4156 return -EINVAL; 4157 4158 if (!phy_ppdu) 4159 return -EINVAL; 4160 4161 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4162 if (!skb) { 4163 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4164 return -ENOMEM; 4165 } 4166 4167 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4168 skb_put(skb, len); 4169 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4170 4171 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4172 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4173 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4174 4175 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4176 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4177 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4178 4179 ret = rtw89_h2c_tx(rtwdev, skb, false); 4180 if (ret) { 4181 rtw89_err(rtwdev, "failed to send h2c\n"); 4182 goto fail; 4183 } 4184 4185 return 0; 4186 fail: 4187 dev_kfree_skb_any(skb); 4188 4189 return ret; 4190 } 4191 4192 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4193 { 4194 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4195 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4196 struct rtw89_h2c_ofld *h2c; 4197 u32 len = sizeof(*h2c); 4198 struct sk_buff *skb; 4199 int ret; 4200 4201 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4202 return -EINVAL; 4203 4204 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4205 if (!skb) { 4206 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4207 return -ENOMEM; 4208 } 4209 4210 skb_put(skb, len); 4211 h2c = (struct rtw89_h2c_ofld *)skb->data; 4212 4213 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4214 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4215 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4216 4217 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4218 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4219 H2C_FUNC_OFLD_TP, 0, 1, len); 4220 4221 ret = rtw89_h2c_tx(rtwdev, skb, false); 4222 if (ret) { 4223 rtw89_err(rtwdev, "failed to send h2c\n"); 4224 goto fail; 4225 } 4226 4227 return 0; 4228 fail: 4229 dev_kfree_skb_any(skb); 4230 4231 return ret; 4232 } 4233 4234 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4235 { 4236 const struct rtw89_chip_info *chip = rtwdev->chip; 4237 struct rtw89_h2c_ra_v1 *h2c_v1; 4238 struct rtw89_h2c_ra *h2c; 4239 u32 len = sizeof(*h2c); 4240 bool format_v1 = false; 4241 struct sk_buff *skb; 4242 int ret; 4243 4244 if (chip->chip_gen == RTW89_CHIP_BE) { 4245 len = sizeof(*h2c_v1); 4246 format_v1 = true; 4247 } 4248 4249 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4250 if (!skb) { 4251 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4252 return -ENOMEM; 4253 } 4254 skb_put(skb, len); 4255 h2c = (struct rtw89_h2c_ra *)skb->data; 4256 rtw89_debug(rtwdev, RTW89_DBG_RA, 4257 #if defined(__linux__) 4258 "ra cmd msk: %llx ", ra->ra_mask); 4259 #elif defined(__FreeBSD__) 4260 "ra cmd msk: %jx ", (uintmax_t)ra->ra_mask); 4261 #endif 4262 4263 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4264 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4265 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4266 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4267 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4268 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4269 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4270 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4271 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4272 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4273 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4274 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4275 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4276 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4277 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4278 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4279 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4280 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4281 4282 if (!format_v1) 4283 goto csi; 4284 4285 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4286 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4287 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4288 4289 csi: 4290 if (!csi) 4291 goto done; 4292 4293 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4294 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4295 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4296 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4297 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4298 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4299 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4300 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4301 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4302 4303 done: 4304 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4305 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4306 
H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4307 len); 4308 4309 ret = rtw89_h2c_tx(rtwdev, skb, false); 4310 if (ret) { 4311 rtw89_err(rtwdev, "failed to send h2c\n"); 4312 goto fail; 4313 } 4314 4315 return 0; 4316 fail: 4317 dev_kfree_skb_any(skb); 4318 4319 return ret; 4320 } 4321 4322 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4323 { 4324 struct rtw89_btc *btc = &rtwdev->btc; 4325 struct rtw89_btc_dm *dm = &btc->dm; 4326 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4327 struct rtw89_btc_module *module = &init_info->module; 4328 struct rtw89_btc_ant_info *ant = &module->ant; 4329 struct rtw89_h2c_cxinit *h2c; 4330 u32 len = sizeof(*h2c); 4331 struct sk_buff *skb; 4332 int ret; 4333 4334 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4335 if (!skb) { 4336 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4337 return -ENOMEM; 4338 } 4339 skb_put(skb, len); 4340 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4341 4342 h2c->hdr.type = type; 4343 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4344 4345 h2c->ant_type = ant->type; 4346 h2c->ant_num = ant->num; 4347 h2c->ant_iso = ant->isolation; 4348 h2c->ant_info = 4349 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4350 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4351 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4352 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4353 4354 h2c->mod_rfe = module->rfe_type; 4355 h2c->mod_cv = module->cv; 4356 h2c->mod_info = 4357 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4358 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4359 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4360 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4361 h2c->mod_adie_kt = module->kt_ver_adie; 4362 h2c->wl_gch = init_info->wl_guard_ch; 4363 4364 h2c->info = 4365 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4366 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4367 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4368 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4369 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4370 4371 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4372 H2C_CAT_OUTSRC, BTFC_SET, 4373 SET_DRV_INFO, 0, 0, 4374 len); 4375 4376 ret = rtw89_h2c_tx(rtwdev, skb, false); 4377 if (ret) { 4378 rtw89_err(rtwdev, "failed to send h2c\n"); 4379 goto fail; 4380 } 4381 4382 return 0; 4383 fail: 4384 dev_kfree_skb_any(skb); 4385 4386 return ret; 4387 } 4388 4389 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4390 { 4391 struct rtw89_btc *btc = &rtwdev->btc; 4392 struct rtw89_btc_dm *dm = &btc->dm; 4393 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4394 struct rtw89_h2c_cxinit_v7 *h2c; 4395 u32 len = sizeof(*h2c); 4396 struct sk_buff *skb; 4397 int ret; 4398 4399 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4400 if (!skb) { 4401 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4402 return -ENOMEM; 4403 } 4404 skb_put(skb, len); 4405 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4406 4407 h2c->hdr.type = type; 4408 h2c->hdr.ver = btc->ver->fcxinit; 4409 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4410 h2c->init = *init_info; 4411 4412 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4413 H2C_CAT_OUTSRC, BTFC_SET, 4414 SET_DRV_INFO, 0, 0, 4415 
len); 4416 4417 ret = rtw89_h2c_tx(rtwdev, skb, false); 4418 if (ret) { 4419 rtw89_err(rtwdev, "failed to send h2c\n"); 4420 goto fail; 4421 } 4422 4423 return 0; 4424 fail: 4425 dev_kfree_skb_any(skb); 4426 4427 return ret; 4428 } 4429 4430 #define PORT_DATA_OFFSET 4 4431 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4432 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4433 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4434 4435 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4436 { 4437 struct rtw89_btc *btc = &rtwdev->btc; 4438 const struct rtw89_btc_ver *ver = btc->ver; 4439 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4440 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4441 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4442 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4443 struct sk_buff *skb; 4444 u32 len; 4445 u8 offset = 0; 4446 u8 *cmd; 4447 int ret; 4448 int i; 4449 4450 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4451 4452 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4453 if (!skb) { 4454 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4455 return -ENOMEM; 4456 } 4457 skb_put(skb, len); 4458 cmd = skb->data; 4459 4460 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4461 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4462 4463 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4464 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4465 4466 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4467 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4468 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4469 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4470 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4471 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4472 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4473 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4474 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4475 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4476 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4477 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4478 4479 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4480 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4481 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4482 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4483 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4484 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4485 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4486 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4487 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4488 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4489 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4490 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4491 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4492 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4493 } 4494 4495 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4496 H2C_CAT_OUTSRC, BTFC_SET, 4497 SET_DRV_INFO, 0, 0, 4498 len); 4499 4500 ret = rtw89_h2c_tx(rtwdev, skb, false); 4501 if (ret) { 4502 rtw89_err(rtwdev, "failed to send h2c\n"); 4503 goto fail; 4504 } 4505 4506 return 0; 4507 fail: 4508 dev_kfree_skb_any(skb); 4509 4510 return ret; 4511 } 4512 4513 #define 
H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4514 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4515 4516 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4517 { 4518 struct rtw89_btc *btc = &rtwdev->btc; 4519 const struct rtw89_btc_ver *ver = btc->ver; 4520 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4521 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4522 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4523 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4524 struct sk_buff *skb; 4525 u32 len; 4526 u8 *cmd, offset; 4527 int ret; 4528 int i; 4529 4530 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4531 4532 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4533 if (!skb) { 4534 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4535 return -ENOMEM; 4536 } 4537 skb_put(skb, len); 4538 cmd = skb->data; 4539 4540 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4541 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4542 4543 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4544 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4545 4546 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4547 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4548 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4549 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4550 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4551 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4552 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4553 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4554 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4555 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4556 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4557 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4558 4559 offset = PORT_DATA_OFFSET; 4560 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4561 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4562 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4563 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4564 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4565 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4566 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4567 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4568 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4569 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4570 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4571 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4572 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4573 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4574 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4575 } 4576 4577 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4578 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4579 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4580 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4581 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4582 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4583 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4584 4585 
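/* The DBCC/mrole fields above live in the fixed tail at 'offset'; wrap the
 * whole role info in the common BTC SET_DRV_INFO header and send it.
 */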
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4586 H2C_CAT_OUTSRC, BTFC_SET, 4587 SET_DRV_INFO, 0, 0, 4588 len); 4589 4590 ret = rtw89_h2c_tx(rtwdev, skb, false); 4591 if (ret) { 4592 rtw89_err(rtwdev, "failed to send h2c\n"); 4593 goto fail; 4594 } 4595 4596 return 0; 4597 fail: 4598 dev_kfree_skb_any(skb); 4599 4600 return ret; 4601 } 4602 4603 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4604 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4605 4606 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4607 { 4608 struct rtw89_btc *btc = &rtwdev->btc; 4609 const struct rtw89_btc_ver *ver = btc->ver; 4610 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4611 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4612 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4613 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4614 struct sk_buff *skb; 4615 u32 len; 4616 u8 *cmd, offset; 4617 int ret; 4618 int i; 4619 4620 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4621 4622 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4623 if (!skb) { 4624 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4625 return -ENOMEM; 4626 } 4627 skb_put(skb, len); 4628 cmd = skb->data; 4629 4630 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4631 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4632 4633 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4634 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4635 4636 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4637 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4638 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4639 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4640 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4641 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4642 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4643 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4644 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4645 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4646 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4647 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4648 4649 offset = PORT_DATA_OFFSET; 4650 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4651 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4652 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4653 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4654 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4655 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4656 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4657 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4658 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4659 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4660 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4661 } 4662 4663 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4664 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4665 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4666 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4667 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4668 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4669 
RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4670 4671 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4672 H2C_CAT_OUTSRC, BTFC_SET, 4673 SET_DRV_INFO, 0, 0, 4674 len); 4675 4676 ret = rtw89_h2c_tx(rtwdev, skb, false); 4677 if (ret) { 4678 rtw89_err(rtwdev, "failed to send h2c\n"); 4679 goto fail; 4680 } 4681 4682 return 0; 4683 fail: 4684 dev_kfree_skb_any(skb); 4685 4686 return ret; 4687 } 4688 4689 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4690 { 4691 struct rtw89_btc *btc = &rtwdev->btc; 4692 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4693 struct rtw89_h2c_cxrole_v7 *h2c; 4694 u32 len = sizeof(*h2c); 4695 struct sk_buff *skb; 4696 int ret; 4697 4698 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4699 if (!skb) { 4700 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4701 return -ENOMEM; 4702 } 4703 skb_put(skb, len); 4704 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4705 4706 h2c->hdr.type = type; 4707 h2c->hdr.ver = btc->ver->fwlrole; 4708 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4709 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4710 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4711 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4712 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4713 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4714 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4715 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4716 4717 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4718 H2C_CAT_OUTSRC, BTFC_SET, 4719 SET_DRV_INFO, 0, 0, 4720 len); 4721 4722 ret = rtw89_h2c_tx(rtwdev, skb, false); 4723 if (ret) { 4724 rtw89_err(rtwdev, "failed to send h2c\n"); 4725 goto fail; 4726 } 4727 4728 return 0; 4729 fail: 4730 dev_kfree_skb_any(skb); 4731 4732 return ret; 4733 } 4734 4735 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4736 { 4737 struct rtw89_btc *btc = &rtwdev->btc; 4738 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4739 struct rtw89_h2c_cxrole_v8 *h2c; 4740 u32 len = sizeof(*h2c); 4741 struct sk_buff *skb; 4742 int ret; 4743 4744 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4745 if (!skb) { 4746 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4747 return -ENOMEM; 4748 } 4749 skb_put(skb, len); 4750 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4751 4752 h2c->hdr.type = type; 4753 h2c->hdr.ver = btc->ver->fwlrole; 4754 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4755 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4756 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4757 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4758 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4759 4760 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4761 H2C_CAT_OUTSRC, BTFC_SET, 4762 SET_DRV_INFO, 0, 0, 4763 len); 4764 4765 ret = rtw89_h2c_tx(rtwdev, skb, false); 4766 if (ret) { 4767 rtw89_err(rtwdev, "failed to send h2c\n"); 4768 goto fail; 4769 } 4770 4771 return 0; 4772 fail: 4773 dev_kfree_skb_any(skb); 4774 4775 return ret; 4776 } 4777 4778 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4779 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4780 { 4781 struct rtw89_btc *btc = &rtwdev->btc; 4782 const struct rtw89_btc_ver *ver = btc->ver; 4783 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4784 struct sk_buff *skb; 4785 u8 *cmd; 4786 int ret; 4787 4788 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4789 
if (!skb) { 4790 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4791 return -ENOMEM; 4792 } 4793 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4794 cmd = skb->data; 4795 4796 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4797 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4798 4799 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4800 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4801 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4802 if (ver->fcxctrl == 0) 4803 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4804 4805 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4806 H2C_CAT_OUTSRC, BTFC_SET, 4807 SET_DRV_INFO, 0, 0, 4808 H2C_LEN_CXDRVINFO_CTRL); 4809 4810 ret = rtw89_h2c_tx(rtwdev, skb, false); 4811 if (ret) { 4812 rtw89_err(rtwdev, "failed to send h2c\n"); 4813 goto fail; 4814 } 4815 4816 return 0; 4817 fail: 4818 dev_kfree_skb_any(skb); 4819 4820 return ret; 4821 } 4822 4823 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4824 { 4825 struct rtw89_btc *btc = &rtwdev->btc; 4826 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4827 struct rtw89_h2c_cxctrl_v7 *h2c; 4828 u32 len = sizeof(*h2c); 4829 struct sk_buff *skb; 4830 int ret; 4831 4832 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4833 if (!skb) { 4834 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4835 return -ENOMEM; 4836 } 4837 skb_put(skb, len); 4838 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4839 4840 h2c->hdr.type = type; 4841 h2c->hdr.ver = btc->ver->fcxctrl; 4842 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4843 h2c->ctrl = *ctrl; 4844 4845 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4846 H2C_CAT_OUTSRC, BTFC_SET, 4847 SET_DRV_INFO, 0, 0, len); 4848 4849 ret = rtw89_h2c_tx(rtwdev, skb, false); 4850 if (ret) { 4851 rtw89_err(rtwdev, "failed to send h2c\n"); 4852 goto fail; 4853 } 4854 4855 return 0; 4856 fail: 4857 dev_kfree_skb_any(skb); 4858 4859 return ret; 4860 } 4861 4862 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4863 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4864 { 4865 struct rtw89_btc *btc = &rtwdev->btc; 4866 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4867 struct sk_buff *skb; 4868 u8 *cmd; 4869 int ret; 4870 4871 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4872 if (!skb) { 4873 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4874 return -ENOMEM; 4875 } 4876 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4877 cmd = skb->data; 4878 4879 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4880 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4881 4882 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4883 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4884 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4885 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4886 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4887 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4888 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4889 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4890 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4891 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4892 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4893 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4894 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4895 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4896 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4897 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4898 
RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4899 4900 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4901 H2C_CAT_OUTSRC, BTFC_SET, 4902 SET_DRV_INFO, 0, 0, 4903 H2C_LEN_CXDRVINFO_TRX); 4904 4905 ret = rtw89_h2c_tx(rtwdev, skb, false); 4906 if (ret) { 4907 rtw89_err(rtwdev, "failed to send h2c\n"); 4908 goto fail; 4909 } 4910 4911 return 0; 4912 fail: 4913 dev_kfree_skb_any(skb); 4914 4915 return ret; 4916 } 4917 4918 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4919 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4920 { 4921 struct rtw89_btc *btc = &rtwdev->btc; 4922 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4923 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4924 struct sk_buff *skb; 4925 u8 *cmd; 4926 int ret; 4927 4928 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4929 if (!skb) { 4930 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4931 return -ENOMEM; 4932 } 4933 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4934 cmd = skb->data; 4935 4936 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4937 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4938 4939 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4940 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4941 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4942 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4943 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4944 4945 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4946 H2C_CAT_OUTSRC, BTFC_SET, 4947 SET_DRV_INFO, 0, 0, 4948 H2C_LEN_CXDRVINFO_RFK); 4949 4950 ret = rtw89_h2c_tx(rtwdev, skb, false); 4951 if (ret) { 4952 rtw89_err(rtwdev, "failed to send h2c\n"); 4953 goto fail; 4954 } 4955 4956 return 0; 4957 fail: 4958 dev_kfree_skb_any(skb); 4959 4960 return ret; 4961 } 4962 4963 #define H2C_LEN_PKT_OFLD 4 4964 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4965 { 4966 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4967 struct sk_buff *skb; 4968 unsigned int cond; 4969 u8 *cmd; 4970 int ret; 4971 4972 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4973 if (!skb) { 4974 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4975 return -ENOMEM; 4976 } 4977 skb_put(skb, H2C_LEN_PKT_OFLD); 4978 cmd = skb->data; 4979 4980 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4981 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4982 4983 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4984 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4985 H2C_FUNC_PACKET_OFLD, 1, 1, 4986 H2C_LEN_PKT_OFLD); 4987 4988 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4989 4990 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4991 if (ret < 0) { 4992 rtw89_debug(rtwdev, RTW89_DBG_FW, 4993 "failed to del pkt ofld: id %d, ret %d\n", 4994 id, ret); 4995 return ret; 4996 } 4997 4998 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4999 return 0; 5000 } 5001 5002 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5003 struct sk_buff *skb_ofld) 5004 { 5005 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5006 struct sk_buff *skb; 5007 unsigned int cond; 5008 u8 *cmd; 5009 u8 alloc_id; 5010 int ret; 5011 5012 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5013 RTW89_MAX_PKT_OFLD_NUM); 5014 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5015 return -ENOSPC; 5016 5017 *id = alloc_id; 5018 5019 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5020 if (!skb) { 5021 
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5022 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5023 return -ENOMEM; 5024 } 5025 skb_put(skb, H2C_LEN_PKT_OFLD); 5026 cmd = skb->data; 5027 5028 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5029 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5030 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5031 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5032 5033 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5034 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5035 H2C_FUNC_PACKET_OFLD, 1, 1, 5036 H2C_LEN_PKT_OFLD + skb_ofld->len); 5037 5038 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5039 5040 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5041 if (ret < 0) { 5042 rtw89_debug(rtwdev, RTW89_DBG_FW, 5043 "failed to add pkt ofld: id %d, ret %d\n", 5044 alloc_id, ret); 5045 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5046 return ret; 5047 } 5048 5049 return 0; 5050 } 5051 5052 static 5053 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 5054 struct list_head *chan_list) 5055 { 5056 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5057 struct rtw89_h2c_chinfo_elem *elem; 5058 struct rtw89_mac_chinfo *ch_info; 5059 struct rtw89_h2c_chinfo *h2c; 5060 struct sk_buff *skb; 5061 unsigned int cond; 5062 int skb_len; 5063 int ret; 5064 5065 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5066 5067 skb_len = struct_size(h2c, elem, ch_num); 5068 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5069 if (!skb) { 5070 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5071 return -ENOMEM; 5072 } 5073 skb_put(skb, sizeof(*h2c)); 5074 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5075 5076 h2c->ch_num = ch_num; 5077 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5078 5079 list_for_each_entry(ch_info, chan_list, list) { 5080 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5081 5082 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5083 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5084 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5085 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5086 5087 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5088 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5089 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5090 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5091 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5092 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5093 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5094 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5095 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5096 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5097 5098 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5099 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5100 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5101 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5102 5103 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5104 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5105 le32_encode_bits(ch_info->pkt_id[6], 
RTW89_H2C_CHINFO_W3_PKT6) | 5106 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5107 } 5108 5109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5110 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5111 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5112 5113 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5114 5115 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5116 if (ret) { 5117 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5118 return ret; 5119 } 5120 5121 return 0; 5122 } 5123 5124 static 5125 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5126 struct list_head *chan_list, 5127 struct rtw89_vif_link *rtwvif_link) 5128 { 5129 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5130 struct rtw89_h2c_chinfo_elem_be *elem; 5131 struct rtw89_mac_chinfo_be *ch_info; 5132 struct rtw89_h2c_chinfo_be *h2c; 5133 struct sk_buff *skb; 5134 unsigned int cond; 5135 u8 ver = U8_MAX; 5136 int skb_len; 5137 int ret; 5138 5139 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5140 5141 skb_len = struct_size(h2c, elem, ch_num); 5142 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5143 if (!skb) { 5144 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5145 return -ENOMEM; 5146 } 5147 5148 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5149 ver = 0; 5150 5151 skb_put(skb, sizeof(*h2c)); 5152 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5153 5154 h2c->ch_num = ch_num; 5155 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5156 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5157 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5158 5159 list_for_each_entry(ch_info, chan_list, list) { 5160 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5161 5162 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5163 le32_encode_bits(ch_info->central_ch, 5164 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5165 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5166 5167 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5168 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5169 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5170 le32_encode_bits(ch_info->pause_data, 5171 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5172 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5173 le32_encode_bits(ch_info->rand_seq_num, 5174 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5175 le32_encode_bits(ch_info->notify_action, 5176 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5177 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5178 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5179 le32_encode_bits(ch_info->leave_crit, 5180 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5181 le32_encode_bits(ch_info->chkpt_timer, 5182 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5183 5184 elem->w2 = le32_encode_bits(ch_info->leave_time, 5185 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5186 le32_encode_bits(ch_info->leave_th, 5187 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5188 le32_encode_bits(ch_info->tx_pkt_ctrl, 5189 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5190 5191 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5192 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5193 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5194 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5195 5196 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5197 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5198 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5199 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5200 5201 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5202 le32_encode_bits(ch_info->fw_probe0_ssids, 5203 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5204 5205 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5206 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5207 le32_encode_bits(ch_info->fw_probe0_bssids, 5208 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5209 if (ver == 0) 5210 elem->w0 |= 5211 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5212 else 5213 elem->w7 = le32_encode_bits(ch_info->period, 5214 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5215 } 5216 5217 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5218 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5219 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5220 5221 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5222 5223 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5224 if (ret) { 5225 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5226 return ret; 5227 } 5228 5229 return 0; 5230 } 5231 5232 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5233 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5234 struct rtw89_scan_option *option, 5235 struct rtw89_vif_link *rtwvif_link, 5236 bool wowlan) 5237 { 5238 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5239 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5240 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5241 struct rtw89_h2c_scanofld *h2c; 5242 u32 len = sizeof(*h2c); 5243 struct sk_buff *skb; 5244 unsigned int cond; 5245 u64 tsf = 0; 5246 int ret; 5247 5248 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5249 if (!skb) { 5250 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5251 return -ENOMEM; 5252 } 5253 skb_put(skb, len); 5254 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5255 5256 if (option->delay) { 5257 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5258 if (ret) { 5259 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5260 scan_mode = RTW89_SCAN_IMMEDIATE; 5261 } else { 5262 scan_mode = RTW89_SCAN_DELAY; 5263 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5264 } 5265 } 5266 5267 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5268 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5269 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5270 le32_encode_bits(option->enable, 
RTW89_H2C_SCANOFLD_W0_OPERATION); 5271 5272 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5273 le32_encode_bits(option->target_ch_mode, 5274 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5275 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5276 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5277 5278 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5279 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5280 5281 if (option->target_ch_mode) { 5282 h2c->w1 |= le32_encode_bits(op->band_width, 5283 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5284 le32_encode_bits(op->primary_channel, 5285 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5286 le32_encode_bits(op->channel, 5287 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5288 h2c->w0 |= le32_encode_bits(op->band_type, 5289 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5290 } 5291 5292 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5293 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5294 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5295 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5296 5297 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5298 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5299 H2C_FUNC_SCANOFLD, 1, 1, 5300 len); 5301 5302 if (option->enable) 5303 cond = RTW89_SCANOFLD_WAIT_COND_START; 5304 else 5305 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5306 5307 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5308 if (ret) { 5309 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5310 return ret; 5311 } 5312 5313 return 0; 5314 } 5315 5316 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5317 struct rtw89_scan_option *option) 5318 { 5319 struct ieee80211_supported_band *sband; 5320 struct ieee80211_channel *chan; 5321 u8 i, idx; 5322 5323 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5324 if (!sband) { 5325 option->prohib_chan = U64_MAX; 5326 return; 5327 } 5328 5329 for (i = 0; i < sband->n_channels; i++) { 5330 chan = &sband->channels[i]; 5331 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5332 idx = (chan->hw_value - 1) / 4; 5333 option->prohib_chan |= BIT(idx); 5334 } 5335 } 5336 } 5337 5338 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5339 struct rtw89_scan_option *option, 5340 struct rtw89_vif_link *rtwvif_link, 5341 bool wowlan) 5342 { 5343 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5344 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5345 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5346 struct cfg80211_scan_request *req = rtwvif->scan_req; 5347 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5348 struct rtw89_chan *op = &scan_info->op_chan; 5349 struct rtw89_h2c_scanofld_be_opch *opch; 5350 struct rtw89_pktofld_info *pkt_info; 5351 struct rtw89_h2c_scanofld_be *h2c; 5352 struct sk_buff *skb; 5353 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5354 u8 opch_size = sizeof(*opch) * option->num_opch; 5355 u8 probe_id[NUM_NL80211_BANDS]; 5356 u8 cfg_len = sizeof(*h2c); 5357 unsigned int cond; 5358 u8 ver = U8_MAX; 5359 #if defined(__linux__) 5360 void *ptr; 5361 #elif defined(__FreeBSD__) 5362 u8 *ptr; 5363 #endif 5364 int ret; 5365 u32 len; 5366 u8 i; 5367 5368 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5369 5370 len = cfg_len + macc_role_size + opch_size; 5371 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5372 if (!skb) { 5373 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5374 return -ENOMEM; 5375 } 5376 5377 skb_put(skb, len); 5378 h2c = (struct 
rtw89_h2c_scanofld_be *)skb->data; 5379 ptr = skb->data; 5380 5381 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5382 5383 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5384 ver = 0; 5385 5386 if (!wowlan) { 5387 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5388 if (pkt_info->wildcard_6ghz) { 5389 /* Provide wildcard as template */ 5390 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5391 break; 5392 } 5393 } 5394 } 5395 5396 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5397 le32_encode_bits(option->scan_mode, 5398 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5399 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5400 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5401 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5402 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5403 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5404 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5405 5406 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5407 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5408 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5409 5410 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5411 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5412 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5413 5414 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5415 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5416 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5417 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5418 5419 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5420 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5421 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5422 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5423 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5424 5425 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5426 5427 h2c->w6 = le32_encode_bits(option->prohib_chan, 5428 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5429 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5430 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5431 if (!wowlan && req->no_cck) { 5432 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5433 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5434 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5435 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5436 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5437 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5438 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5439 } 5440 5441 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5442 cfg_len = offsetofend(typeof(*h2c), w8); 5443 goto flex_member; 5444 } 5445 5446 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5447 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5448 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5449 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5450 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5451 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5452 5453 flex_member: 5454 ptr += cfg_len; 5455 5456 for (i = 0; i < option->num_macc_role; i++) { 5457 #if defined(__linux__) 5458 macc_role = ptr; 5459 #elif defined(__FreeBSD__) 5460 macc_role = (void *)ptr; 5461 #endif 5462 macc_role->w0 = 
5463 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5464 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5465 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5466 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5467 ptr += sizeof(*macc_role); 5468 } 5469 5470 for (i = 0; i < option->num_opch; i++) { 5471 #if defined(__linux__) 5472 opch = ptr; 5473 #elif defined(__FreeBSD__) 5474 opch = (void *)ptr; 5475 #endif 5476 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5477 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5478 le32_encode_bits(option->band, 5479 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5480 le32_encode_bits(rtwvif_link->port, 5481 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5482 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5483 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5484 le32_encode_bits(true, 5485 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5486 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5487 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5488 5489 opch->w1 = le32_encode_bits(op->band_type, 5490 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5491 le32_encode_bits(op->band_width, 5492 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5493 le32_encode_bits(0x3, 5494 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5495 le32_encode_bits(op->primary_channel, 5496 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5497 le32_encode_bits(op->channel, 5498 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5499 5500 opch->w2 = le32_encode_bits(0, 5501 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5502 le32_encode_bits(0, 5503 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5504 le32_encode_bits(2, 5505 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5506 5507 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5508 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5509 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5510 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5511 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5512 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5513 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5514 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5515 5516 if (ver == 0) 5517 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5518 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5519 else 5520 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5521 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5522 ptr += sizeof(*opch); 5523 } 5524 5525 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5526 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5527 H2C_FUNC_SCANOFLD_BE, 1, 1, 5528 len); 5529 5530 if (option->enable) 5531 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5532 else 5533 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5534 5535 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5536 if (ret) { 5537 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5538 return ret; 5539 } 5540 5541 return 0; 5542 } 5543 5544 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5545 struct rtw89_fw_h2c_rf_reg_info *info, 5546 u16 len, u8 page) 5547 { 5548 struct sk_buff *skb; 5549 u8 class = info->rf_path == RF_PATH_A ? 
5550 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5551 int ret; 5552 5553 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5554 if (!skb) { 5555 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5556 return -ENOMEM; 5557 } 5558 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5559 5560 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5561 H2C_CAT_OUTSRC, class, page, 0, 0, 5562 len); 5563 5564 ret = rtw89_h2c_tx(rtwdev, skb, false); 5565 if (ret) { 5566 rtw89_err(rtwdev, "failed to send h2c\n"); 5567 goto fail; 5568 } 5569 5570 return 0; 5571 fail: 5572 dev_kfree_skb_any(skb); 5573 5574 return ret; 5575 } 5576 5577 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5578 { 5579 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5580 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5581 struct sk_buff *skb; 5582 int ret; 5583 u8 idx; 5584 5585 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5586 if (!skb) { 5587 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5588 return -ENOMEM; 5589 } 5590 skb_put(skb, sizeof(*mccch)); 5591 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5592 5593 idx = rfk_mcc->table_idx; 5594 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5595 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5596 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5597 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5598 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5599 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5600 5601 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5602 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5603 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5604 sizeof(*mccch)); 5605 5606 ret = rtw89_h2c_tx(rtwdev, skb, false); 5607 if (ret) { 5608 rtw89_err(rtwdev, "failed to send h2c\n"); 5609 goto fail; 5610 } 5611 5612 return 0; 5613 fail: 5614 dev_kfree_skb_any(skb); 5615 5616 return ret; 5617 } 5618 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5619 5620 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5621 enum rtw89_phy_idx phy_idx) 5622 { 5623 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5624 struct rtw89_fw_h2c_rfk_pre_info_common *common; 5625 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5626 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 5627 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5628 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5629 u32 len = sizeof(*h2c); 5630 struct sk_buff *skb; 5631 u8 ver = U8_MAX; 5632 u8 tbl, path; 5633 u32 val32; 5634 int ret; 5635 5636 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 5637 len = sizeof(*h2c_v1); 5638 ver = 1; 5639 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5640 len = sizeof(*h2c_v0); 5641 ver = 0; 5642 } 5643 5644 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5645 if (!skb) { 5646 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5647 return -ENOMEM; 5648 } 5649 skb_put(skb, len); 5650 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5651 common = &h2c->base_v1.common; 5652 5653 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5654 5655 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5656 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5657 5658 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5659 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5660 common->dbcc.ch[path][tbl] = 5661 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5662 common->dbcc.band[path][tbl] = 5663 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5664 } 5665 } 5666 5667 
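/* For each RF path, pick the table entry selected by table_idx and report it
 * as the current channel/band; layouts newer than v1 (when neither
 * RFK_PRE_NOTIFY_V0 nor _V1 is advertised) additionally carry the bandwidth.
 */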
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5668 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5669 5670 common->tbl.cur_ch[path] = 5671 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5672 common->tbl.cur_band[path] = 5673 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5674 5675 if (ver <= 1) 5676 continue; 5677 5678 h2c->cur_bandwidth[path] = 5679 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5680 } 5681 5682 common->phy_idx = cpu_to_le32(phy_idx); 5683 5684 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5685 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5686 5687 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5688 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5689 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5690 5691 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5692 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5693 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5694 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5695 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5696 h2c_v0->rfmod0 = cpu_to_le32(val32); 5697 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5698 h2c_v0->rfmod1 = cpu_to_le32(val32); 5699 5700 if (rtw89_is_mlo_1_1(rtwdev)) 5701 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5702 5703 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5704 5705 goto done; 5706 } 5707 5708 if (rtw89_is_mlo_1_1(rtwdev)) { 5709 h2c_v1 = &h2c->base_v1; 5710 h2c_v1->mlo_1_1 = cpu_to_le32(1); 5711 } 5712 done: 5713 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5714 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5715 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5716 len); 5717 5718 ret = rtw89_h2c_tx(rtwdev, skb, false); 5719 if (ret) { 5720 rtw89_err(rtwdev, "failed to send h2c\n"); 5721 goto fail; 5722 } 5723 5724 return 0; 5725 fail: 5726 dev_kfree_skb_any(skb); 5727 5728 return ret; 5729 } 5730 5731 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5732 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5733 { 5734 struct rtw89_hal *hal = &rtwdev->hal; 5735 struct rtw89_h2c_rf_tssi *h2c; 5736 u32 len = sizeof(*h2c); 5737 struct sk_buff *skb; 5738 int ret; 5739 5740 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5741 if (!skb) { 5742 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5743 return -ENOMEM; 5744 } 5745 skb_put(skb, len); 5746 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5747 5748 h2c->len = cpu_to_le16(len); 5749 h2c->phy = phy_idx; 5750 h2c->ch = chan->channel; 5751 h2c->bw = chan->band_width; 5752 h2c->band = chan->band_type; 5753 h2c->hwtx_en = true; 5754 h2c->cv = hal->cv; 5755 h2c->tssi_mode = tssi_mode; 5756 5757 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5758 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5759 5760 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5761 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5762 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5763 5764 ret = rtw89_h2c_tx(rtwdev, skb, false); 5765 if (ret) { 5766 rtw89_err(rtwdev, "failed to send h2c\n"); 5767 goto fail; 5768 } 5769 5770 return 0; 5771 fail: 5772 dev_kfree_skb_any(skb); 5773 5774 return ret; 5775 } 5776 5777 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5778 const struct rtw89_chan *chan) 5779 { 5780 struct rtw89_h2c_rf_iqk *h2c; 5781 u32 len = sizeof(*h2c); 5782 struct sk_buff *skb; 5783 int ret; 5784 5785 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5786 if (!skb) { 5787 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5788 return -ENOMEM; 5789 } 5790 skb_put(skb, len); 5791 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5792 5793 h2c->phy_idx = cpu_to_le32(phy_idx); 5794 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5795 5796 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5797 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5798 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5799 5800 ret = rtw89_h2c_tx(rtwdev, skb, false); 5801 if (ret) { 5802 rtw89_err(rtwdev, "failed to send h2c\n"); 5803 goto fail; 5804 } 5805 5806 return 0; 5807 fail: 5808 dev_kfree_skb_any(skb); 5809 5810 return ret; 5811 } 5812 5813 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5814 const struct rtw89_chan *chan) 5815 { 5816 struct rtw89_h2c_rf_dpk *h2c; 5817 u32 len = sizeof(*h2c); 5818 struct sk_buff *skb; 5819 int ret; 5820 5821 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5822 if (!skb) { 5823 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5824 return -ENOMEM; 5825 } 5826 skb_put(skb, len); 5827 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5828 5829 h2c->len = len; 5830 h2c->phy = phy_idx; 5831 h2c->dpk_enable = true; 5832 h2c->kpath = RF_AB; 5833 h2c->cur_band = chan->band_type; 5834 h2c->cur_bw = chan->band_width; 5835 h2c->cur_ch = chan->channel; 5836 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5837 5838 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5839 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5840 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5841 5842 ret = rtw89_h2c_tx(rtwdev, skb, false); 5843 if (ret) { 5844 rtw89_err(rtwdev, "failed to send h2c\n"); 5845 goto fail; 5846 } 5847 5848 return 0; 5849 fail: 5850 dev_kfree_skb_any(skb); 5851 5852 return ret; 5853 } 5854 5855 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5856 const struct rtw89_chan *chan) 5857 { 5858 struct rtw89_hal *hal = &rtwdev->hal; 5859 struct rtw89_h2c_rf_txgapk *h2c; 5860 u32 len = sizeof(*h2c); 5861 struct sk_buff *skb; 5862 int ret; 5863 5864 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5865 if (!skb) { 5866 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5867 return -ENOMEM; 5868 } 5869 skb_put(skb, len); 5870 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5871 5872 h2c->len = len; 5873 h2c->ktype = 2; 5874 h2c->phy = phy_idx; 5875 h2c->kpath = RF_AB; 5876 h2c->band = chan->band_type; 5877 h2c->bw = chan->band_width; 5878 h2c->ch = chan->channel; 5879 h2c->cv = hal->cv; 5880 5881 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5882 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5883 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5884 5885 ret = rtw89_h2c_tx(rtwdev, skb, false); 5886 if (ret) { 5887 rtw89_err(rtwdev, "failed to send h2c\n"); 5888 goto fail; 5889 } 5890 5891 return 0; 5892 fail: 5893 dev_kfree_skb_any(skb); 5894 5895 return ret; 5896 } 5897 5898 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5899 const struct rtw89_chan *chan) 5900 { 5901 struct rtw89_h2c_rf_dack *h2c; 5902 u32 len = sizeof(*h2c); 5903 struct sk_buff *skb; 5904 int ret; 5905 5906 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5907 if (!skb) { 5908 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5909 return -ENOMEM; 5910 } 5911 skb_put(skb, len); 5912 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5913 5914 h2c->len = cpu_to_le32(len); 5915 h2c->phy = cpu_to_le32(phy_idx); 5916 h2c->type = 
cpu_to_le32(0); 5917 5918 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5919 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5920 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5921 5922 ret = rtw89_h2c_tx(rtwdev, skb, false); 5923 if (ret) { 5924 rtw89_err(rtwdev, "failed to send h2c\n"); 5925 goto fail; 5926 } 5927 5928 return 0; 5929 fail: 5930 dev_kfree_skb_any(skb); 5931 5932 return ret; 5933 } 5934 5935 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5936 const struct rtw89_chan *chan, bool is_chl_k) 5937 { 5938 struct rtw89_h2c_rf_rxdck_v0 *v0; 5939 struct rtw89_h2c_rf_rxdck *h2c; 5940 u32 len = sizeof(*h2c); 5941 struct sk_buff *skb; 5942 int ver = -1; 5943 int ret; 5944 5945 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 5946 len = sizeof(*v0); 5947 ver = 0; 5948 } 5949 5950 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5951 if (!skb) { 5952 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5953 return -ENOMEM; 5954 } 5955 skb_put(skb, len); 5956 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 5957 5958 v0->len = len; 5959 v0->phy = phy_idx; 5960 v0->is_afe = false; 5961 v0->kpath = RF_AB; 5962 v0->cur_band = chan->band_type; 5963 v0->cur_bw = chan->band_width; 5964 v0->cur_ch = chan->channel; 5965 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5966 5967 if (ver == 0) 5968 goto hdr; 5969 5970 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5971 h2c->is_chl_k = is_chl_k; 5972 5973 hdr: 5974 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5975 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5976 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5977 5978 ret = rtw89_h2c_tx(rtwdev, skb, false); 5979 if (ret) { 5980 rtw89_err(rtwdev, "failed to send h2c\n"); 5981 goto fail; 5982 } 5983 5984 return 0; 5985 fail: 5986 dev_kfree_skb_any(skb); 5987 5988 return ret; 5989 } 5990 5991 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5992 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5993 bool rack, bool dack) 5994 { 5995 struct sk_buff *skb; 5996 int ret; 5997 5998 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5999 if (!skb) { 6000 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6001 return -ENOMEM; 6002 } 6003 skb_put_data(skb, buf, len); 6004 6005 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6006 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6007 len); 6008 6009 ret = rtw89_h2c_tx(rtwdev, skb, false); 6010 if (ret) { 6011 rtw89_err(rtwdev, "failed to send h2c\n"); 6012 goto fail; 6013 } 6014 6015 return 0; 6016 fail: 6017 dev_kfree_skb_any(skb); 6018 6019 return ret; 6020 } 6021 6022 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6023 { 6024 struct sk_buff *skb; 6025 int ret; 6026 6027 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6028 if (!skb) { 6029 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6030 return -ENOMEM; 6031 } 6032 skb_put_data(skb, buf, len); 6033 6034 ret = rtw89_h2c_tx(rtwdev, skb, false); 6035 if (ret) { 6036 rtw89_err(rtwdev, "failed to send h2c\n"); 6037 goto fail; 6038 } 6039 6040 return 0; 6041 fail: 6042 dev_kfree_skb_any(skb); 6043 6044 return ret; 6045 } 6046 6047 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6048 { 6049 struct rtw89_early_h2c *early_h2c; 6050 6051 lockdep_assert_held(&rtwdev->mutex); 6052 6053 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6054 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 6055 } 6056 } 6057 6058 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6059 { 
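/* Drain the early-H2C list under the device mutex, freeing both the queued
 * command payload and its list node.
 */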
6060 struct rtw89_early_h2c *early_h2c, *tmp; 6061 6062 mutex_lock(&rtwdev->mutex); 6063 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6064 list_del(&early_h2c->list); 6065 kfree(early_h2c->h2c); 6066 kfree(early_h2c); 6067 } 6068 mutex_unlock(&rtwdev->mutex); 6069 } 6070 6071 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6072 { 6073 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6074 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6075 6076 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6077 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6078 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6079 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6080 } 6081 6082 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6083 struct sk_buff *c2h) 6084 { 6085 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6086 u8 category = attr->category; 6087 u8 class = attr->class; 6088 u8 func = attr->func; 6089 6090 switch (category) { 6091 default: 6092 return false; 6093 case RTW89_C2H_CAT_MAC: 6094 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6095 case RTW89_C2H_CAT_OUTSRC: 6096 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6097 } 6098 } 6099 6100 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6101 { 6102 rtw89_fw_c2h_parse_attr(c2h); 6103 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6104 goto enqueue; 6105 6106 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6107 dev_kfree_skb_any(c2h); 6108 return; 6109 6110 enqueue: 6111 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6112 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 6113 } 6114 6115 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6116 struct sk_buff *skb) 6117 { 6118 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6119 u8 category = attr->category; 6120 u8 class = attr->class; 6121 u8 func = attr->func; 6122 u16 len = attr->len; 6123 bool dump = true; 6124 6125 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6126 return; 6127 6128 switch (category) { 6129 case RTW89_C2H_CAT_TEST: 6130 break; 6131 case RTW89_C2H_CAT_MAC: 6132 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6133 if (class == RTW89_MAC_C2H_CLASS_INFO && 6134 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6135 dump = false; 6136 break; 6137 case RTW89_C2H_CAT_OUTSRC: 6138 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6139 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6140 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6141 else 6142 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6143 break; 6144 } 6145 6146 if (dump) 6147 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6148 } 6149 6150 void rtw89_fw_c2h_work(struct work_struct *work) 6151 { 6152 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6153 c2h_work); 6154 struct sk_buff *skb, *tmp; 6155 6156 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6157 skb_unlink(skb, &rtwdev->c2h_queue); 6158 mutex_lock(&rtwdev->mutex); 6159 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6160 mutex_unlock(&rtwdev->mutex); 6161 dev_kfree_skb_any(skb); 6162 } 6163 } 6164 6165 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6166 struct rtw89_mac_h2c_info *info) 6167 { 6168 const struct rtw89_chip_info *chip = rtwdev->chip; 6169 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6170 const u32 *h2c_reg = chip->h2c_regs; 6171 u8 i, val, len; 6172 int ret; 6173 6174 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 
5000, false, 6175 rtwdev, chip->h2c_ctrl_reg); 6176 if (ret) { 6177 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6178 return ret; 6179 } 6180 6181 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6182 sizeof(info->u.h2creg[0])); 6183 6184 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6185 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6186 6187 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6188 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6189 6190 fw_info->h2c_counter++; 6191 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6192 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6193 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6194 6195 return 0; 6196 } 6197 6198 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6199 struct rtw89_mac_c2h_info *info) 6200 { 6201 const struct rtw89_chip_info *chip = rtwdev->chip; 6202 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6203 const u32 *c2h_reg = chip->c2h_regs; 6204 u32 ret; 6205 u8 i, val; 6206 6207 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6208 6209 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6210 RTW89_C2H_TIMEOUT, false, rtwdev, 6211 chip->c2h_ctrl_reg); 6212 if (ret) { 6213 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6214 return ret; 6215 } 6216 6217 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6218 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6219 6220 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6221 6222 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6223 info->content_len = 6224 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6225 RTW89_C2HREG_HDR_LEN; 6226 6227 fw_info->c2h_counter++; 6228 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6229 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6230 6231 return 0; 6232 } 6233 6234 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6235 struct rtw89_mac_h2c_info *h2c_info, 6236 struct rtw89_mac_c2h_info *c2h_info) 6237 { 6238 u32 ret; 6239 6240 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6241 lockdep_assert_held(&rtwdev->mutex); 6242 6243 if (!h2c_info && !c2h_info) 6244 return -EINVAL; 6245 6246 if (!h2c_info) 6247 goto recv_c2h; 6248 6249 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6250 if (ret) 6251 return ret; 6252 6253 recv_c2h: 6254 if (!c2h_info) 6255 return 0; 6256 6257 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6258 if (ret) 6259 return ret; 6260 6261 return 0; 6262 } 6263 6264 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6265 { 6266 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6267 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6268 return; 6269 } 6270 6271 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6272 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6273 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6274 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6275 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6276 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6277 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6278 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6279 6280 rtw89_fw_prog_cnt_dump(rtwdev); 6281 } 6282 6283 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6284 { 6285 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6286 struct rtw89_pktofld_info *info, *tmp; 6287 u8 idx; 6288 6289 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6290 if 
(!(rtwdev->chip->support_bands & BIT(idx))) 6291 continue; 6292 6293 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6294 if (test_bit(info->id, rtwdev->pkt_offload)) 6295 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6296 list_del(&info->list); 6297 kfree(info); 6298 } 6299 } 6300 } 6301 6302 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6303 struct cfg80211_scan_request *req, 6304 struct rtw89_pktofld_info *info, 6305 enum nl80211_band band, u8 ssid_idx) 6306 { 6307 if (band != NL80211_BAND_6GHZ) 6308 return false; 6309 6310 if (req->ssids[ssid_idx].ssid_len) { 6311 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6312 req->ssids[ssid_idx].ssid_len); 6313 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6314 return false; 6315 } else { 6316 info->wildcard_6ghz = true; 6317 return true; 6318 } 6319 } 6320 6321 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6322 struct rtw89_vif_link *rtwvif_link, 6323 struct sk_buff *skb, u8 ssid_idx) 6324 { 6325 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6326 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6327 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6328 struct cfg80211_scan_request *req = rtwvif->scan_req; 6329 struct rtw89_pktofld_info *info; 6330 struct sk_buff *new; 6331 int ret = 0; 6332 u8 band; 6333 6334 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6335 if (!(rtwdev->chip->support_bands & BIT(band))) 6336 continue; 6337 6338 new = skb_copy(skb, GFP_KERNEL); 6339 if (!new) { 6340 ret = -ENOMEM; 6341 goto out; 6342 } 6343 skb_put_data(new, ies->ies[band], ies->len[band]); 6344 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6345 6346 info = kzalloc(sizeof(*info), GFP_KERNEL); 6347 if (!info) { 6348 ret = -ENOMEM; 6349 kfree_skb(new); 6350 goto out; 6351 } 6352 6353 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6354 6355 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6356 if (ret) { 6357 kfree_skb(new); 6358 kfree(info); 6359 goto out; 6360 } 6361 6362 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6363 kfree_skb(new); 6364 } 6365 out: 6366 return ret; 6367 } 6368 6369 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6370 struct rtw89_vif_link *rtwvif_link) 6371 { 6372 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6373 struct cfg80211_scan_request *req = rtwvif->scan_req; 6374 struct sk_buff *skb; 6375 u8 num = req->n_ssids, i; 6376 int ret; 6377 6378 for (i = 0; i < num; i++) { 6379 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6380 req->ssids[i].ssid, 6381 req->ssids[i].ssid_len, 6382 req->ie_len); 6383 if (!skb) 6384 return -ENOMEM; 6385 6386 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6387 kfree_skb(skb); 6388 6389 if (ret) 6390 return ret; 6391 } 6392 6393 return 0; 6394 } 6395 6396 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6397 struct ieee80211_scan_ies *ies, 6398 struct cfg80211_scan_request *req, 6399 struct rtw89_mac_chinfo *ch_info) 6400 { 6401 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6402 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6403 struct cfg80211_scan_6ghz_params *params; 6404 struct rtw89_pktofld_info *info, *tmp; 6405 struct ieee80211_hdr *hdr; 6406 struct sk_buff *skb; 6407 bool found; 6408 int ret = 0; 6409 u8 i; 6410 6411 if (!req->n_6ghz_params) 6412 return 0; 6413 6414 for (i = 0; i < req->n_6ghz_params; i++) { 6415 params = &req->scan_6ghz_params[i]; 6416 
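/* Only consider 6 GHz scan params that target the channel being built here,
 * and skip reported BSSIDs that already have an offloaded probe request.
 */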
6417 if (req->channels[params->channel_idx]->hw_value != 6418 ch_info->pri_ch) 6419 continue; 6420 6421 found = false; 6422 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6423 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6424 found = true; 6425 break; 6426 } 6427 } 6428 if (found) 6429 continue; 6430 6431 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6432 NULL, 0, req->ie_len); 6433 if (!skb) 6434 return -ENOMEM; 6435 6436 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6437 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6438 hdr = (struct ieee80211_hdr *)skb->data; 6439 ether_addr_copy(hdr->addr3, params->bssid); 6440 6441 info = kzalloc(sizeof(*info), GFP_KERNEL); 6442 if (!info) { 6443 ret = -ENOMEM; 6444 kfree_skb(skb); 6445 goto out; 6446 } 6447 6448 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6449 if (ret) { 6450 kfree_skb(skb); 6451 kfree(info); 6452 goto out; 6453 } 6454 6455 ether_addr_copy(info->bssid, params->bssid); 6456 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6457 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6458 6459 ch_info->tx_pkt = true; 6460 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6461 6462 kfree_skb(skb); 6463 } 6464 6465 out: 6466 return ret; 6467 } 6468 6469 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6470 int chan_type, int ssid_num, 6471 struct rtw89_mac_chinfo *ch_info) 6472 { 6473 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6474 struct rtw89_pktofld_info *info; 6475 u8 probe_count = 0; 6476 6477 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6478 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6479 ch_info->bw = RTW89_SCAN_WIDTH; 6480 ch_info->tx_pkt = true; 6481 ch_info->cfg_tx_pwr = false; 6482 ch_info->tx_pwr_idx = 0; 6483 ch_info->tx_null = false; 6484 ch_info->pause_data = false; 6485 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6486 6487 if (ssid_num) { 6488 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6489 if (info->channel_6ghz && 6490 ch_info->pri_ch != info->channel_6ghz) 6491 continue; 6492 else if (info->channel_6ghz && probe_count != 0) 6493 ch_info->period += RTW89_CHANNEL_TIME_6G; 6494 6495 if (info->wildcard_6ghz) 6496 continue; 6497 6498 ch_info->pkt_id[probe_count++] = info->id; 6499 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6500 break; 6501 } 6502 ch_info->num_pkt = probe_count; 6503 } 6504 6505 switch (chan_type) { 6506 case RTW89_CHAN_DFS: 6507 if (ch_info->ch_band != RTW89_BAND_6G) 6508 ch_info->period = max_t(u8, ch_info->period, 6509 RTW89_DFS_CHAN_TIME); 6510 ch_info->dwell_time = RTW89_DWELL_TIME; 6511 break; 6512 case RTW89_CHAN_ACTIVE: 6513 break; 6514 default: 6515 rtw89_err(rtwdev, "Channel type out of bound\n"); 6516 } 6517 } 6518 6519 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6520 int ssid_num, 6521 struct rtw89_mac_chinfo *ch_info) 6522 { 6523 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6524 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6525 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6526 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6527 struct cfg80211_scan_request *req = rtwvif->scan_req; 6528 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6529 struct rtw89_pktofld_info *info; 6530 u8 band, probe_count = 0; 6531 int ret; 6532 6533 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6534 ch_info->dfs_ch = chan_type == 
RTW89_CHAN_DFS; 6535 ch_info->bw = RTW89_SCAN_WIDTH; 6536 ch_info->tx_pkt = true; 6537 ch_info->cfg_tx_pwr = false; 6538 ch_info->tx_pwr_idx = 0; 6539 ch_info->tx_null = false; 6540 ch_info->pause_data = false; 6541 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6542 6543 if (ch_info->ch_band == RTW89_BAND_6G) { 6544 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6545 !ch_info->is_psc) { 6546 ch_info->tx_pkt = false; 6547 if (!req->duration_mandatory) 6548 ch_info->period -= RTW89_DWELL_TIME_6G; 6549 } 6550 } 6551 6552 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6553 if (ret) 6554 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6555 6556 if (ssid_num) { 6557 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6558 6559 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6560 if (info->channel_6ghz && 6561 ch_info->pri_ch != info->channel_6ghz) 6562 continue; 6563 else if (info->channel_6ghz && probe_count != 0) 6564 ch_info->period += RTW89_CHANNEL_TIME_6G; 6565 6566 if (info->wildcard_6ghz) 6567 continue; 6568 6569 ch_info->pkt_id[probe_count++] = info->id; 6570 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6571 break; 6572 } 6573 ch_info->num_pkt = probe_count; 6574 } 6575 6576 switch (chan_type) { 6577 case RTW89_CHAN_OPERATE: 6578 ch_info->central_ch = op->channel; 6579 ch_info->pri_ch = op->primary_channel; 6580 ch_info->ch_band = op->band_type; 6581 ch_info->bw = op->band_width; 6582 ch_info->tx_null = true; 6583 ch_info->num_pkt = 0; 6584 break; 6585 case RTW89_CHAN_DFS: 6586 if (ch_info->ch_band != RTW89_BAND_6G) 6587 ch_info->period = max_t(u8, ch_info->period, 6588 RTW89_DFS_CHAN_TIME); 6589 ch_info->dwell_time = RTW89_DWELL_TIME; 6590 ch_info->pause_data = true; 6591 break; 6592 case RTW89_CHAN_ACTIVE: 6593 ch_info->pause_data = true; 6594 break; 6595 default: 6596 rtw89_err(rtwdev, "Channel type out of bound\n"); 6597 } 6598 } 6599 6600 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6601 int ssid_num, 6602 struct rtw89_mac_chinfo_be *ch_info) 6603 { 6604 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6605 struct rtw89_pktofld_info *info; 6606 u8 probe_count = 0, i; 6607 6608 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6609 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6610 ch_info->bw = RTW89_SCAN_WIDTH; 6611 ch_info->tx_null = false; 6612 ch_info->pause_data = false; 6613 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6614 6615 if (ssid_num) { 6616 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6617 ch_info->pkt_id[probe_count++] = info->id; 6618 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6619 break; 6620 } 6621 } 6622 6623 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6624 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6625 6626 switch (chan_type) { 6627 case RTW89_CHAN_DFS: 6628 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6629 ch_info->dwell_time = RTW89_DWELL_TIME; 6630 break; 6631 case RTW89_CHAN_ACTIVE: 6632 break; 6633 default: 6634 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6635 break; 6636 } 6637 } 6638 6639 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6640 int ssid_num, 6641 struct rtw89_mac_chinfo_be *ch_info) 6642 { 6643 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6644 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6645 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6646 struct cfg80211_scan_request *req = rtwvif->scan_req; 6647 struct rtw89_pktofld_info *info; 6648 u8 band, 
probe_count = 0, i; 6649 6650 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6651 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6652 ch_info->bw = RTW89_SCAN_WIDTH; 6653 ch_info->tx_null = false; 6654 ch_info->pause_data = false; 6655 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6656 6657 if (ssid_num) { 6658 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6659 6660 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6661 if (info->channel_6ghz && 6662 ch_info->pri_ch != info->channel_6ghz) 6663 continue; 6664 6665 if (info->wildcard_6ghz) 6666 continue; 6667 6668 ch_info->pkt_id[probe_count++] = info->id; 6669 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6670 break; 6671 } 6672 } 6673 6674 if (ch_info->ch_band == RTW89_BAND_6G) { 6675 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6676 !ch_info->is_psc) { 6677 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6678 if (!req->duration_mandatory) 6679 ch_info->period -= RTW89_DWELL_TIME_6G; 6680 } 6681 } 6682 6683 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6684 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6685 6686 switch (chan_type) { 6687 case RTW89_CHAN_DFS: 6688 if (ch_info->ch_band != RTW89_BAND_6G) 6689 ch_info->period = 6690 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6691 ch_info->dwell_time = RTW89_DWELL_TIME; 6692 ch_info->pause_data = true; 6693 break; 6694 case RTW89_CHAN_ACTIVE: 6695 ch_info->pause_data = true; 6696 break; 6697 default: 6698 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6699 break; 6700 } 6701 } 6702 6703 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6704 struct rtw89_vif_link *rtwvif_link) 6705 { 6706 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6707 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6708 struct rtw89_mac_chinfo *ch_info, *tmp; 6709 struct ieee80211_channel *channel; 6710 struct list_head chan_list; 6711 int list_len; 6712 enum rtw89_chan_type type; 6713 int ret = 0; 6714 u32 idx; 6715 6716 INIT_LIST_HEAD(&chan_list); 6717 for (idx = 0, list_len = 0; 6718 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6719 idx++, list_len++) { 6720 channel = nd_config->channels[idx]; 6721 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6722 if (!ch_info) { 6723 ret = -ENOMEM; 6724 goto out; 6725 } 6726 6727 ch_info->period = RTW89_CHANNEL_TIME; 6728 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6729 ch_info->central_ch = channel->hw_value; 6730 ch_info->pri_ch = channel->hw_value; 6731 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6732 6733 if (channel->flags & 6734 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6735 type = RTW89_CHAN_DFS; 6736 else 6737 type = RTW89_CHAN_ACTIVE; 6738 6739 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6740 list_add_tail(&ch_info->list, &chan_list); 6741 } 6742 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6743 6744 out: 6745 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6746 list_del(&ch_info->list); 6747 kfree(ch_info); 6748 } 6749 6750 return ret; 6751 } 6752 6753 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6754 struct rtw89_vif_link *rtwvif_link, bool connected) 6755 { 6756 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6757 struct cfg80211_scan_request *req = rtwvif->scan_req; 6758 struct rtw89_mac_chinfo *ch_info, *tmp; 6759 struct ieee80211_channel *channel; 6760 struct list_head chan_list; 6761 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6762 int 
list_len, off_chan_time = 0; 6763 enum rtw89_chan_type type; 6764 int ret = 0; 6765 u32 idx; 6766 6767 INIT_LIST_HEAD(&chan_list); 6768 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6769 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6770 idx++, list_len++) { 6771 channel = req->channels[idx]; 6772 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6773 if (!ch_info) { 6774 ret = -ENOMEM; 6775 goto out; 6776 } 6777 6778 if (req->duration) 6779 ch_info->period = req->duration; 6780 else if (channel->band == NL80211_BAND_6GHZ) 6781 ch_info->period = RTW89_CHANNEL_TIME_6G + 6782 RTW89_DWELL_TIME_6G; 6783 else 6784 ch_info->period = RTW89_CHANNEL_TIME; 6785 6786 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6787 ch_info->central_ch = channel->hw_value; 6788 ch_info->pri_ch = channel->hw_value; 6789 ch_info->rand_seq_num = random_seq; 6790 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6791 6792 if (channel->flags & 6793 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6794 type = RTW89_CHAN_DFS; 6795 else 6796 type = RTW89_CHAN_ACTIVE; 6797 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6798 6799 if (connected && 6800 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6801 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6802 if (!tmp) { 6803 ret = -ENOMEM; 6804 kfree(ch_info); 6805 goto out; 6806 } 6807 6808 type = RTW89_CHAN_OPERATE; 6809 tmp->period = req->duration_mandatory ? 6810 req->duration : RTW89_CHANNEL_TIME; 6811 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6812 list_add_tail(&tmp->list, &chan_list); 6813 off_chan_time = 0; 6814 list_len++; 6815 } 6816 list_add_tail(&ch_info->list, &chan_list); 6817 off_chan_time += ch_info->period; 6818 } 6819 rtwdev->scan_info.last_chan_idx = idx; 6820 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6821 6822 out: 6823 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6824 list_del(&ch_info->list); 6825 kfree(ch_info); 6826 } 6827 6828 return ret; 6829 } 6830 6831 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6832 struct rtw89_vif_link *rtwvif_link) 6833 { 6834 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6835 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6836 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6837 struct ieee80211_channel *channel; 6838 struct list_head chan_list; 6839 enum rtw89_chan_type type; 6840 int list_len, ret; 6841 u32 idx; 6842 6843 INIT_LIST_HEAD(&chan_list); 6844 6845 for (idx = 0, list_len = 0; 6846 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6847 idx++, list_len++) { 6848 channel = nd_config->channels[idx]; 6849 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6850 if (!ch_info) { 6851 ret = -ENOMEM; 6852 goto out; 6853 } 6854 6855 ch_info->period = RTW89_CHANNEL_TIME; 6856 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6857 ch_info->central_ch = channel->hw_value; 6858 ch_info->pri_ch = channel->hw_value; 6859 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6860 6861 if (channel->flags & 6862 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6863 type = RTW89_CHAN_DFS; 6864 else 6865 type = RTW89_CHAN_ACTIVE; 6866 6867 rtw89_pno_scan_add_chan_be(rtwdev, type, 6868 nd_config->n_match_sets, ch_info); 6869 list_add_tail(&ch_info->list, &chan_list); 6870 } 6871 6872 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6873 rtwvif_link); 6874 6875 out: 6876 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6877 list_del(&ch_info->list); 6878 
kfree(ch_info); 6879 } 6880 6881 return ret; 6882 } 6883 6884 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6885 struct rtw89_vif_link *rtwvif_link, bool connected) 6886 { 6887 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6888 struct cfg80211_scan_request *req = rtwvif->scan_req; 6889 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6890 struct ieee80211_channel *channel; 6891 struct list_head chan_list; 6892 enum rtw89_chan_type type; 6893 int list_len, ret; 6894 bool random_seq; 6895 u32 idx; 6896 6897 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6898 INIT_LIST_HEAD(&chan_list); 6899 6900 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6901 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6902 idx++, list_len++) { 6903 channel = req->channels[idx]; 6904 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6905 if (!ch_info) { 6906 ret = -ENOMEM; 6907 goto out; 6908 } 6909 6910 if (req->duration) 6911 ch_info->period = req->duration; 6912 else if (channel->band == NL80211_BAND_6GHZ) 6913 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6914 else 6915 ch_info->period = RTW89_CHANNEL_TIME; 6916 6917 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6918 ch_info->central_ch = channel->hw_value; 6919 ch_info->pri_ch = channel->hw_value; 6920 ch_info->rand_seq_num = random_seq; 6921 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6922 6923 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6924 type = RTW89_CHAN_DFS; 6925 else 6926 type = RTW89_CHAN_ACTIVE; 6927 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6928 6929 list_add_tail(&ch_info->list, &chan_list); 6930 } 6931 6932 rtwdev->scan_info.last_chan_idx = idx; 6933 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6934 rtwvif_link); 6935 6936 out: 6937 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6938 list_del(&ch_info->list); 6939 kfree(ch_info); 6940 } 6941 6942 return ret; 6943 } 6944 6945 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6946 struct rtw89_vif_link *rtwvif_link, bool connected) 6947 { 6948 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6949 int ret; 6950 6951 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 6952 if (ret) { 6953 #if defined(__linux__) 6954 rtw89_err(rtwdev, "Update probe request failed\n"); 6955 #elif defined(__FreeBSD__) 6956 rtw89_err(rtwdev, "Update probe request failed: ret %d\n", ret); 6957 #endif 6958 goto out; 6959 } 6960 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 6961 out: 6962 return ret; 6963 } 6964 6965 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 6966 struct rtw89_vif_link *rtwvif_link, 6967 struct ieee80211_scan_request *scan_req) 6968 { 6969 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6970 struct cfg80211_scan_request *req = &scan_req->req; 6971 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 6972 rtwvif_link->chanctx_idx); 6973 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6974 u32 rx_fltr = rtwdev->hal.rx_fltr; 6975 u8 mac_addr[ETH_ALEN]; 6976 u32 reg; 6977 6978 /* clone op and keep it during scan */ 6979 rtwdev->scan_info.op_chan = *chan; 6980 6981 rtwdev->scan_info.scanning_vif = rtwvif_link; 6982 rtwdev->scan_info.last_chan_idx = 0; 6983 rtwdev->scan_info.abort = false; 6984 rtwvif->scan_ies = &scan_req->ies; 6985 rtwvif->scan_req = req; 6986 ieee80211_stop_queues(rtwdev->hw); 6987 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 6988 6989 if (req->flags & 
NL80211_SCAN_FLAG_RANDOM_ADDR) 6990 get_random_mask_addr(mac_addr, req->mac_addr, 6991 req->mac_addr_mask); 6992 else 6993 ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 6994 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 6995 6996 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6997 rx_fltr &= ~B_AX_A_BC; 6998 rx_fltr &= ~B_AX_A_A1_MATCH; 6999 7000 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 7001 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr); 7002 7003 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 7004 } 7005 7006 struct rtw89_hw_scan_complete_cb_data { 7007 struct rtw89_vif_link *rtwvif_link; 7008 bool aborted; 7009 }; 7010 7011 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) 7012 { 7013 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7014 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7015 struct rtw89_hw_scan_complete_cb_data *cb_data = data; 7016 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link; 7017 struct cfg80211_scan_info info = { 7018 .aborted = cb_data->aborted, 7019 }; 7020 struct rtw89_vif *rtwvif; 7021 u32 reg; 7022 7023 if (!rtwvif_link) 7024 return -EINVAL; 7025 7026 rtwvif = rtwvif_link->rtwvif; 7027 7028 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 7029 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 7030 7031 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 7032 ieee80211_scan_completed(rtwdev->hw, &info); 7033 ieee80211_wake_queues(rtwdev->hw); 7034 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 7035 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 7036 7037 rtw89_release_pkt_list(rtwdev); 7038 rtwvif->scan_req = NULL; 7039 rtwvif->scan_ies = NULL; 7040 scan_info->last_chan_idx = 0; 7041 scan_info->scanning_vif = NULL; 7042 scan_info->abort = false; 7043 7044 return 0; 7045 } 7046 7047 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 7048 struct rtw89_vif_link *rtwvif_link, 7049 bool aborted) 7050 { 7051 struct rtw89_hw_scan_complete_cb_data cb_data = { 7052 .rtwvif_link = rtwvif_link, 7053 .aborted = aborted, 7054 }; 7055 const struct rtw89_chanctx_cb_parm cb_parm = { 7056 .cb = rtw89_hw_scan_complete_cb, 7057 .data = &cb_data, 7058 .caller = __func__, 7059 }; 7060 7061 /* The steps here need to be done after setting the channel (for coex) 7062 * and before proceeding with entity mode (for MCC), so pass them as a 7063 * callback to keep the right sequence rather than doing them directly. 7064 */ 7065 rtw89_chanctx_proceed(rtwdev, &cb_parm); 7066 } 7067 7068 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 7069 struct rtw89_vif_link *rtwvif_link) 7070 { 7071 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7072 int ret; 7073 7074 scan_info->abort = true; 7075 7076 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 7077 if (ret) 7078 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 7079 7080 /* Indicate ieee80211_scan_completed() before returning, which is safe 7081 * because the scan abort command always waits for completion of 7082 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 7083 * work properly.
7084 */ 7085 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 7086 } 7087 7088 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 7089 { 7090 struct rtw89_vif_link *rtwvif_link; 7091 struct rtw89_vif *rtwvif; 7092 unsigned int link_id; 7093 7094 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 7095 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 7096 /* This variable implies connected or during attempt to connect */ 7097 if (!is_zero_ether_addr(rtwvif_link->bssid)) 7098 return true; 7099 } 7100 } 7101 7102 return false; 7103 } 7104 7105 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 7106 struct rtw89_vif_link *rtwvif_link, 7107 bool enable) 7108 { 7109 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7110 struct rtw89_scan_option opt = {0}; 7111 bool connected; 7112 int ret = 0; 7113 7114 if (!rtwvif_link) 7115 return -EINVAL; 7116 7117 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 7118 opt.enable = enable; 7119 opt.target_ch_mode = connected; 7120 if (enable) { 7121 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 7122 if (ret) 7123 goto out; 7124 } 7125 7126 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 7127 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 7128 opt.scan_mode = RTW89_SCAN_MODE_SA; 7129 opt.band = rtwvif_link->mac_idx; 7130 opt.num_macc_role = 0; 7131 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 7132 opt.num_opch = connected ? 1 : 0; 7133 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 7134 } 7135 7136 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7137 out: 7138 return ret; 7139 } 7140 7141 #define H2C_FW_CPU_EXCEPTION_LEN 4 7142 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7143 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7144 { 7145 struct sk_buff *skb; 7146 int ret; 7147 7148 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7149 if (!skb) { 7150 rtw89_err(rtwdev, 7151 "failed to alloc skb for fw cpu exception\n"); 7152 return -ENOMEM; 7153 } 7154 7155 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7156 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7157 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7158 7159 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7160 H2C_CAT_TEST, 7161 H2C_CL_FW_STATUS_TEST, 7162 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7163 H2C_FW_CPU_EXCEPTION_LEN); 7164 7165 ret = rtw89_h2c_tx(rtwdev, skb, false); 7166 if (ret) { 7167 rtw89_err(rtwdev, "failed to send h2c\n"); 7168 goto fail; 7169 } 7170 7171 return 0; 7172 7173 fail: 7174 dev_kfree_skb_any(skb); 7175 return ret; 7176 } 7177 7178 #define H2C_PKT_DROP_LEN 24 7179 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7180 const struct rtw89_pkt_drop_params *params) 7181 { 7182 struct sk_buff *skb; 7183 int ret; 7184 7185 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7186 if (!skb) { 7187 rtw89_err(rtwdev, 7188 "failed to alloc skb for packet drop\n"); 7189 return -ENOMEM; 7190 } 7191 7192 switch (params->sel) { 7193 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7194 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7195 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7196 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7197 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7198 break; 7199 default: 7200 rtw89_debug(rtwdev, RTW89_DBG_FW, 7201 "H2C of pkt drop might not fully support sel: %d yet\n", 7202 params->sel); 7203 break; 7204 } 7205 7206 skb_put(skb, H2C_PKT_DROP_LEN); 7207 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7208 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7209 
RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7210 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7211 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7212 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7213 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7214 params->macid_band_sel[0]); 7215 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7216 params->macid_band_sel[1]); 7217 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7218 params->macid_band_sel[2]); 7219 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7220 params->macid_band_sel[3]); 7221 7222 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7223 H2C_CAT_MAC, 7224 H2C_CL_MAC_FW_OFLD, 7225 H2C_FUNC_PKT_DROP, 0, 0, 7226 H2C_PKT_DROP_LEN); 7227 7228 ret = rtw89_h2c_tx(rtwdev, skb, false); 7229 if (ret) { 7230 rtw89_err(rtwdev, "failed to send h2c\n"); 7231 goto fail; 7232 } 7233 7234 return 0; 7235 7236 fail: 7237 dev_kfree_skb_any(skb); 7238 return ret; 7239 } 7240 7241 #define H2C_KEEP_ALIVE_LEN 4 7242 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7243 bool enable) 7244 { 7245 struct sk_buff *skb; 7246 u8 pkt_id = 0; 7247 int ret; 7248 7249 if (enable) { 7250 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7251 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7252 &pkt_id); 7253 if (ret) 7254 return -EPERM; 7255 } 7256 7257 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7258 if (!skb) { 7259 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7260 return -ENOMEM; 7261 } 7262 7263 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7264 7265 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7266 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7267 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7268 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7269 7270 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7271 H2C_CAT_MAC, 7272 H2C_CL_MAC_WOW, 7273 H2C_FUNC_KEEP_ALIVE, 0, 1, 7274 H2C_KEEP_ALIVE_LEN); 7275 7276 ret = rtw89_h2c_tx(rtwdev, skb, false); 7277 if (ret) { 7278 rtw89_err(rtwdev, "failed to send h2c\n"); 7279 goto fail; 7280 } 7281 7282 return 0; 7283 7284 fail: 7285 dev_kfree_skb_any(skb); 7286 7287 return ret; 7288 } 7289 7290 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7291 bool enable) 7292 { 7293 struct rtw89_h2c_arp_offload *h2c; 7294 u32 len = sizeof(*h2c); 7295 struct sk_buff *skb; 7296 u8 pkt_id = 0; 7297 int ret; 7298 7299 if (enable) { 7300 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7301 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7302 &pkt_id); 7303 if (ret) 7304 return ret; 7305 } 7306 7307 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7308 if (!skb) { 7309 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7310 return -ENOMEM; 7311 } 7312 7313 skb_put(skb, len); 7314 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7315 7316 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7317 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7318 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7319 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7320 7321 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7322 H2C_CAT_MAC, 7323 H2C_CL_MAC_WOW, 7324 H2C_FUNC_ARP_OFLD, 0, 1, 7325 len); 7326 7327 ret = rtw89_h2c_tx(rtwdev, skb, false); 7328 if (ret) { 7329 rtw89_err(rtwdev, "failed to send h2c\n"); 7330 goto fail; 7331 } 7332 7333 return 0; 7334 7335 fail: 7336 
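/* error path: the H2C buffer was not sent, so free it here */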
dev_kfree_skb_any(skb); 7337 7338 return ret; 7339 } 7340 7341 #define H2C_DISCONNECT_DETECT_LEN 8 7342 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 7343 struct rtw89_vif_link *rtwvif_link, bool enable) 7344 { 7345 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7346 struct sk_buff *skb; 7347 u8 macid = rtwvif_link->mac_id; 7348 int ret; 7349 7350 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 7351 if (!skb) { 7352 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7353 return -ENOMEM; 7354 } 7355 7356 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 7357 7358 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 7359 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 7360 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 7361 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 7362 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 7363 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 7364 } 7365 7366 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7367 H2C_CAT_MAC, 7368 H2C_CL_MAC_WOW, 7369 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 7370 H2C_DISCONNECT_DETECT_LEN); 7371 7372 ret = rtw89_h2c_tx(rtwdev, skb, false); 7373 if (ret) { 7374 rtw89_err(rtwdev, "failed to send h2c\n"); 7375 goto fail; 7376 } 7377 7378 return 0; 7379 7380 fail: 7381 dev_kfree_skb_any(skb); 7382 7383 return ret; 7384 } 7385 7386 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7387 bool enable) 7388 { 7389 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7390 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7391 struct rtw89_h2c_cfg_nlo *h2c; 7392 u32 len = sizeof(*h2c); 7393 struct sk_buff *skb; 7394 int ret, i; 7395 7396 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7397 if (!skb) { 7398 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 7399 return -ENOMEM; 7400 } 7401 7402 skb_put(skb, len); 7403 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 7404 7405 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 7406 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 7407 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 7408 7409 if (enable) { 7410 h2c->nlo_cnt = nd_config->n_match_sets; 7411 for (i = 0 ; i < nd_config->n_match_sets; i++) { 7412 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 7413 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 7414 nd_config->match_sets[i].ssid.ssid_len); 7415 } 7416 } 7417 7418 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7419 H2C_CAT_MAC, 7420 H2C_CL_MAC_WOW, 7421 H2C_FUNC_NLO, 0, 1, 7422 len); 7423 7424 ret = rtw89_h2c_tx(rtwdev, skb, false); 7425 if (ret) { 7426 rtw89_err(rtwdev, "failed to send h2c\n"); 7427 goto fail; 7428 } 7429 7430 return 0; 7431 7432 fail: 7433 dev_kfree_skb_any(skb); 7434 return ret; 7435 } 7436 7437 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7438 bool enable) 7439 { 7440 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7441 struct rtw89_h2c_wow_global *h2c; 7442 u8 macid = rtwvif_link->mac_id; 7443 u32 len = sizeof(*h2c); 7444 struct sk_buff *skb; 7445 int ret; 7446 7447 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7448 if (!skb) { 7449 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 7450 return -ENOMEM; 7451 } 7452 7453 skb_put(skb, len); 7454 h2c = (struct rtw89_h2c_wow_global *)skb->data; 7455 7456 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 7457 le32_encode_bits(macid, 
RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7458 le32_encode_bits(rtw_wow->ptk_alg, 7459 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7460 le32_encode_bits(rtw_wow->gtk_alg, 7461 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7462 h2c->key_info = rtw_wow->key_info; 7463 7464 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7465 H2C_CAT_MAC, 7466 H2C_CL_MAC_WOW, 7467 H2C_FUNC_WOW_GLOBAL, 0, 1, 7468 len); 7469 7470 ret = rtw89_h2c_tx(rtwdev, skb, false); 7471 if (ret) { 7472 rtw89_err(rtwdev, "failed to send h2c\n"); 7473 goto fail; 7474 } 7475 7476 return 0; 7477 7478 fail: 7479 dev_kfree_skb_any(skb); 7480 7481 return ret; 7482 } 7483 7484 #define H2C_WAKEUP_CTRL_LEN 4 7485 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7486 struct rtw89_vif_link *rtwvif_link, 7487 bool enable) 7488 { 7489 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7490 struct sk_buff *skb; 7491 u8 macid = rtwvif_link->mac_id; 7492 int ret; 7493 7494 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7495 if (!skb) { 7496 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7497 return -ENOMEM; 7498 } 7499 7500 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7501 7502 if (rtw_wow->pattern_cnt) 7503 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7504 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7505 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7506 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7507 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7508 7509 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7510 7511 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7512 H2C_CAT_MAC, 7513 H2C_CL_MAC_WOW, 7514 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7515 H2C_WAKEUP_CTRL_LEN); 7516 7517 ret = rtw89_h2c_tx(rtwdev, skb, false); 7518 if (ret) { 7519 rtw89_err(rtwdev, "failed to send h2c\n"); 7520 goto fail; 7521 } 7522 7523 return 0; 7524 7525 fail: 7526 dev_kfree_skb_any(skb); 7527 7528 return ret; 7529 } 7530 7531 #define H2C_WOW_CAM_UPD_LEN 24 7532 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7533 struct rtw89_wow_cam_info *cam_info) 7534 { 7535 struct sk_buff *skb; 7536 int ret; 7537 7538 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7539 if (!skb) { 7540 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n"); 7541 return -ENOMEM; 7542 } 7543 7544 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7545 7546 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7547 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7548 if (cam_info->valid) { 7549 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7550 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7551 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7552 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7553 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7554 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7555 cam_info->negative_pattern_match); 7556 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7557 cam_info->skip_mac_hdr); 7558 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7559 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7560 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7561 } 7562 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7563 7564 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7565 H2C_CAT_MAC, 7566 H2C_CL_MAC_WOW, 7567 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7568 H2C_WOW_CAM_UPD_LEN); 7569 7570 ret = rtw89_h2c_tx(rtwdev, skb, false); 7571 if (ret) { 7572 rtw89_err(rtwdev, "failed to send h2c\n");
7573 goto fail; 7574 } 7575 7576 return 0; 7577 fail: 7578 dev_kfree_skb_any(skb); 7579 7580 return ret; 7581 } 7582 7583 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7584 struct rtw89_vif_link *rtwvif_link, 7585 bool enable) 7586 { 7587 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7588 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7589 struct rtw89_h2c_wow_gtk_ofld *h2c; 7590 u8 macid = rtwvif_link->mac_id; 7591 u32 len = sizeof(*h2c); 7592 u8 pkt_id_sa_query = 0; 7593 struct sk_buff *skb; 7594 u8 pkt_id_eapol = 0; 7595 int ret; 7596 7597 if (!rtw_wow->gtk_alg) 7598 return 0; 7599 7600 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7601 if (!skb) { 7602 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7603 return -ENOMEM; 7604 } 7605 7606 skb_put(skb, len); 7607 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7608 7609 if (!enable) 7610 goto hdr; 7611 7612 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7613 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7614 &pkt_id_eapol); 7615 if (ret) 7616 goto fail; 7617 7618 if (gtk_info->igtk_keyid) { 7619 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7620 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7621 &pkt_id_sa_query); 7622 if (ret) 7623 goto fail; 7624 } 7625 7626 /* not support TKIP yet */ 7627 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7628 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7629 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 7630 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7631 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7632 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7633 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7634 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7635 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7636 h2c->gtk_info = rtw_wow->gtk_info; 7637 7638 hdr: 7639 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7640 H2C_CAT_MAC, 7641 H2C_CL_MAC_WOW, 7642 H2C_FUNC_GTK_OFLD, 0, 1, 7643 len); 7644 7645 ret = rtw89_h2c_tx(rtwdev, skb, false); 7646 if (ret) { 7647 rtw89_err(rtwdev, "failed to send h2c\n"); 7648 goto fail; 7649 } 7650 return 0; 7651 fail: 7652 dev_kfree_skb_any(skb); 7653 7654 return ret; 7655 } 7656 7657 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7658 bool enable) 7659 { 7660 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7661 struct rtw89_h2c_fwips *h2c; 7662 u32 len = sizeof(*h2c); 7663 struct sk_buff *skb; 7664 7665 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7666 if (!skb) { 7667 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7668 return -ENOMEM; 7669 } 7670 skb_put(skb, len); 7671 h2c = (struct rtw89_h2c_fwips *)skb->data; 7672 7673 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7674 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7675 7676 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7677 H2C_CAT_MAC, 7678 H2C_CL_MAC_PS, 7679 H2C_FUNC_IPS_CFG, 0, 1, 7680 len); 7681 7682 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7683 } 7684 7685 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7686 { 7687 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7688 struct rtw89_h2c_wow_aoac *h2c; 7689 u32 len = sizeof(*h2c); 7690 struct sk_buff *skb; 7691 7692 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7693 if (!skb) { 7694 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7695 return -ENOMEM; 7696 } 7697 
7698 skb_put(skb, len); 7699 7700 /* This H2C only notifies firmware to generate the AOAC report C2H; 7701 * no parameters are needed. 7702 */ 7703 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7704 H2C_CAT_MAC, 7705 H2C_CL_MAC_WOW, 7706 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7707 len); 7708 7709 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 7710 } 7711 7712 /* Return < 0 if failures happen during waiting for the condition. 7713 * Return 0 when waiting for the condition succeeds. 7714 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7715 * where 1 means during SER. 7716 */ 7717 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7718 struct rtw89_wait_info *wait, unsigned int cond) 7719 { 7720 int ret; 7721 7722 ret = rtw89_h2c_tx(rtwdev, skb, false); 7723 if (ret) { 7724 rtw89_err(rtwdev, "failed to send h2c\n"); 7725 dev_kfree_skb_any(skb); 7726 return -EBUSY; 7727 } 7728 7729 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7730 return 1; 7731 7732 return rtw89_wait_for_cond(wait, cond); 7733 } 7734 7735 #define H2C_ADD_MCC_LEN 16 7736 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7737 const struct rtw89_fw_mcc_add_req *p) 7738 { 7739 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7740 struct sk_buff *skb; 7741 unsigned int cond; 7742 7743 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7744 if (!skb) { 7745 rtw89_err(rtwdev, 7746 "failed to alloc skb for add mcc\n"); 7747 return -ENOMEM; 7748 } 7749 7750 skb_put(skb, H2C_ADD_MCC_LEN); 7751 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7752 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7753 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7754 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7755 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7756 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7757 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7758 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7759 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7760 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7761 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7762 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7763 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7764 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7765 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7766 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7767 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7768 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7769 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7770 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7771 7772 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7773 H2C_CAT_MAC, 7774 H2C_CL_MCC, 7775 H2C_FUNC_ADD_MCC, 0, 0, 7776 H2C_ADD_MCC_LEN); 7777 7778 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7779 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7780 } 7781 7782 #define H2C_START_MCC_LEN 12 7783 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7784 const struct rtw89_fw_mcc_start_req *p) 7785 { 7786 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7787 struct sk_buff *skb; 7788 unsigned int cond; 7789 7790 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7791 if (!skb)
{ 7792 rtw89_err(rtwdev, 7793 "failed to alloc skb for start mcc\n"); 7794 return -ENOMEM; 7795 } 7796 7797 skb_put(skb, H2C_START_MCC_LEN); 7798 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7799 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7800 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7801 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7802 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7803 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7804 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 7805 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7806 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7807 7808 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7809 H2C_CAT_MAC, 7810 H2C_CL_MCC, 7811 H2C_FUNC_START_MCC, 0, 0, 7812 H2C_START_MCC_LEN); 7813 7814 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7815 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7816 } 7817 7818 #define H2C_STOP_MCC_LEN 4 7819 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7820 bool prev_groups) 7821 { 7822 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7823 struct sk_buff *skb; 7824 unsigned int cond; 7825 7826 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7827 if (!skb) { 7828 rtw89_err(rtwdev, 7829 "failed to alloc skb for stop mcc\n"); 7830 return -ENOMEM; 7831 } 7832 7833 skb_put(skb, H2C_STOP_MCC_LEN); 7834 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7835 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7836 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7837 7838 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7839 H2C_CAT_MAC, 7840 H2C_CL_MCC, 7841 H2C_FUNC_STOP_MCC, 0, 0, 7842 H2C_STOP_MCC_LEN); 7843 7844 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7845 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7846 } 7847 7848 #define H2C_DEL_MCC_GROUP_LEN 4 7849 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7850 bool prev_groups) 7851 { 7852 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7853 struct sk_buff *skb; 7854 unsigned int cond; 7855 7856 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7857 if (!skb) { 7858 rtw89_err(rtwdev, 7859 "failed to alloc skb for del mcc group\n"); 7860 return -ENOMEM; 7861 } 7862 7863 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7864 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7865 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7866 7867 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7868 H2C_CAT_MAC, 7869 H2C_CL_MCC, 7870 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7871 H2C_DEL_MCC_GROUP_LEN); 7872 7873 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7874 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7875 } 7876 7877 #define H2C_RESET_MCC_GROUP_LEN 4 7878 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7879 { 7880 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7881 struct sk_buff *skb; 7882 unsigned int cond; 7883 7884 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7885 if (!skb) { 7886 rtw89_err(rtwdev, 7887 "failed to alloc skb for reset mcc group\n"); 7888 return -ENOMEM; 7889 } 7890 7891 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7892 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7893 7894 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7895 H2C_CAT_MAC, 7896 H2C_CL_MCC, 7897 
H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7898 H2C_RESET_MCC_GROUP_LEN); 7899 7900 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7901 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7902 } 7903 7904 #define H2C_MCC_REQ_TSF_LEN 4 7905 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7906 const struct rtw89_fw_mcc_tsf_req *req, 7907 struct rtw89_mac_mcc_tsf_rpt *rpt) 7908 { 7909 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7910 struct rtw89_mac_mcc_tsf_rpt *tmp; 7911 struct sk_buff *skb; 7912 unsigned int cond; 7913 int ret; 7914 7915 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 7916 if (!skb) { 7917 rtw89_err(rtwdev, 7918 "failed to alloc skb for mcc req tsf\n"); 7919 return -ENOMEM; 7920 } 7921 7922 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7923 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7924 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7925 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7926 7927 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7928 H2C_CAT_MAC, 7929 H2C_CL_MCC, 7930 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7931 H2C_MCC_REQ_TSF_LEN); 7932 7933 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7934 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7935 if (ret) 7936 return ret; 7937 7938 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7939 *rpt = *tmp; 7940 7941 return 0; 7942 } 7943 7944 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7945 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7946 u8 *bitmap) 7947 { 7948 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7949 struct sk_buff *skb; 7950 unsigned int cond; 7951 u8 map_len; 7952 u8 h2c_len; 7953 7954 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7955 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7956 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7957 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7958 if (!skb) { 7959 rtw89_err(rtwdev, 7960 "failed to alloc skb for mcc macid bitmap\n"); 7961 return -ENOMEM; 7962 } 7963 7964 skb_put(skb, h2c_len); 7965 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7966 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7967 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7968 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7969 7970 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7971 H2C_CAT_MAC, 7972 H2C_CL_MCC, 7973 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7974 h2c_len); 7975 7976 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7977 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7978 } 7979 7980 #define H2C_MCC_SYNC_LEN 4 7981 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7982 u8 target, u8 offset) 7983 { 7984 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7985 struct sk_buff *skb; 7986 unsigned int cond; 7987 7988 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7989 if (!skb) { 7990 rtw89_err(rtwdev, 7991 "failed to alloc skb for mcc sync\n"); 7992 return -ENOMEM; 7993 } 7994 7995 skb_put(skb, H2C_MCC_SYNC_LEN); 7996 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7997 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7998 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7999 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 8000 8001 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8002 H2C_CAT_MAC, 8003 H2C_CL_MCC, 8004 H2C_FUNC_MCC_SYNC, 0, 0, 8005 H2C_MCC_SYNC_LEN); 8006 8007 cond = RTW89_MCC_WAIT_COND(group, 
H2C_FUNC_MCC_SYNC); 8008 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8009 } 8010 8011 #define H2C_MCC_SET_DURATION_LEN 20 8012 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 8013 const struct rtw89_fw_mcc_duration *p) 8014 { 8015 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8016 struct sk_buff *skb; 8017 unsigned int cond; 8018 8019 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 8020 if (!skb) { 8021 rtw89_err(rtwdev, 8022 "failed to alloc skb for mcc set duration\n"); 8023 return -ENOMEM; 8024 } 8025 8026 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 8027 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 8028 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 8029 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 8030 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 8031 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 8032 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 8033 p->start_tsf_low); 8034 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 8035 p->start_tsf_high); 8036 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 8037 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 8038 8039 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8040 H2C_CAT_MAC, 8041 H2C_CL_MCC, 8042 H2C_FUNC_MCC_SET_DURATION, 0, 0, 8043 H2C_MCC_SET_DURATION_LEN); 8044 8045 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 8046 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8047 } 8048 8049 static 8050 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 8051 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 8052 struct rtw89_h2c_mrc_add_slot *slot_h2c) 8053 { 8054 bool fill_h2c = !!slot_h2c; 8055 unsigned int i; 8056 8057 if (!fill_h2c) 8058 goto calc_len; 8059 8060 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 8061 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 8062 le32_encode_bits(slot_arg->courtesy_en, 8063 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 8064 le32_encode_bits(slot_arg->role_num, 8065 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 8066 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 8067 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 8068 le32_encode_bits(slot_arg->courtesy_target, 8069 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 8070 8071 for (i = 0; i < slot_arg->role_num; i++) { 8072 slot_h2c->roles[i].w0 = 8073 le32_encode_bits(slot_arg->roles[i].macid, 8074 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 8075 le32_encode_bits(slot_arg->roles[i].role_type, 8076 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 8077 le32_encode_bits(slot_arg->roles[i].is_master, 8078 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 8079 le32_encode_bits(slot_arg->roles[i].en_tx_null, 8080 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 8081 le32_encode_bits(false, 8082 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 8083 le32_encode_bits(false, 8084 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 8085 slot_h2c->roles[i].w1 = 8086 le32_encode_bits(slot_arg->roles[i].central_ch, 8087 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 8088 le32_encode_bits(slot_arg->roles[i].primary_ch, 8089 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 8090 le32_encode_bits(slot_arg->roles[i].bw, 8091 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 8092 le32_encode_bits(slot_arg->roles[i].band, 8093 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 8094 le32_encode_bits(slot_arg->roles[i].null_early, 8095 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 8096 le32_encode_bits(false, 8097 
RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 8098 le32_encode_bits(true, 8099 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 8100 slot_h2c->roles[i].macid_main_bitmap = 8101 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 8102 slot_h2c->roles[i].macid_paired_bitmap = 8103 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 8104 } 8105 8106 calc_len: 8107 return struct_size(slot_h2c, roles, slot_arg->role_num); 8108 } 8109 8110 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8111 const struct rtw89_fw_mrc_add_arg *arg) 8112 { 8113 struct rtw89_h2c_mrc_add *h2c_head; 8114 struct sk_buff *skb; 8115 unsigned int i; 8116 #if defined(__linux__) 8117 void *tmp; 8118 #elif defined(__FreeBSD__) 8119 u8 *tmp; 8120 #endif 8121 u32 len; 8122 int ret; 8123 8124 len = sizeof(*h2c_head); 8125 for (i = 0; i < arg->slot_num; i++) 8126 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8127 8128 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8129 if (!skb) { 8130 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8131 return -ENOMEM; 8132 } 8133 8134 skb_put(skb, len); 8135 tmp = skb->data; 8136 8137 #if defined(__linux__) 8138 h2c_head = tmp; 8139 #elif defined(__FreeBSD__) 8140 h2c_head = (void *)tmp; 8141 #endif 8142 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8143 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8144 le32_encode_bits(arg->sch_type, 8145 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8146 le32_encode_bits(arg->slot_num, 8147 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8148 le32_encode_bits(arg->btc_in_sch, 8149 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8150 8151 tmp += sizeof(*h2c_head); 8152 for (i = 0; i < arg->slot_num; i++) 8153 #if defined(__linux__) 8154 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8155 #elif defined(__FreeBSD__) 8156 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], (void *)tmp); 8157 #endif 8158 8159 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8160 H2C_CAT_MAC, 8161 H2C_CL_MRC, 8162 H2C_FUNC_ADD_MRC, 0, 0, 8163 len); 8164 8165 ret = rtw89_h2c_tx(rtwdev, skb, false); 8166 if (ret) { 8167 rtw89_err(rtwdev, "failed to send h2c\n"); 8168 dev_kfree_skb_any(skb); 8169 return -EBUSY; 8170 } 8171 8172 return 0; 8173 } 8174 8175 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8176 const struct rtw89_fw_mrc_start_arg *arg) 8177 { 8178 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8179 struct rtw89_h2c_mrc_start *h2c; 8180 u32 len = sizeof(*h2c); 8181 struct sk_buff *skb; 8182 unsigned int cond; 8183 8184 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8185 if (!skb) { 8186 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8187 return -ENOMEM; 8188 } 8189 8190 skb_put(skb, len); 8191 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8192 8193 h2c->w0 = le32_encode_bits(arg->sch_idx, 8194 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8195 le32_encode_bits(arg->old_sch_idx, 8196 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8197 le32_encode_bits(arg->action, 8198 RTW89_H2C_MRC_START_W0_ACTION); 8199 8200 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8201 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8202 8203 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8204 H2C_CAT_MAC, 8205 H2C_CL_MRC, 8206 H2C_FUNC_START_MRC, 0, 0, 8207 len); 8208 8209 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8210 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8211 } 8212 8213 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8214 { 8215 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8216 struct rtw89_h2c_mrc_del *h2c; 8217 u32 len 
= sizeof(*h2c); 8218 struct sk_buff *skb; 8219 unsigned int cond; 8220 8221 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8222 if (!skb) { 8223 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8224 return -ENOMEM; 8225 } 8226 8227 skb_put(skb, len); 8228 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8229 8230 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8231 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8232 8233 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8234 H2C_CAT_MAC, 8235 H2C_CL_MRC, 8236 H2C_FUNC_DEL_MRC, 0, 0, 8237 len); 8238 8239 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8240 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8241 } 8242 8243 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8244 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8245 struct rtw89_mac_mrc_tsf_rpt *rpt) 8246 { 8247 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8248 struct rtw89_h2c_mrc_req_tsf *h2c; 8249 struct rtw89_mac_mrc_tsf_rpt *tmp; 8250 struct sk_buff *skb; 8251 unsigned int i; 8252 u32 len; 8253 int ret; 8254 8255 len = struct_size(h2c, infos, arg->num); 8256 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8257 if (!skb) { 8258 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8259 return -ENOMEM; 8260 } 8261 8262 skb_put(skb, len); 8263 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8264 8265 h2c->req_tsf_num = arg->num; 8266 for (i = 0; i < arg->num; i++) 8267 h2c->infos[i] = 8268 u8_encode_bits(arg->infos[i].band, 8269 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8270 u8_encode_bits(arg->infos[i].port, 8271 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8272 8273 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8274 H2C_CAT_MAC, 8275 H2C_CL_MRC, 8276 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8277 len); 8278 8279 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8280 if (ret) 8281 return ret; 8282 8283 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8284 *rpt = *tmp; 8285 8286 return 0; 8287 } 8288 8289 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8290 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8291 { 8292 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8293 u32 len = sizeof(*h2c); 8294 struct sk_buff *skb; 8295 int ret; 8296 8297 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8298 if (!skb) { 8299 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8300 return -ENOMEM; 8301 } 8302 8303 skb_put(skb, len); 8304 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8305 8306 h2c->w0 = le32_encode_bits(arg->sch_idx, 8307 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8308 le32_encode_bits(arg->action, 8309 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8310 le32_encode_bits(arg->macid, 8311 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8312 h2c->w1 = le32_encode_bits(arg->client_macid, 8313 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8314 8315 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8316 H2C_CAT_MAC, 8317 H2C_CL_MRC, 8318 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8319 len); 8320 8321 ret = rtw89_h2c_tx(rtwdev, skb, false); 8322 if (ret) { 8323 rtw89_err(rtwdev, "failed to send h2c\n"); 8324 dev_kfree_skb_any(skb); 8325 return -EBUSY; 8326 } 8327 8328 return 0; 8329 } 8330 8331 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8332 const struct rtw89_fw_mrc_sync_arg *arg) 8333 { 8334 struct rtw89_h2c_mrc_sync *h2c; 8335 u32 len = sizeof(*h2c); 8336 struct sk_buff *skb; 8337 int ret; 8338 8339 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8340 if (!skb) { 8341 rtw89_err(rtwdev, "failed to alloc skb 
for mrc sync\n"); 8342 return -ENOMEM; 8343 } 8344 8345 skb_put(skb, len); 8346 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8347 8348 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8349 le32_encode_bits(arg->src.port, 8350 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8351 le32_encode_bits(arg->src.band, 8352 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8353 le32_encode_bits(arg->dest.port, 8354 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8355 le32_encode_bits(arg->dest.band, 8356 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8357 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8358 8359 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8360 H2C_CAT_MAC, 8361 H2C_CL_MRC, 8362 H2C_FUNC_MRC_SYNC, 0, 0, 8363 len); 8364 8365 ret = rtw89_h2c_tx(rtwdev, skb, false); 8366 if (ret) { 8367 rtw89_err(rtwdev, "failed to send h2c\n"); 8368 dev_kfree_skb_any(skb); 8369 return -EBUSY; 8370 } 8371 8372 return 0; 8373 } 8374 8375 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8376 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8377 { 8378 struct rtw89_h2c_mrc_upd_duration *h2c; 8379 struct sk_buff *skb; 8380 unsigned int i; 8381 u32 len; 8382 int ret; 8383 8384 len = struct_size(h2c, slots, arg->slot_num); 8385 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8386 if (!skb) { 8387 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8388 return -ENOMEM; 8389 } 8390 8391 skb_put(skb, len); 8392 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8393 8394 h2c->w0 = le32_encode_bits(arg->sch_idx, 8395 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8396 le32_encode_bits(arg->slot_num, 8397 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8398 le32_encode_bits(false, 8399 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8400 8401 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8402 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8403 8404 for (i = 0; i < arg->slot_num; i++) { 8405 h2c->slots[i] = 8406 le32_encode_bits(arg->slots[i].slot_idx, 8407 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8408 le32_encode_bits(arg->slots[i].duration, 8409 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8410 } 8411 8412 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8413 H2C_CAT_MAC, 8414 H2C_CL_MRC, 8415 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8416 len); 8417 8418 ret = rtw89_h2c_tx(rtwdev, skb, false); 8419 if (ret) { 8420 rtw89_err(rtwdev, "failed to send h2c\n"); 8421 dev_kfree_skb_any(skb); 8422 return -EBUSY; 8423 } 8424 8425 return 0; 8426 } 8427 8428 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8429 { 8430 struct rtw89_h2c_ap_info *h2c; 8431 u32 len = sizeof(*h2c); 8432 struct sk_buff *skb; 8433 int ret; 8434 8435 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8436 if (!skb) { 8437 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8438 return -ENOMEM; 8439 } 8440 8441 skb_put(skb, len); 8442 h2c = (struct rtw89_h2c_ap_info *)skb->data; 8443 8444 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8445 8446 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8447 H2C_CAT_MAC, 8448 H2C_CL_AP, 8449 H2C_FUNC_AP_INFO, 0, 0, 8450 len); 8451 8452 ret = rtw89_h2c_tx(rtwdev, skb, false); 8453 if (ret) { 8454 rtw89_err(rtwdev, "failed to send h2c\n"); 8455 dev_kfree_skb_any(skb); 8456 return -EBUSY; 8457 } 8458 8459 return 0; 8460 } 8461 8462 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8463 { 8464 int ret; 8465 8466 if (en) { 8467 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8468 return 0; 8469 } else { 8470 if 
(!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8471 return 0; 8472 } 8473 8474 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8475 if (ret) { 8476 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8477 return ret; 8478 8479 /* During recovery, neither driver nor stack has full error 8480 * handling, so show a warning, but return 0 with refcount 8481 * increased normally. It can avoid underflow when calling 8482 * with @en == false later. 8483 */ 8484 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8485 } 8486 8487 if (en) 8488 refcount_set(&rtwdev->refcount_ap_info, 1); 8489 8490 return 0; 8491 } 8492 8493 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8494 { 8495 static const u8 zeros[U8_MAX] = {}; 8496 8497 return memcmp(ext_ptr, zeros, ext_len) == 0; 8498 } 8499 8500 #if defined(__linux__) 8501 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8502 ({ \ 8503 u8 __var_sz = sizeof(*(e)); \ 8504 bool __accept; \ 8505 if (__var_sz >= (ent_sz)) \ 8506 __accept = true; \ 8507 else \ 8508 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8509 (ent_sz) - __var_sz);\ 8510 __accept; \ 8511 }) 8512 #elif defined(__FreeBSD__) 8513 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8514 ({ \ 8515 u8 __var_sz = sizeof(*(e)); \ 8516 bool __accept; \ 8517 if (__var_sz >= (ent_sz)) \ 8518 __accept = true; \ 8519 else \ 8520 __accept = __fw_txpwr_entry_zero_ext((const u8 *)(cursor) + __var_sz,\ 8521 (ent_sz) - __var_sz);\ 8522 __accept; \ 8523 }) 8524 #endif 8525 8526 static bool 8527 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8528 const void *cursor, 8529 const struct rtw89_txpwr_conf *conf) 8530 { 8531 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8532 return false; 8533 8534 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8535 return false; 8536 8537 switch (e->rs) { 8538 case RTW89_RS_CCK: 8539 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8540 return false; 8541 break; 8542 case RTW89_RS_OFDM: 8543 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8544 return false; 8545 break; 8546 case RTW89_RS_MCS: 8547 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8548 e->nss >= RTW89_NSS_NUM || 8549 e->ofdma >= RTW89_OFDMA_NUM) 8550 return false; 8551 break; 8552 case RTW89_RS_HEDCM: 8553 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8554 e->nss >= RTW89_NSS_HEDCM_NUM || 8555 e->ofdma >= RTW89_OFDMA_NUM) 8556 return false; 8557 break; 8558 case RTW89_RS_OFFSET: 8559 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8560 return false; 8561 break; 8562 default: 8563 return false; 8564 } 8565 8566 return true; 8567 } 8568 8569 static 8570 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8571 const struct rtw89_txpwr_table *tbl) 8572 { 8573 const struct rtw89_txpwr_conf *conf = tbl->data; 8574 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8575 struct rtw89_txpwr_byrate *byr_head; 8576 struct rtw89_rate_desc desc = {}; 8577 #if defined(__linux__) 8578 const void *cursor; 8579 #elif defined(__FreeBSD__) 8580 const u8 *cursor; 8581 #endif 8582 u32 data; 8583 s8 *byr; 8584 int i; 8585 8586 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8587 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8588 continue; 8589 8590 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8591 data = le32_to_cpu(entry.data); 8592 desc.ofdma = entry.ofdma; 8593 desc.nss = entry.nss; 8594 desc.rs = entry.rs; 8595 8596 for (i = 0; i < entry.len; i++, data >>= 8) { 8597 desc.idx = entry.shf + i; 8598 byr = 
rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8599 *byr = data & 0xff; 8600 } 8601 } 8602 } 8603 8604 static bool 8605 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8606 const void *cursor, 8607 const struct rtw89_txpwr_conf *conf) 8608 { 8609 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8610 return false; 8611 8612 if (e->bw >= RTW89_2G_BW_NUM) 8613 return false; 8614 if (e->nt >= RTW89_NTX_NUM) 8615 return false; 8616 if (e->rs >= RTW89_RS_LMT_NUM) 8617 return false; 8618 if (e->bf >= RTW89_BF_NUM) 8619 return false; 8620 if (e->regd >= RTW89_REGD_NUM) 8621 return false; 8622 if (e->ch_idx >= RTW89_2G_CH_NUM) 8623 return false; 8624 8625 return true; 8626 } 8627 8628 static 8629 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8630 { 8631 const struct rtw89_txpwr_conf *conf = &data->conf; 8632 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8633 #if defined(__linux__) 8634 const void *cursor; 8635 #elif defined(__FreeBSD__) 8636 const u8 *cursor; 8637 #endif 8638 8639 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8640 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8641 continue; 8642 8643 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8644 [entry.ch_idx] = entry.v; 8645 } 8646 } 8647 8648 static bool 8649 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8650 const void *cursor, 8651 const struct rtw89_txpwr_conf *conf) 8652 { 8653 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8654 return false; 8655 8656 if (e->bw >= RTW89_5G_BW_NUM) 8657 return false; 8658 if (e->nt >= RTW89_NTX_NUM) 8659 return false; 8660 if (e->rs >= RTW89_RS_LMT_NUM) 8661 return false; 8662 if (e->bf >= RTW89_BF_NUM) 8663 return false; 8664 if (e->regd >= RTW89_REGD_NUM) 8665 return false; 8666 if (e->ch_idx >= RTW89_5G_CH_NUM) 8667 return false; 8668 8669 return true; 8670 } 8671 8672 static 8673 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8674 { 8675 const struct rtw89_txpwr_conf *conf = &data->conf; 8676 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8677 #if defined(__linux__) 8678 const void *cursor; 8679 #elif defined(__FreeBSD__) 8680 const u8 *cursor; 8681 #endif 8682 8683 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8684 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8685 continue; 8686 8687 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8688 [entry.ch_idx] = entry.v; 8689 } 8690 } 8691 8692 static bool 8693 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8694 const void *cursor, 8695 const struct rtw89_txpwr_conf *conf) 8696 { 8697 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8698 return false; 8699 8700 if (e->bw >= RTW89_6G_BW_NUM) 8701 return false; 8702 if (e->nt >= RTW89_NTX_NUM) 8703 return false; 8704 if (e->rs >= RTW89_RS_LMT_NUM) 8705 return false; 8706 if (e->bf >= RTW89_BF_NUM) 8707 return false; 8708 if (e->regd >= RTW89_REGD_NUM) 8709 return false; 8710 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8711 return false; 8712 if (e->ch_idx >= RTW89_6G_CH_NUM) 8713 return false; 8714 8715 return true; 8716 } 8717 8718 static 8719 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8720 { 8721 const struct rtw89_txpwr_conf *conf = &data->conf; 8722 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8723 #if defined(__linux__) 8724 const void *cursor; 8725 #elif defined(__FreeBSD__) 8726 const u8 *cursor; 8727 #endif 8728 8729 
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8730 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8731 continue; 8732 8733 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8734 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8735 } 8736 } 8737 8738 static bool 8739 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8740 const void *cursor, 8741 const struct rtw89_txpwr_conf *conf) 8742 { 8743 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8744 return false; 8745 8746 if (e->ru >= RTW89_RU_NUM) 8747 return false; 8748 if (e->nt >= RTW89_NTX_NUM) 8749 return false; 8750 if (e->regd >= RTW89_REGD_NUM) 8751 return false; 8752 if (e->ch_idx >= RTW89_2G_CH_NUM) 8753 return false; 8754 8755 return true; 8756 } 8757 8758 static 8759 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8760 { 8761 const struct rtw89_txpwr_conf *conf = &data->conf; 8762 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8763 #if defined(__linux__) 8764 const void *cursor; 8765 #elif defined(__FreeBSD__) 8766 const u8 *cursor; 8767 #endif 8768 8769 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8770 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 8771 continue; 8772 8773 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8774 } 8775 } 8776 8777 static bool 8778 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8779 const void *cursor, 8780 const struct rtw89_txpwr_conf *conf) 8781 { 8782 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8783 return false; 8784 8785 if (e->ru >= RTW89_RU_NUM) 8786 return false; 8787 if (e->nt >= RTW89_NTX_NUM) 8788 return false; 8789 if (e->regd >= RTW89_REGD_NUM) 8790 return false; 8791 if (e->ch_idx >= RTW89_5G_CH_NUM) 8792 return false; 8793 8794 return true; 8795 } 8796 8797 static 8798 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8799 { 8800 const struct rtw89_txpwr_conf *conf = &data->conf; 8801 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8802 #if defined(__linux__) 8803 const void *cursor; 8804 #elif defined(__FreeBSD__) 8805 const u8 *cursor; 8806 #endif 8807 8808 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8809 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8810 continue; 8811 8812 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8813 } 8814 } 8815 8816 static bool 8817 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8818 const void *cursor, 8819 const struct rtw89_txpwr_conf *conf) 8820 { 8821 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8822 return false; 8823 8824 if (e->ru >= RTW89_RU_NUM) 8825 return false; 8826 if (e->nt >= RTW89_NTX_NUM) 8827 return false; 8828 if (e->regd >= RTW89_REGD_NUM) 8829 return false; 8830 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8831 return false; 8832 if (e->ch_idx >= RTW89_6G_CH_NUM) 8833 return false; 8834 8835 return true; 8836 } 8837 8838 static 8839 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8840 { 8841 const struct rtw89_txpwr_conf *conf = &data->conf; 8842 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8843 #if defined(__linux__) 8844 const void *cursor; 8845 #elif defined(__FreeBSD__) 8846 const u8 *cursor; 8847 #endif 8848 8849 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8850 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8851 continue; 8852 8853 
data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8854 [entry.ch_idx] = entry.v; 8855 } 8856 } 8857 8858 static bool 8859 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 8860 const void *cursor, 8861 const struct rtw89_txpwr_conf *conf) 8862 { 8863 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8864 return false; 8865 8866 if (e->band >= RTW89_BAND_NUM) 8867 return false; 8868 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8869 return false; 8870 if (e->regd >= RTW89_REGD_NUM) 8871 return false; 8872 8873 return true; 8874 } 8875 8876 static 8877 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8878 { 8879 const struct rtw89_txpwr_conf *conf = &data->conf; 8880 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8881 #if defined(__linux__) 8882 const void *cursor; 8883 #elif defined(__FreeBSD__) 8884 const u8 *cursor; 8885 #endif 8886 8887 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8888 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8889 continue; 8890 8891 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8892 } 8893 } 8894 8895 static bool 8896 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8897 const void *cursor, 8898 const struct rtw89_txpwr_conf *conf) 8899 { 8900 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8901 return false; 8902 8903 if (e->band >= RTW89_BAND_NUM) 8904 return false; 8905 if (e->regd >= RTW89_REGD_NUM) 8906 return false; 8907 8908 return true; 8909 } 8910 8911 static 8912 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8913 { 8914 const struct rtw89_txpwr_conf *conf = &data->conf; 8915 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8916 #if defined(__linux__) 8917 const void *cursor; 8918 #elif defined(__FreeBSD__) 8919 const u8 *cursor; 8920 #endif 8921 8922 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8923 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8924 continue; 8925 8926 data->v[entry.band][entry.regd] = entry.v; 8927 } 8928 } 8929 8930 const struct rtw89_rfe_parms * 8931 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8932 const struct rtw89_rfe_parms *init) 8933 { 8934 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8935 struct rtw89_rfe_parms *parms; 8936 8937 if (!rfe_data) 8938 return init; 8939 8940 parms = &rfe_data->rfe_parms; 8941 if (init) 8942 *parms = *init; 8943 8944 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8945 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8946 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8947 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8948 parms->byr_tbl = &rfe_data->byrate.tbl; 8949 } 8950 8951 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8952 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8953 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8954 } 8955 8956 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8957 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8958 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8959 } 8960 8961 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8962 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8963 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8964 } 8965 8966 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8967 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8968 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8969 } 8970 8971 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8972 
rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8973 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8974 } 8975 8976 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8977 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8978 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8979 } 8980 8981 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8982 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8983 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8984 } 8985 8986 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8987 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8988 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8989 } 8990 8991 return parms; 8992 } 8993
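
/* Illustrative sketch only, not part of the driver: a hypothetical wrapper
 * (rtw89_fw_h2c_start_mcc_checked() is an invented name) showing one way a
 * caller could interpret the tri-state return convention documented at
 * rtw89_h2c_tx_and_wait() above, using rtw89_fw_h2c_start_mcc() as the
 * example command.
 */
static int __maybe_unused
rtw89_fw_h2c_start_mcc_checked(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_mcc_start_req *req)
{
	int ret;

	ret = rtw89_fw_h2c_start_mcc(rtwdev, req);
	if (ret < 0)
		return ret;	/* H2C TX failed or waiting for the condition failed */
	if (ret > 0)
		return 0;	/* wait skipped by design; 1 means SER is in progress */

	return 0;		/* firmware acknowledged the start MCC command */
}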