1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 struct rtw89_eapol_2_of_2 { 19 u8 gtkbody[14]; 20 u8 key_des_ver; 21 u8 rsvd[92]; 22 } __packed; 23 24 struct rtw89_sa_query { 25 u8 category; 26 u8 action; 27 } __packed; 28 29 struct rtw89_arp_rsp { 30 u8 llc_hdr[sizeof(rfc1042_header)]; 31 __be16 llc_type; 32 struct arphdr arp_hdr; 33 u8 sender_hw[ETH_ALEN]; 34 __be32 sender_ip; 35 u8 target_hw[ETH_ALEN]; 36 __be32 target_ip; 37 } __packed; 38 39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 40 41 union rtw89_fw_element_arg { 42 size_t offset; 43 enum rtw89_rf_path rf_path; 44 enum rtw89_fw_type fw_type; 45 }; 46 47 struct rtw89_fw_element_handler { 48 int (*fn)(struct rtw89_dev *rtwdev, 49 const struct rtw89_fw_element_hdr *elm, 50 const union rtw89_fw_element_arg arg); 51 const union rtw89_fw_element_arg arg; 52 const char *name; 53 }; 54 55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 56 struct sk_buff *skb); 57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 58 struct rtw89_wait_info *wait, unsigned int cond); 59 static int __parse_security_section(struct rtw89_dev *rtwdev, 60 struct rtw89_fw_bin_info *info, 61 struct rtw89_fw_hdr_section_info *section_info, 62 #if defined(__linux__) 63 const void *content, 64 #elif defined(__FreeBSD__) 65 const u8 *content, 66 #endif 67 u32 *mssc_len); 68 69 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 70 bool header) 71 { 72 struct sk_buff *skb; 73 u32 header_len = 0; 74 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 75 76 if (header) 77 header_len = H2C_HEADER_LEN; 78 79 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 80 if (!skb) 81 return NULL; 82 skb_reserve(skb, header_len + h2c_desc_size); 83 memset(skb->data, 0, len); 84 85 return skb; 86 } 87 88 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 89 { 90 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 91 } 92 93 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 94 { 95 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 96 } 97 98 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 99 { 100 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 101 u8 val; 102 int ret; 103 104 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 105 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 106 1, FWDL_WAIT_CNT, false, rtwdev, type); 107 if (ret) { 108 switch (val) { 109 case RTW89_FWDL_CHECKSUM_FAIL: 110 rtw89_err(rtwdev, "fw checksum fail\n"); 111 return -EINVAL; 112 113 case RTW89_FWDL_SECURITY_FAIL: 114 rtw89_err(rtwdev, "fw security fail\n"); 115 return -EINVAL; 116 117 case RTW89_FWDL_CV_NOT_MATCH: 118 rtw89_err(rtwdev, "fw cv not match\n"); 119 return -EINVAL; 120 121 default: 122 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 123 return -EBUSY; 124 } 125 } 126 127 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 128 129 return 0; 130 } 131 132 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 133 struct rtw89_fw_bin_info *info) 134 { 135 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 136 const struct 
rtw89_chip_info *chip = rtwdev->chip; 137 struct rtw89_fw_hdr_section_info *section_info; 138 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 139 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 140 const struct rtw89_fw_hdr_section *section; 141 const u8 *fw_end = fw + len; 142 const u8 *bin; 143 u32 base_hdr_len; 144 u32 mssc_len; 145 int ret; 146 u32 i; 147 148 if (!info) 149 return -EINVAL; 150 151 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 152 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 153 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 154 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 155 156 if (info->dynamic_hdr_en) { 157 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 158 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 159 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 160 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 161 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 162 return -EINVAL; 163 } 164 } else { 165 info->hdr_len = base_hdr_len; 166 info->dynamic_hdr_len = 0; 167 } 168 169 bin = fw + info->hdr_len; 170 171 /* jump to section header */ 172 section_info = info->section_info; 173 for (i = 0; i < info->section_num; i++) { 174 section = &fw_hdr->sections[i]; 175 section_info->type = 176 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 177 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 178 179 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 180 section_info->len += FWDL_SECTION_CHKSUM_LEN; 181 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 182 section_info->dladdr = 183 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 184 section_info->addr = bin; 185 186 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 187 section_info->mssc = 188 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 189 190 ret = __parse_security_section(rtwdev, info, section_info, 191 bin, &mssc_len); 192 if (ret) 193 return ret; 194 195 if (sec->secure_boot && chip->chip_id == RTL8852B) 196 section_info->len_override = 960; 197 } else { 198 section_info->mssc = 0; 199 mssc_len = 0; 200 } 201 202 rtw89_debug(rtwdev, RTW89_DBG_FW, 203 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 204 i, section_info->type, section_info->len, 205 section_info->mssc, mssc_len, bin - fw); 206 rtw89_debug(rtwdev, RTW89_DBG_FW, 207 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 208 section_info->ignore, section_info->key_addr, 209 section_info->key_addr ? 
210 section_info->key_addr - section_info->addr : 0, 211 section_info->key_len, section_info->key_idx); 212 213 bin += section_info->len + mssc_len; 214 section_info++; 215 } 216 217 if (fw_end != bin) { 218 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 219 return -EINVAL; 220 } 221 222 return 0; 223 } 224 225 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 226 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 227 u32 rmp_tbl_size, u32 *key_idx) 228 { 229 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 230 u32 sel_byte_idx; 231 u32 mss_sel_idx; 232 u8 sel_bit_idx; 233 int i; 234 235 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 236 if (!mss_hdr->defen) 237 return -ENOENT; 238 239 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 240 sec->mss_key_num; 241 } else { 242 if (mss_hdr->defen) 243 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 244 else 245 mss_sel_idx = 0; 246 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 247 le16_to_cpu(mss_hdr->msscust_max) + 248 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 249 sec->mss_key_num; 250 } 251 252 sel_byte_idx = mss_sel_idx >> 3; 253 sel_bit_idx = mss_sel_idx & 0x7; 254 255 if (sel_byte_idx >= rmp_tbl_size) 256 return -EFAULT; 257 258 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 259 return -ENOENT; 260 261 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 262 263 for (i = 0; i < sel_byte_idx; i++) 264 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 265 266 return 0; 267 } 268 269 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 270 struct rtw89_fw_bin_info *info, 271 struct rtw89_fw_hdr_section_info *section_info, 272 #if defined(__linux__) 273 const void *content, 274 #elif defined(__FreeBSD__) 275 const u8 *content, 276 #endif 277 u32 *mssc_len) 278 { 279 #if defined(__linux__) 280 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 281 const union rtw89_fw_section_mssc_content *section_content = content; 282 #elif defined(__FreeBSD__) 283 const struct rtw89_fw_mss_pool_hdr *mss_hdr = (const void *)(content + section_info->len); 284 const union rtw89_fw_section_mssc_content *section_content = (const void *)content; 285 #endif 286 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 287 u32 rmp_tbl_size; 288 u32 key_sign_len; 289 u32 real_key_idx; 290 u32 sb_sel_ver; 291 int ret; 292 293 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 294 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 295 return -ENOENT; 296 } 297 298 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 299 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 300 le16_to_cpu(mss_hdr->msscust_max) * 301 mss_hdr->mssdev_max) >> 3; 302 if (mss_hdr->defen) 303 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 304 } else { 305 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 306 mss_hdr->rmpfmt); 307 return -EINVAL; 308 } 309 310 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 311 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 312 rmp_tbl_size, (int)sizeof(*mss_hdr), 313 le32_to_cpu(mss_hdr->key_raw_offset)); 314 return -EINVAL; 315 } 316 317 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 318 if (!key_sign_len) 319 key_sign_len = 512; 320 321 if (info->dsp_checksum) 322 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 323 324 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 325 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 
326 327 if (!sec->secure_boot) 328 goto out; 329 330 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 331 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 332 goto ignore; 333 334 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 335 if (ret) 336 goto ignore; 337 338 section_info->key_addr = content + section_info->len + 339 le32_to_cpu(mss_hdr->key_raw_offset) + 340 key_sign_len * real_key_idx; 341 section_info->key_len = key_sign_len; 342 section_info->key_idx = real_key_idx; 343 344 out: 345 if (info->secure_section_exist) { 346 section_info->ignore = true; 347 return 0; 348 } 349 350 info->secure_section_exist = true; 351 352 return 0; 353 354 ignore: 355 section_info->ignore = true; 356 357 return 0; 358 } 359 360 static int __parse_security_section(struct rtw89_dev *rtwdev, 361 struct rtw89_fw_bin_info *info, 362 struct rtw89_fw_hdr_section_info *section_info, 363 #if defined(__linux__) 364 const void *content, 365 #elif defined(__FreeBSD__) 366 const u8 *content, 367 #endif 368 u32 *mssc_len) 369 { 370 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 371 int ret; 372 373 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 374 ret = __parse_formatted_mssc(rtwdev, info, section_info, 375 content, mssc_len); 376 if (ret) 377 return -EINVAL; 378 } else { 379 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 380 if (info->dsp_checksum) 381 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 382 383 if (sec->secure_boot) { 384 if (sec->mss_idx >= section_info->mssc) 385 return -EFAULT; 386 section_info->key_addr = content + section_info->len + 387 sec->mss_idx * FWDL_SECURITY_SIGLEN; 388 section_info->key_len = FWDL_SECURITY_SIGLEN; 389 } 390 391 info->secure_section_exist = true; 392 } 393 394 return 0; 395 } 396 397 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 398 struct rtw89_fw_bin_info *info) 399 { 400 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 401 struct rtw89_fw_hdr_section_info *section_info; 402 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 403 const struct rtw89_fw_hdr_section_v1 *section; 404 const u8 *fw_end = fw + len; 405 const u8 *bin; 406 u32 base_hdr_len; 407 u32 mssc_len; 408 int ret; 409 u32 i; 410 411 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 412 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 413 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 414 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 415 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 416 417 if (info->dynamic_hdr_en) { 418 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 419 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 420 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 421 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 422 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 423 return -EINVAL; 424 } 425 } else { 426 info->hdr_len = base_hdr_len; 427 info->dynamic_hdr_len = 0; 428 } 429 430 bin = fw + info->hdr_len; 431 432 /* jump to section header */ 433 section_info = info->section_info; 434 for (i = 0; i < info->section_num; i++) { 435 section = &fw_hdr->sections[i]; 436 437 section_info->type = 438 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 439 section_info->len = 440 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 441 if (le32_get_bits(section->w1, 
FWSECTION_HDR_V1_W1_CHECKSUM)) 442 section_info->len += FWDL_SECTION_CHKSUM_LEN; 443 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 444 section_info->dladdr = 445 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 446 section_info->addr = bin; 447 448 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 449 section_info->mssc = 450 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 451 452 ret = __parse_security_section(rtwdev, info, section_info, 453 bin, &mssc_len); 454 if (ret) 455 return ret; 456 } else { 457 section_info->mssc = 0; 458 mssc_len = 0; 459 } 460 461 rtw89_debug(rtwdev, RTW89_DBG_FW, 462 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 463 i, section_info->type, section_info->len, 464 section_info->mssc, mssc_len, bin - fw); 465 rtw89_debug(rtwdev, RTW89_DBG_FW, 466 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 467 section_info->ignore, section_info->key_addr, 468 section_info->key_addr ? 469 section_info->key_addr - section_info->addr : 0, 470 section_info->key_len, section_info->key_idx); 471 472 bin += section_info->len + mssc_len; 473 section_info++; 474 } 475 476 if (fw_end != bin) { 477 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 478 return -EINVAL; 479 } 480 481 if (!info->secure_section_exist) 482 rtw89_warn(rtwdev, "no firmware secure section\n"); 483 484 return 0; 485 } 486 487 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 488 const struct rtw89_fw_suit *fw_suit, 489 struct rtw89_fw_bin_info *info) 490 { 491 const u8 *fw = fw_suit->data; 492 u32 len = fw_suit->size; 493 494 if (!fw || !len) { 495 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 496 return -ENOENT; 497 } 498 499 switch (fw_suit->hdr_ver) { 500 case 0: 501 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 502 case 1: 503 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 504 default: 505 return -ENOENT; 506 } 507 } 508 509 static 510 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 511 struct rtw89_fw_suit *fw_suit, bool nowarn) 512 { 513 struct rtw89_fw_info *fw_info = &rtwdev->fw; 514 const struct firmware *firmware = fw_info->req.firmware; 515 const u8 *mfw = firmware->data; 516 u32 mfw_len = firmware->size; 517 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 518 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 519 int i; 520 521 if (mfw_hdr->sig != RTW89_MFW_SIG) { 522 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 523 /* legacy firmware support normal type only */ 524 if (type != RTW89_FW_NORMAL) 525 return -EINVAL; 526 fw_suit->data = mfw; 527 fw_suit->size = mfw_len; 528 return 0; 529 } 530 531 for (i = 0; i < mfw_hdr->fw_nr; i++) { 532 tmp = &mfw_hdr->info[i]; 533 if (tmp->type != type) 534 continue; 535 536 if (type == RTW89_FW_LOGFMT) { 537 mfw_info = tmp; 538 goto found; 539 } 540 541 /* Version order of WiFi firmware in firmware file are not in order, 542 * pass all firmware to find the equal or less but closest version. 
543 */ 544 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 545 if (!mfw_info || mfw_info->cv < tmp->cv) 546 mfw_info = tmp; 547 } 548 } 549 550 if (mfw_info) 551 goto found; 552 553 if (!nowarn) 554 rtw89_err(rtwdev, "no suitable firmware found\n"); 555 return -ENOENT; 556 557 found: 558 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 559 fw_suit->size = le32_to_cpu(mfw_info->size); 560 return 0; 561 } 562 563 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 564 { 565 struct rtw89_fw_info *fw_info = &rtwdev->fw; 566 const struct firmware *firmware = fw_info->req.firmware; 567 const struct rtw89_mfw_hdr *mfw_hdr = 568 (const struct rtw89_mfw_hdr *)firmware->data; 569 const struct rtw89_mfw_info *mfw_info; 570 u32 size; 571 572 if (mfw_hdr->sig != RTW89_MFW_SIG) { 573 rtw89_warn(rtwdev, "not mfw format\n"); 574 return 0; 575 } 576 577 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 578 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 579 580 return size; 581 } 582 583 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 584 struct rtw89_fw_suit *fw_suit, 585 const struct rtw89_fw_hdr *hdr) 586 { 587 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 588 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 589 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 590 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 591 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 592 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 593 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 594 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 595 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 596 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 597 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 598 } 599 600 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 601 struct rtw89_fw_suit *fw_suit, 602 const struct rtw89_fw_hdr_v1 *hdr) 603 { 604 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 605 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 606 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 607 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 608 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 609 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 610 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 611 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 612 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 613 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 614 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 615 } 616 617 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 618 enum rtw89_fw_type type, 619 struct rtw89_fw_suit *fw_suit) 620 { 621 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 622 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 623 624 if (type == RTW89_FW_LOGFMT) 625 return 0; 626 627 fw_suit->type = type; 628 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 629 630 switch (fw_suit->hdr_ver) { 631 case 0: 632 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 633 break; 634 case 1: 635 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 636 break; 637 default: 638 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 639 
fw_suit->hdr_ver); 640 return -ENOENT; 641 } 642 643 rtw89_info(rtwdev, 644 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 645 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 646 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 647 648 return 0; 649 } 650 651 static 652 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 653 bool nowarn) 654 { 655 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 656 int ret; 657 658 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 659 if (ret) 660 return ret; 661 662 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 663 } 664 665 static 666 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 667 const struct rtw89_fw_element_hdr *elm, 668 const union rtw89_fw_element_arg arg) 669 { 670 #if defined(__linux__) 671 enum rtw89_fw_type type = arg.fw_type; 672 #elif defined(__FreeBSD__) 673 const enum rtw89_fw_type type = arg.fw_type; 674 #endif 675 struct rtw89_hal *hal = &rtwdev->hal; 676 struct rtw89_fw_suit *fw_suit; 677 678 /* Version of BB MCU is in decreasing order in firmware file, so take 679 * first equal or less version, which is equal or less but closest version. 680 */ 681 if (hal->cv < elm->u.bbmcu.cv) 682 return 1; /* ignore this element */ 683 684 fw_suit = rtw89_fw_suit_get(rtwdev, type); 685 if (fw_suit->data) 686 return 1; /* ignore this element (a firmware is taken already) */ 687 688 fw_suit->data = elm->u.bbmcu.contents; 689 fw_suit->size = le32_to_cpu(elm->size); 690 691 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 692 } 693 694 #define __DEF_FW_FEAT_COND(__cond, __op) \ 695 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 696 { \ 697 return suit_ver_code __op comp_ver_code; \ 698 } 699 700 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 701 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 702 __DEF_FW_FEAT_COND(lt, <); /* less than */ 703 704 struct __fw_feat_cfg { 705 enum rtw89_core_chip_id chip_id; 706 enum rtw89_fw_feature feature; 707 u32 ver_code; 708 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 709 }; 710 711 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 712 { \ 713 .chip_id = _chip, \ 714 .feature = RTW89_FW_FEATURE_ ## _feat, \ 715 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 716 .cond = __fw_feat_cond_ ## _cond, \ 717 } 718 719 static const struct __fw_feat_cfg fw_feat_tbl[] = { 720 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 721 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 722 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 723 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 724 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 725 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 726 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 727 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 728 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 729 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 730 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 731 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 732 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 733 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 734 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 735 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 736 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 737 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 
738 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 739 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 740 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 741 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 742 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 743 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 744 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 745 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 746 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 747 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 748 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 749 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 750 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 751 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 752 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 753 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 754 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 755 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 756 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 757 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 758 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 759 }; 760 761 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 762 const struct rtw89_chip_info *chip, 763 u32 ver_code) 764 { 765 int i; 766 767 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 768 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 769 770 if (chip->chip_id != ent->chip_id) 771 continue; 772 773 if (ent->cond(ver_code, ent->ver_code)) 774 RTW89_SET_FW_FEATURE(ent->feature, fw); 775 } 776 } 777 778 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 779 { 780 const struct rtw89_chip_info *chip = rtwdev->chip; 781 const struct rtw89_fw_suit *fw_suit; 782 u32 suit_ver_code; 783 784 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 785 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 786 787 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 788 } 789 790 const struct firmware * 791 rtw89_early_fw_feature_recognize(struct device *device, 792 const struct rtw89_chip_info *chip, 793 struct rtw89_fw_info *early_fw, 794 int *used_fw_format) 795 { 796 const struct firmware *firmware; 797 char fw_name[64]; 798 int fw_format; 799 u32 ver_code; 800 int ret; 801 802 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 803 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 804 chip->fw_basename, fw_format); 805 806 ret = request_firmware(&firmware, fw_name, device); 807 if (!ret) { 808 dev_info(device, "loaded firmware %s\n", fw_name); 809 *used_fw_format = fw_format; 810 break; 811 } 812 } 813 814 if (ret) { 815 dev_err(device, "failed to early request firmware: %d\n", ret); 816 return NULL; 817 } 818 819 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 820 821 if (!ver_code) 822 goto out; 823 824 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 825 826 out: 827 return firmware; 828 } 829 830 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 831 { 832 const struct rtw89_chip_variant *variant = rtwdev->variant; 833 const struct rtw89_fw_suit *fw_suit; 834 u32 suit_ver_code; 835 836 if (!variant) 837 return 0; 838 839 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 840 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 841 842 if (variant->fw_min_ver_code > suit_ver_code) { 843 rtw89_err(rtwdev, "minimum 
required firmware version is 0x%x\n", 844 variant->fw_min_ver_code); 845 return -ENOENT; 846 } 847 848 return 0; 849 } 850 851 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 852 { 853 const struct rtw89_chip_info *chip = rtwdev->chip; 854 int ret; 855 856 if (chip->try_ce_fw) { 857 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 858 if (!ret) 859 goto normal_done; 860 } 861 862 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 863 if (ret) 864 return ret; 865 866 normal_done: 867 ret = rtw89_fw_validate_ver_required(rtwdev); 868 if (ret) 869 return ret; 870 871 /* It still works if wowlan firmware isn't existing. */ 872 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 873 874 /* It still works if log format file isn't existing. */ 875 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 876 877 rtw89_fw_recognize_features(rtwdev); 878 879 rtw89_coex_recognize_ver(rtwdev); 880 881 return 0; 882 } 883 884 static 885 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 886 const struct rtw89_fw_element_hdr *elm, 887 const union rtw89_fw_element_arg arg) 888 { 889 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 890 struct rtw89_phy_table *tbl; 891 struct rtw89_reg2_def *regs; 892 enum rtw89_rf_path rf_path; 893 u32 n_regs, i; 894 u8 idx; 895 896 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 897 if (!tbl) 898 return -ENOMEM; 899 900 switch (le32_to_cpu(elm->id)) { 901 case RTW89_FW_ELEMENT_ID_BB_REG: 902 elm_info->bb_tbl = tbl; 903 break; 904 case RTW89_FW_ELEMENT_ID_BB_GAIN: 905 elm_info->bb_gain = tbl; 906 break; 907 case RTW89_FW_ELEMENT_ID_RADIO_A: 908 case RTW89_FW_ELEMENT_ID_RADIO_B: 909 case RTW89_FW_ELEMENT_ID_RADIO_C: 910 case RTW89_FW_ELEMENT_ID_RADIO_D: 911 #if defined(__linux__) 912 rf_path = arg.rf_path; 913 #elif defined(__FreeBSD__) 914 rf_path = __DECONST(enum rtw89_rf_path, arg.rf_path); 915 #endif 916 idx = elm->u.reg2.idx; 917 918 elm_info->rf_radio[idx] = tbl; 919 tbl->rf_path = rf_path; 920 tbl->config = rtw89_phy_config_rf_reg_v1; 921 break; 922 case RTW89_FW_ELEMENT_ID_RF_NCTL: 923 elm_info->rf_nctl = tbl; 924 break; 925 default: 926 kfree(tbl); 927 return -ENOENT; 928 } 929 930 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 931 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); 932 if (!regs) 933 goto out; 934 935 for (i = 0; i < n_regs; i++) { 936 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 937 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 938 } 939 940 tbl->n_regs = n_regs; 941 tbl->regs = regs; 942 943 return 0; 944 945 out: 946 kfree(tbl); 947 return -ENOMEM; 948 } 949 950 static 951 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 952 const struct rtw89_fw_element_hdr *elm, 953 const union rtw89_fw_element_arg arg) 954 { 955 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 956 const unsigned long offset = arg.offset; 957 struct rtw89_efuse *efuse = &rtwdev->efuse; 958 struct rtw89_txpwr_conf *conf; 959 960 if (!rtwdev->rfe_data) { 961 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 962 if (!rtwdev->rfe_data) 963 return -ENOMEM; 964 } 965 966 #if defined(__linux__) 967 conf = (void *)rtwdev->rfe_data + offset; 968 #elif defined(__FreeBSD__) 969 conf = (void *)((u8 *)rtwdev->rfe_data + offset); 970 #endif 971 972 /* if multiple matched, take the last eventually */ 973 if (txpwr_elm->rfe_type == efuse->rfe_type) 974 goto setup; 975 976 /* without one is matched, accept default */ 977 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 
978 (!rtw89_txpwr_conf_valid(conf) || 979 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 980 goto setup; 981 982 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 983 elm->id, txpwr_elm->rfe_type); 984 return 0; 985 986 setup: 987 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 988 elm->id, txpwr_elm->rfe_type); 989 990 conf->rfe_type = txpwr_elm->rfe_type; 991 conf->ent_sz = txpwr_elm->ent_sz; 992 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 993 conf->data = txpwr_elm->content; 994 return 0; 995 } 996 997 static 998 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 999 const struct rtw89_fw_element_hdr *elm, 1000 const union rtw89_fw_element_arg arg) 1001 { 1002 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1003 const struct rtw89_chip_info *chip = rtwdev->chip; 1004 u32 needed_bitmap = 0; 1005 u32 offset = 0; 1006 int subband; 1007 u32 bitmap; 1008 int type; 1009 1010 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 1011 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 1012 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 1013 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 1014 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 1015 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 1016 1017 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 1018 1019 if ((bitmap & needed_bitmap) != needed_bitmap) { 1020 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n", 1021 needed_bitmap, bitmap); 1022 return -ENOENT; 1023 } 1024 1025 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 1026 if (!elm_info->txpwr_trk) 1027 return -ENOMEM; 1028 1029 for (type = 0; bitmap; type++, bitmap >>= 1) { 1030 if (!(bitmap & BIT(0))) 1031 continue; 1032 1033 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 1034 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 1035 subband = 4; 1036 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 1037 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 1038 subband = 3; 1039 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 1040 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 1041 subband = 1; 1042 else 1043 break; 1044 1045 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 1046 1047 offset += subband; 1048 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 1049 goto err; 1050 } 1051 1052 return 0; 1053 1054 err: 1055 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1056 offset, le32_to_cpu(elm->size)); 1057 kfree(elm_info->txpwr_trk); 1058 elm_info->txpwr_trk = NULL; 1059 1060 return -EFAULT; 1061 } 1062 1063 static 1064 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1065 const struct rtw89_fw_element_hdr *elm, 1066 const union rtw89_fw_element_arg arg) 1067 { 1068 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1069 u8 rfk_id; 1070 1071 if (elm_info->rfk_log_fmt) 1072 goto allocated; 1073 1074 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1075 if (!elm_info->rfk_log_fmt) 1076 return 1; /* this is an optional element, so just ignore this */ 1077 1078 allocated: 1079 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1080 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1081 return 1; 1082 1083 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1084 1085 return 0; 1086 } 1087 1088 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 1089 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 1090 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, 1091 
[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, 1092 { .fw_type = RTW89_FW_BBMCU1 }, NULL}, 1093 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 1094 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 1095 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 1096 { .rf_path = RF_PATH_A }, "radio A"}, 1097 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1098 { .rf_path = RF_PATH_B }, NULL}, 1099 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1100 { .rf_path = RF_PATH_C }, NULL}, 1101 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1102 { .rf_path = RF_PATH_D }, NULL}, 1103 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1104 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1105 rtw89_fw_recognize_txpwr_from_elm, 1106 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1107 }, 1108 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1109 rtw89_fw_recognize_txpwr_from_elm, 1110 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1111 }, 1112 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1113 rtw89_fw_recognize_txpwr_from_elm, 1114 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1115 }, 1116 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1117 rtw89_fw_recognize_txpwr_from_elm, 1118 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1119 }, 1120 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1121 rtw89_fw_recognize_txpwr_from_elm, 1122 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1123 }, 1124 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1125 rtw89_fw_recognize_txpwr_from_elm, 1126 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1127 }, 1128 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1129 rtw89_fw_recognize_txpwr_from_elm, 1130 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1131 }, 1132 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1133 rtw89_fw_recognize_txpwr_from_elm, 1134 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1135 }, 1136 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1137 rtw89_fw_recognize_txpwr_from_elm, 1138 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1139 }, 1140 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1141 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1142 }, 1143 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1144 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1145 }, 1146 }; 1147 1148 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1149 { 1150 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1151 const struct firmware *firmware = fw_info->req.firmware; 1152 const struct rtw89_chip_info *chip = rtwdev->chip; 1153 u32 unrecognized_elements = chip->needed_fw_elms; 1154 const struct rtw89_fw_element_handler *handler; 1155 const struct rtw89_fw_element_hdr *hdr; 1156 u32 elm_size; 1157 u32 elem_id; 1158 u32 offset; 1159 int ret; 1160 1161 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1162 1163 offset = rtw89_mfw_get_size(rtwdev); 1164 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1165 if (offset == 0) 1166 return -EINVAL; 1167 1168 while (offset + sizeof(*hdr) < firmware->size) { 1169 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1170 1171 elm_size = le32_to_cpu(hdr->size); 1172 if (offset + elm_size >= firmware->size) { 1173 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1174 break; 1175 } 1176 1177 elem_id = le32_to_cpu(hdr->id); 
1178 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1179 goto next; 1180 1181 handler = &__fw_element_handlers[elem_id]; 1182 if (!handler->fn) 1183 goto next; 1184 1185 ret = handler->fn(rtwdev, hdr, handler->arg); 1186 if (ret == 1) /* ignore this element */ 1187 goto next; 1188 if (ret) 1189 return ret; 1190 1191 if (handler->name) 1192 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1193 handler->name, hdr->ver); 1194 1195 unrecognized_elements &= ~BIT(elem_id); 1196 next: 1197 offset += sizeof(*hdr) + elm_size; 1198 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1199 } 1200 1201 if (unrecognized_elements) { 1202 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1203 unrecognized_elements); 1204 return -ENOENT; 1205 } 1206 1207 return 0; 1208 } 1209 1210 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1211 u8 type, u8 cat, u8 class, u8 func, 1212 bool rack, bool dack, u32 len) 1213 { 1214 struct fwcmd_hdr *hdr; 1215 1216 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1217 1218 if (!(rtwdev->fw.h2c_seq % 4)) 1219 rack = true; 1220 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1221 FIELD_PREP(H2C_HDR_CAT, cat) | 1222 FIELD_PREP(H2C_HDR_CLASS, class) | 1223 FIELD_PREP(H2C_HDR_FUNC, func) | 1224 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1225 1226 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1227 len + H2C_HEADER_LEN) | 1228 (rack ? H2C_HDR_REC_ACK : 0) | 1229 (dack ? H2C_HDR_DONE_ACK : 0)); 1230 1231 rtwdev->fw.h2c_seq++; 1232 } 1233 1234 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1235 struct sk_buff *skb, 1236 u8 type, u8 cat, u8 class, u8 func, 1237 u32 len) 1238 { 1239 struct fwcmd_hdr *hdr; 1240 1241 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1242 1243 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1244 FIELD_PREP(H2C_HDR_CAT, cat) | 1245 FIELD_PREP(H2C_HDR_CLASS, class) | 1246 FIELD_PREP(H2C_HDR_FUNC, func) | 1247 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1248 1249 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1250 len + H2C_HEADER_LEN)); 1251 } 1252 1253 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1254 struct rtw89_fw_bin_info *info, 1255 struct rtw89_fw_hdr *fw_hdr) 1256 { 1257 struct rtw89_fw_hdr_section_info *section_info; 1258 struct rtw89_fw_hdr_section *section; 1259 int i; 1260 1261 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1262 FW_HDR_W7_PART_SIZE); 1263 1264 for (i = 0; i < info->section_num; i++) { 1265 section_info = &info->section_info[i]; 1266 1267 if (!section_info->len_override) 1268 continue; 1269 1270 section = &fw_hdr->sections[i]; 1271 le32p_replace_bits(§ion->w1, section_info->len_override, 1272 FWSECTION_HDR_W1_SEC_SIZE); 1273 } 1274 1275 return 0; 1276 } 1277 1278 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1279 struct rtw89_fw_bin_info *info, 1280 struct rtw89_fw_hdr_v1 *fw_hdr) 1281 { 1282 struct rtw89_fw_hdr_section_info *section_info; 1283 struct rtw89_fw_hdr_section_v1 *section; 1284 u8 dst_sec_idx = 0; 1285 u8 sec_idx; 1286 1287 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1288 FW_HDR_V1_W7_PART_SIZE); 1289 1290 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1291 section_info = &info->section_info[sec_idx]; 1292 section = &fw_hdr->sections[sec_idx]; 1293 1294 if (section_info->ignore) 1295 continue; 1296 1297 if (dst_sec_idx != sec_idx) 1298 fw_hdr->sections[dst_sec_idx] = *section; 1299 1300 dst_sec_idx++; 1301 } 1302 1303 
le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1304 1305 return (info->section_num - dst_sec_idx) * sizeof(*section); 1306 } 1307 1308 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1309 const struct rtw89_fw_suit *fw_suit, 1310 struct rtw89_fw_bin_info *info) 1311 { 1312 u32 len = info->hdr_len - info->dynamic_hdr_len; 1313 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1314 const u8 *fw = fw_suit->data; 1315 struct rtw89_fw_hdr *fw_hdr; 1316 struct sk_buff *skb; 1317 u32 truncated; 1318 u32 ret = 0; 1319 1320 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1321 if (!skb) { 1322 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1323 return -ENOMEM; 1324 } 1325 1326 skb_put_data(skb, fw, len); 1327 1328 switch (fw_suit->hdr_ver) { 1329 case 0: 1330 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1331 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1332 break; 1333 case 1: 1334 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1335 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1336 break; 1337 default: 1338 ret = -EOPNOTSUPP; 1339 goto fail; 1340 } 1341 1342 if (truncated) { 1343 len -= truncated; 1344 skb_trim(skb, len); 1345 } 1346 1347 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1348 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1349 H2C_FUNC_MAC_FWHDR_DL, len); 1350 1351 ret = rtw89_h2c_tx(rtwdev, skb, false); 1352 if (ret) { 1353 rtw89_err(rtwdev, "failed to send h2c\n"); 1354 ret = -1; 1355 goto fail; 1356 } 1357 1358 return 0; 1359 fail: 1360 dev_kfree_skb_any(skb); 1361 1362 return ret; 1363 } 1364 1365 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1366 const struct rtw89_fw_suit *fw_suit, 1367 struct rtw89_fw_bin_info *info) 1368 { 1369 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1370 int ret; 1371 1372 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1373 if (ret) { 1374 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1375 return ret; 1376 } 1377 1378 ret = mac->fwdl_check_path_ready(rtwdev, false); 1379 if (ret) { 1380 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1381 return ret; 1382 } 1383 1384 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1385 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1386 1387 return 0; 1388 } 1389 1390 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1391 struct rtw89_fw_hdr_section_info *info) 1392 { 1393 struct sk_buff *skb; 1394 const u8 *section = info->addr; 1395 u32 residue_len = info->len; 1396 bool copy_key = false; 1397 u32 pkt_len; 1398 int ret; 1399 1400 if (info->ignore) 1401 return 0; 1402 1403 if (info->len_override) { 1404 if (info->len_override > info->len) 1405 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1406 info->len_override, info->len); 1407 else 1408 residue_len = info->len_override; 1409 } 1410 1411 if (info->key_addr && info->key_len) { 1412 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1413 rtw89_warn(rtwdev, 1414 "ignore to copy key data because of len %d, %d, %d, %d\n", 1415 info->len, FWDL_SECTION_PER_PKT_LEN, 1416 info->key_len, residue_len); 1417 else 1418 copy_key = true; 1419 } 1420 1421 while (residue_len) { 1422 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1423 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1424 else 1425 pkt_len = residue_len; 1426 1427 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1428 if (!skb) { 1429 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1430 return -ENOMEM; 1431 } 1432 skb_put_data(skb, section, pkt_len); 1433 1434 if 
(copy_key) 1435 memcpy(skb->data + pkt_len - info->key_len, 1436 info->key_addr, info->key_len); 1437 1438 ret = rtw89_h2c_tx(rtwdev, skb, true); 1439 if (ret) { 1440 rtw89_err(rtwdev, "failed to send h2c\n"); 1441 ret = -1; 1442 goto fail; 1443 } 1444 1445 section += pkt_len; 1446 residue_len -= pkt_len; 1447 } 1448 1449 return 0; 1450 fail: 1451 dev_kfree_skb_any(skb); 1452 1453 return ret; 1454 } 1455 1456 static enum rtw89_fwdl_check_type 1457 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1458 const struct rtw89_fw_suit *fw_suit) 1459 { 1460 switch (fw_suit->type) { 1461 case RTW89_FW_BBMCU0: 1462 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1463 case RTW89_FW_BBMCU1: 1464 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1465 default: 1466 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1467 } 1468 } 1469 1470 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1471 const struct rtw89_fw_suit *fw_suit, 1472 struct rtw89_fw_bin_info *info) 1473 { 1474 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1475 const struct rtw89_chip_info *chip = rtwdev->chip; 1476 enum rtw89_fwdl_check_type chk_type; 1477 u8 section_num = info->section_num; 1478 int ret; 1479 1480 while (section_num--) { 1481 ret = __rtw89_fw_download_main(rtwdev, section_info); 1482 if (ret) 1483 return ret; 1484 section_info++; 1485 } 1486 1487 if (chip->chip_gen == RTW89_CHIP_AX) 1488 return 0; 1489 1490 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1491 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1492 if (ret) { 1493 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1494 fw_suit->type); 1495 return ret; 1496 } 1497 1498 return 0; 1499 } 1500 1501 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1502 { 1503 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1504 u32 addr = R_AX_DBG_PORT_SEL; 1505 u32 val32; 1506 u16 index; 1507 1508 if (chip_gen == RTW89_CHIP_BE) { 1509 addr = R_BE_WLCPU_PORT_PC; 1510 goto dump; 1511 } 1512 1513 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1514 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1515 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1516 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1517 1518 dump: 1519 for (index = 0; index < 15; index++) { 1520 val32 = rtw89_read32(rtwdev, addr); 1521 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1522 #if defined(__linux__) 1523 fsleep(10); 1524 #elif defined(__FreeBSD__) 1525 /* Seems we are called from a context we cannot sleep. 
*/ 1526 udelay(10); 1527 #endif 1528 } 1529 } 1530 1531 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1532 { 1533 u32 val32; 1534 1535 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1536 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1537 1538 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1539 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1540 1541 rtw89_fw_prog_cnt_dump(rtwdev); 1542 } 1543 1544 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1545 struct rtw89_fw_suit *fw_suit) 1546 { 1547 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1548 struct rtw89_fw_bin_info info = {}; 1549 int ret; 1550 1551 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1552 if (ret) { 1553 rtw89_err(rtwdev, "parse fw header fail\n"); 1554 return ret; 1555 } 1556 1557 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1558 1559 if (rtwdev->chip->chip_id == RTL8922A && 1560 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1561 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1562 1563 ret = mac->fwdl_check_path_ready(rtwdev, true); 1564 if (ret) { 1565 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1566 return ret; 1567 } 1568 1569 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1570 if (ret) 1571 return ret; 1572 1573 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1574 if (ret) 1575 return ret; 1576 1577 return 0; 1578 } 1579 1580 static 1581 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1582 bool include_bb) 1583 { 1584 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1585 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1586 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1587 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1588 int ret; 1589 int i; 1590 1591 mac->disable_cpu(rtwdev); 1592 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1593 if (ret) 1594 return ret; 1595 1596 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1597 if (ret) 1598 goto fwdl_err; 1599 1600 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1601 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1602 1603 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1604 if (ret) 1605 goto fwdl_err; 1606 } 1607 1608 fw_info->h2c_seq = 0; 1609 fw_info->rec_seq = 0; 1610 fw_info->h2c_counter = 0; 1611 fw_info->c2h_counter = 0; 1612 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1613 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1614 1615 mdelay(5); 1616 1617 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1618 if (ret) { 1619 rtw89_warn(rtwdev, "download firmware fail\n"); 1620 goto fwdl_err; 1621 } 1622 1623 return ret; 1624 1625 fwdl_err: 1626 rtw89_fw_dl_fail_dump(rtwdev); 1627 return ret; 1628 } 1629 1630 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1631 bool include_bb) 1632 { 1633 int retry; 1634 int ret; 1635 1636 for (retry = 0; retry < 5; retry++) { 1637 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1638 if (!ret) 1639 return 0; 1640 } 1641 1642 return ret; 1643 } 1644 1645 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1646 { 1647 struct rtw89_fw_info *fw = &rtwdev->fw; 1648 1649 wait_for_completion(&fw->req.completion); 1650 if (!fw->req.firmware) 1651 return -EINVAL; 1652 1653 return 0; 1654 } 1655 1656 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1657 struct rtw89_fw_req_info *req, 1658 const char *fw_name, bool nowarn) 1659 { 1660 int ret; 1661 1662 if (req->firmware) { 1663 
rtw89_debug(rtwdev, RTW89_DBG_FW, 1664 "full firmware has been early requested\n"); 1665 complete_all(&req->completion); 1666 return 0; 1667 } 1668 1669 if (nowarn) 1670 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1671 else 1672 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1673 1674 complete_all(&req->completion); 1675 1676 return ret; 1677 } 1678 1679 void rtw89_load_firmware_work(struct work_struct *work) 1680 { 1681 struct rtw89_dev *rtwdev = 1682 container_of(work, struct rtw89_dev, load_firmware_work); 1683 const struct rtw89_chip_info *chip = rtwdev->chip; 1684 char fw_name[64]; 1685 1686 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1687 chip->fw_basename, rtwdev->fw.fw_format); 1688 1689 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1690 } 1691 1692 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1693 { 1694 if (!tbl) 1695 return; 1696 1697 kfree(tbl->regs); 1698 kfree(tbl); 1699 } 1700 1701 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1702 { 1703 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1704 int i; 1705 1706 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1707 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1708 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1709 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1710 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1711 1712 kfree(elm_info->txpwr_trk); 1713 kfree(elm_info->rfk_log_fmt); 1714 } 1715 1716 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1717 { 1718 struct rtw89_fw_info *fw = &rtwdev->fw; 1719 1720 cancel_work_sync(&rtwdev->load_firmware_work); 1721 1722 if (fw->req.firmware) { 1723 release_firmware(fw->req.firmware); 1724 1725 /* assign NULL back in case rtw89_free_ieee80211_hw() 1726 * try to release the same one again. 
1727 */ 1728 fw->req.firmware = NULL; 1729 } 1730 1731 kfree(fw->log.fmts); 1732 rtw89_unload_firmware_elements(rtwdev); 1733 } 1734 1735 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1736 { 1737 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1738 u32 i; 1739 1740 if (fmt_id > fw_log->last_fmt_id) 1741 return 0; 1742 1743 for (i = 0; i < fw_log->fmt_count; i++) { 1744 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1745 return i; 1746 } 1747 return 0; 1748 } 1749 1750 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1751 { 1752 struct rtw89_fw_log *log = &rtwdev->fw.log; 1753 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1754 struct rtw89_fw_suit *suit = &log->suit; 1755 #if defined(__linux__) 1756 const void *fmts_ptr, *fmts_end_ptr; 1757 #elif defined(__FreeBSD__) 1758 const u8 *fmts_ptr, *fmts_end_ptr; 1759 #endif 1760 u32 fmt_count; 1761 int i; 1762 1763 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1764 fmt_count = le32_to_cpu(suit_hdr->count); 1765 log->fmt_ids = suit_hdr->ids; 1766 #if defined(__linux__) 1767 fmts_ptr = &suit_hdr->ids[fmt_count]; 1768 #elif defined(__FreeBSD__) 1769 fmts_ptr = (const u8 *)&suit_hdr->ids[fmt_count]; 1770 #endif 1771 fmts_end_ptr = suit->data + suit->size; 1772 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1773 if (!log->fmts) 1774 return -ENOMEM; 1775 1776 for (i = 0; i < fmt_count; i++) { 1777 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1778 if (!fmts_ptr) 1779 break; 1780 1781 (*log->fmts)[i] = fmts_ptr; 1782 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1783 log->fmt_count++; 1784 fmts_ptr += strlen(fmts_ptr); 1785 } 1786 1787 return 0; 1788 } 1789 1790 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1791 { 1792 struct rtw89_fw_log *log = &rtwdev->fw.log; 1793 struct rtw89_fw_suit *suit = &log->suit; 1794 1795 if (!suit || !suit->data) { 1796 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1797 return -EINVAL; 1798 } 1799 if (log->fmts) 1800 return 0; 1801 1802 return rtw89_fw_log_create_fmts_dict(rtwdev); 1803 } 1804 1805 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1806 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1807 u32 fmt_idx, u8 para_int, bool raw_data) 1808 { 1809 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1810 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1811 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1812 int i; 1813 1814 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1815 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1816 log_fmt->argc); 1817 return; 1818 } 1819 1820 if (para_int) 1821 for (i = 0 ; i < log_fmt->argc; i++) 1822 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1823 1824 if (raw_data) { 1825 if (para_int) 1826 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1827 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1828 para_int, log_fmt->argc, (int)sizeof(args), args); 1829 else 1830 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1831 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1832 para_int, log_fmt->argc, log_fmt->u.raw); 1833 } else { 1834 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1835 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1836 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1837 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1838 args[0xf]); 1839 } 1840 1841 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1842 } 1843 1844 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1845 { 1846 const 
struct rtw89_fw_c2h_log_fmt *log_fmt; 1847 u8 para_int; 1848 u32 fmt_idx; 1849 1850 if (len < RTW89_C2H_HEADER_LEN) { 1851 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1852 return; 1853 } 1854 1855 buf += RTW89_C2H_HEADER_LEN; 1856 len -= RTW89_C2H_HEADER_LEN; 1857 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1858 1859 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1860 goto plain_log; 1861 1862 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1863 goto plain_log; 1864 1865 if (!rtwdev->fw.log.fmts) 1866 return; 1867 1868 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1869 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1870 1871 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1872 rtw89_info(rtwdev, "C2H log: %s%s", 1873 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1874 else if (fmt_idx != 0 && para_int) 1875 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1876 else 1877 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1878 return; 1879 1880 plain_log: 1881 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1882 1883 } 1884 1885 #define H2C_CAM_LEN 60 1886 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 1887 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 1888 { 1889 struct sk_buff *skb; 1890 int ret; 1891 1892 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1893 if (!skb) { 1894 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1895 return -ENOMEM; 1896 } 1897 skb_put(skb, H2C_CAM_LEN); 1898 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 1899 skb->data); 1900 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 1901 1902 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1903 H2C_CAT_MAC, 1904 H2C_CL_MAC_ADDR_CAM_UPDATE, 1905 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1906 H2C_CAM_LEN); 1907 1908 ret = rtw89_h2c_tx(rtwdev, skb, false); 1909 if (ret) { 1910 rtw89_err(rtwdev, "failed to send h2c\n"); 1911 goto fail; 1912 } 1913 1914 return 0; 1915 fail: 1916 dev_kfree_skb_any(skb); 1917 1918 return ret; 1919 } 1920 1921 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1922 struct rtw89_vif_link *rtwvif_link, 1923 struct rtw89_sta_link *rtwsta_link) 1924 { 1925 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 1926 u32 len = sizeof(*h2c); 1927 struct sk_buff *skb; 1928 int ret; 1929 1930 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1931 if (!skb) { 1932 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1933 return -ENOMEM; 1934 } 1935 skb_put(skb, len); 1936 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 1937 1938 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 1939 1940 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1941 H2C_CAT_MAC, 1942 H2C_CL_MAC_FR_EXCHG, 1943 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1944 len); 1945 1946 ret = rtw89_h2c_tx(rtwdev, skb, false); 1947 if (ret) { 1948 rtw89_err(rtwdev, "failed to send h2c\n"); 1949 goto fail; 1950 } 1951 1952 return 0; 1953 fail: 1954 dev_kfree_skb_any(skb); 1955 1956 return ret; 1957 } 1958 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1959 1960 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1961 struct rtw89_vif_link *rtwvif_link, 1962 struct rtw89_sta_link *rtwsta_link) 1963 { 1964 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1965 u32 len = sizeof(*h2c); 1966 struct sk_buff *skb; 1967 int ret; 1968 1969 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 
1970 if (!skb) { 1971 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1972 return -ENOMEM; 1973 } 1974 skb_put(skb, len); 1975 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1976 1977 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 1978 1979 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1980 H2C_CAT_MAC, 1981 H2C_CL_MAC_FR_EXCHG, 1982 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1983 len); 1984 1985 ret = rtw89_h2c_tx(rtwdev, skb, false); 1986 if (ret) { 1987 rtw89_err(rtwdev, "failed to send h2c\n"); 1988 goto fail; 1989 } 1990 1991 return 0; 1992 fail: 1993 dev_kfree_skb_any(skb); 1994 1995 return ret; 1996 } 1997 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1998 1999 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2000 struct rtw89_vif_link *rtwvif_link, 2001 struct rtw89_sta_link *rtwsta_link) 2002 { 2003 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2004 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2005 u32 len = sizeof(*h2c); 2006 struct sk_buff *skb; 2007 int ret; 2008 2009 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2010 if (!skb) { 2011 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2012 return -ENOMEM; 2013 } 2014 skb_put(skb, len); 2015 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2016 2017 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2018 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2019 2020 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2021 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2022 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2023 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2024 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2025 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2026 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2027 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2028 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2029 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2030 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2031 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2032 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2033 2034 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2035 H2C_CAT_MAC, 2036 H2C_CL_MAC_FR_EXCHG, 2037 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2038 len); 2039 2040 ret = rtw89_h2c_tx(rtwdev, skb, false); 2041 if (ret) { 2042 rtw89_err(rtwdev, "failed to send h2c\n"); 2043 goto fail; 2044 } 2045 2046 return 0; 2047 fail: 2048 dev_kfree_skb_any(skb); 2049 2050 return ret; 2051 } 2052 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2053 2054 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2055 struct rtw89_vif_link *rtwvif_link, 2056 struct rtw89_sta_link *rtwsta_link, 2057 bool valid, struct ieee80211_ampdu_params *params) 2058 { 2059 const struct rtw89_chip_info *chip = rtwdev->chip; 2060 struct rtw89_h2c_ba_cam *h2c; 2061 u8 macid = rtwsta_link->mac_id; 2062 u32 len = sizeof(*h2c); 2063 struct sk_buff *skb; 2064 u8 entry_idx; 2065 int ret; 2066 2067 ret = valid ? 2068 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2069 &entry_idx) : 2070 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2071 &entry_idx); 2072 if (ret) { 2073 /* it still works even if we don't have static BA CAM, because 2074 * hardware can create dynamic BA CAM automatically. 2075 */ 2076 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2077 "failed to %s entry tid=%d for h2c ba cam\n", 2078 valid ? 
"alloc" : "free", params->tid); 2079 return 0; 2080 } 2081 2082 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2083 if (!skb) { 2084 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2085 return -ENOMEM; 2086 } 2087 skb_put(skb, len); 2088 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2089 2090 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2091 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2092 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2093 else 2094 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2095 if (!valid) 2096 goto end; 2097 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2098 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2099 if (params->buf_size > 64) 2100 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2101 else 2102 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2103 /* If init req is set, hw will set the ssn */ 2104 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2105 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2106 2107 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2108 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2109 le32_encode_bits(rtwvif_link->mac_idx, 2110 RTW89_H2C_BA_CAM_W1_BAND); 2111 } 2112 2113 end: 2114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2115 H2C_CAT_MAC, 2116 H2C_CL_BA_CAM, 2117 H2C_FUNC_MAC_BA_CAM, 0, 1, 2118 len); 2119 2120 ret = rtw89_h2c_tx(rtwdev, skb, false); 2121 if (ret) { 2122 rtw89_err(rtwdev, "failed to send h2c\n"); 2123 goto fail; 2124 } 2125 2126 return 0; 2127 fail: 2128 dev_kfree_skb_any(skb); 2129 2130 return ret; 2131 } 2132 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2133 2134 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2135 u8 entry_idx, u8 uid) 2136 { 2137 struct rtw89_h2c_ba_cam *h2c; 2138 u32 len = sizeof(*h2c); 2139 struct sk_buff *skb; 2140 int ret; 2141 2142 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2143 if (!skb) { 2144 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2145 return -ENOMEM; 2146 } 2147 skb_put(skb, len); 2148 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2149 2150 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2151 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2152 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2153 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2154 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2155 2156 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2157 H2C_CAT_MAC, 2158 H2C_CL_BA_CAM, 2159 H2C_FUNC_MAC_BA_CAM, 0, 1, 2160 len); 2161 2162 ret = rtw89_h2c_tx(rtwdev, skb, false); 2163 if (ret) { 2164 rtw89_err(rtwdev, "failed to send h2c\n"); 2165 goto fail; 2166 } 2167 2168 return 0; 2169 fail: 2170 dev_kfree_skb_any(skb); 2171 2172 return ret; 2173 } 2174 2175 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2176 { 2177 const struct rtw89_chip_info *chip = rtwdev->chip; 2178 u8 entry_idx = chip->bacam_num; 2179 u8 uid = 0; 2180 int i; 2181 2182 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2183 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2184 entry_idx++; 2185 uid++; 2186 } 2187 } 2188 2189 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2190 struct rtw89_vif_link *rtwvif_link, 2191 struct rtw89_sta_link *rtwsta_link, 2192 bool valid, struct ieee80211_ampdu_params *params) 2193 { 2194 const struct rtw89_chip_info *chip = rtwdev->chip; 2195 struct rtw89_h2c_ba_cam_v1 *h2c; 2196 u8 
macid = rtwsta_link->mac_id; 2197 u32 len = sizeof(*h2c); 2198 struct sk_buff *skb; 2199 u8 entry_idx; 2200 u8 bmap_size; 2201 int ret; 2202 2203 ret = valid ? 2204 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2205 &entry_idx) : 2206 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2207 &entry_idx); 2208 if (ret) { 2209 /* it still works even if we don't have static BA CAM, because 2210 * hardware can create dynamic BA CAM automatically. 2211 */ 2212 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2213 "failed to %s entry tid=%d for h2c ba cam\n", 2214 valid ? "alloc" : "free", params->tid); 2215 return 0; 2216 } 2217 2218 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2219 if (!skb) { 2220 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2221 return -ENOMEM; 2222 } 2223 skb_put(skb, len); 2224 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2225 2226 if (params->buf_size > 512) 2227 bmap_size = 10; 2228 else if (params->buf_size > 256) 2229 bmap_size = 8; 2230 else if (params->buf_size > 64) 2231 bmap_size = 4; 2232 else 2233 bmap_size = 0; 2234 2235 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2236 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2237 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2238 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2239 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2240 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2241 2242 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2243 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2244 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2245 le32_encode_bits(!!rtwvif_link->mac_idx, 2246 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2247 2248 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2249 H2C_CAT_MAC, 2250 H2C_CL_BA_CAM, 2251 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2252 len); 2253 2254 ret = rtw89_h2c_tx(rtwdev, skb, false); 2255 if (ret) { 2256 rtw89_err(rtwdev, "failed to send h2c\n"); 2257 goto fail; 2258 } 2259 2260 return 0; 2261 fail: 2262 dev_kfree_skb_any(skb); 2263 2264 return ret; 2265 } 2266 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2267 2268 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2269 u8 offset, u8 mac_idx) 2270 { 2271 struct rtw89_h2c_ba_cam_init *h2c; 2272 u32 len = sizeof(*h2c); 2273 struct sk_buff *skb; 2274 int ret; 2275 2276 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2277 if (!skb) { 2278 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2279 return -ENOMEM; 2280 } 2281 skb_put(skb, len); 2282 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2283 2284 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2285 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2286 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2287 2288 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2289 H2C_CAT_MAC, 2290 H2C_CL_BA_CAM, 2291 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2292 len); 2293 2294 ret = rtw89_h2c_tx(rtwdev, skb, false); 2295 if (ret) { 2296 rtw89_err(rtwdev, "failed to send h2c\n"); 2297 goto fail; 2298 } 2299 2300 return 0; 2301 fail: 2302 dev_kfree_skb_any(skb); 2303 2304 return ret; 2305 } 2306 2307 #define H2C_LOG_CFG_LEN 12 2308 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2309 { 2310 struct sk_buff *skb; 2311 u32 comp = 0; 2312 int ret; 2313 2314 if (enable) 2315 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2316 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2317 BIT(RTW89_FW_LOG_COMP_SCAN); 2318 2319 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2320 if (!skb) { 2321 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2322 return -ENOMEM; 2323 } 2324 2325 skb_put(skb, H2C_LOG_CFG_LEN); 2326 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2327 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2328 SET_LOG_CFG_COMP(skb->data, comp); 2329 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2330 2331 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2332 H2C_CAT_MAC, 2333 H2C_CL_FW_INFO, 2334 H2C_FUNC_LOG_CFG, 0, 0, 2335 H2C_LOG_CFG_LEN); 2336 2337 ret = rtw89_h2c_tx(rtwdev, skb, false); 2338 if (ret) { 2339 rtw89_err(rtwdev, "failed to send h2c\n"); 2340 goto fail; 2341 } 2342 2343 return 0; 2344 fail: 2345 dev_kfree_skb_any(skb); 2346 2347 return ret; 2348 } 2349 2350 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2351 struct rtw89_vif_link *rtwvif_link) 2352 { 2353 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2354 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2355 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2356 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2357 struct rtw89_eapol_2_of_2 *eapol_pkt; 2358 struct ieee80211_bss_conf *bss_conf; 2359 struct ieee80211_hdr_3addr *hdr; 2360 struct sk_buff *skb; 2361 u8 key_des_ver; 2362 2363 if (rtw_wow->ptk_alg == 3) 2364 key_des_ver = 1; 2365 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2366 key_des_ver = 2; 2367 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2368 key_des_ver = 3; 2369 else 2370 key_des_ver = 0; 2371 2372 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2373 if (!skb) 2374 return NULL; 2375 2376 hdr = skb_put_zero(skb, sizeof(*hdr)); 2377 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2378 IEEE80211_FCTL_TODS | 2379 IEEE80211_FCTL_PROTECTED); 2380 2381 rcu_read_lock(); 2382 2383 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2384 2385 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2386 ether_addr_copy(hdr->addr2, bss_conf->addr); 2387 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2388 2389 rcu_read_unlock(); 2390 2391 skb_put_zero(skb, sec_hdr_len); 2392 2393 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2394 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2395 eapol_pkt->key_des_ver = key_des_ver; 2396 2397 return skb; 2398 } 2399 2400 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2401 struct rtw89_vif_link *rtwvif_link) 2402 { 2403 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2404 struct ieee80211_bss_conf *bss_conf; 2405 struct ieee80211_hdr_3addr *hdr; 2406 struct rtw89_sa_query *sa_query; 2407 struct sk_buff *skb; 2408 2409 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2410 if (!skb) 2411 return NULL; 2412 2413 hdr = skb_put_zero(skb, sizeof(*hdr)); 2414 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2415 IEEE80211_STYPE_ACTION | 2416 IEEE80211_FCTL_PROTECTED); 2417 2418 rcu_read_lock(); 2419 2420 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2421 2422 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2423 ether_addr_copy(hdr->addr2, bss_conf->addr); 2424 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2425 2426 rcu_read_unlock(); 2427 2428 skb_put_zero(skb, sec_hdr_len); 2429 2430 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2431 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2432 
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2433 2434 return skb; 2435 } 2436 2437 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2438 struct rtw89_vif_link *rtwvif_link) 2439 { 2440 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2441 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2442 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2443 struct ieee80211_hdr_3addr *hdr; 2444 struct rtw89_arp_rsp *arp_skb; 2445 struct arphdr *arp_hdr; 2446 struct sk_buff *skb; 2447 __le16 fc; 2448 2449 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2450 if (!skb) 2451 return NULL; 2452 2453 hdr = skb_put_zero(skb, sizeof(*hdr)); 2454 2455 if (rtw_wow->ptk_alg) 2456 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2457 IEEE80211_FCTL_PROTECTED); 2458 else 2459 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2460 2461 hdr->frame_control = fc; 2462 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2463 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2464 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2465 2466 skb_put_zero(skb, sec_hdr_len); 2467 2468 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2469 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2470 arp_skb->llc_type = htons(ETH_P_ARP); 2471 2472 arp_hdr = &arp_skb->arp_hdr; 2473 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2474 arp_hdr->ar_pro = htons(ETH_P_IP); 2475 arp_hdr->ar_hln = ETH_ALEN; 2476 arp_hdr->ar_pln = 4; 2477 arp_hdr->ar_op = htons(ARPOP_REPLY); 2478 2479 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2480 arp_skb->sender_ip = rtwvif->ip_addr; 2481 2482 return skb; 2483 } 2484 2485 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2486 struct rtw89_vif_link *rtwvif_link, 2487 enum rtw89_fw_pkt_ofld_type type, 2488 u8 *id) 2489 { 2490 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2491 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2492 struct rtw89_pktofld_info *info; 2493 struct sk_buff *skb; 2494 int ret; 2495 2496 info = kzalloc(sizeof(*info), GFP_KERNEL); 2497 if (!info) 2498 return -ENOMEM; 2499 2500 switch (type) { 2501 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2502 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2503 break; 2504 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2505 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2506 break; 2507 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2508 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2509 break; 2510 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2511 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2512 break; 2513 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2514 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2515 break; 2516 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2517 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2518 break; 2519 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2520 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2521 break; 2522 default: 2523 goto err; 2524 } 2525 2526 if (!skb) 2527 goto err; 2528 2529 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2530 kfree_skb(skb); 2531 2532 if (ret) 2533 goto err; 2534 2535 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2536 *id = info->id; 2537 return 0; 2538 2539 err: 2540 kfree(info); 2541 return -ENOMEM; 2542 } 2543 2544 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2545 struct rtw89_vif_link *rtwvif_link, 2546 bool notify_fw) 2547 { 2548 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2549 struct rtw89_pktofld_info *info, *tmp; 2550 2551 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2552 if (notify_fw) 2553 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2554 else 2555 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2556 list_del(&info->list); 2557 kfree(info); 2558 } 2559 } 2560 2561 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2562 { 2563 struct rtw89_vif_link *rtwvif_link; 2564 struct rtw89_vif *rtwvif; 2565 unsigned int link_id; 2566 2567 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2568 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2569 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2570 notify_fw); 2571 } 2572 2573 #define H2C_GENERAL_PKT_LEN 6 2574 #define H2C_GENERAL_PKT_ID_UND 0xff 2575 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2576 struct rtw89_vif_link *rtwvif_link, u8 macid) 2577 { 2578 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2579 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2580 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2581 struct sk_buff *skb; 2582 int ret; 2583 2584 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2585 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2586 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2587 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2588 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2589 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2590 2591 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2592 if (!skb) { 2593 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2594 return -ENOMEM; 2595 } 2596 skb_put(skb, H2C_GENERAL_PKT_LEN); 2597 SET_GENERAL_PKT_MACID(skb->data, macid); 2598 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2599 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2600 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2601 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2602 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2603 2604 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2605 H2C_CAT_MAC, 2606 H2C_CL_FW_INFO, 2607 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2608 H2C_GENERAL_PKT_LEN); 2609 2610 ret = rtw89_h2c_tx(rtwdev, skb, false); 2611 if (ret) { 2612 rtw89_err(rtwdev, "failed to send h2c\n"); 2613 goto fail; 2614 } 2615 2616 return 0; 2617 fail: 2618 dev_kfree_skb_any(skb); 2619 2620 return ret; 2621 } 2622 2623 #define H2C_LPS_PARM_LEN 8 2624 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2625 struct rtw89_lps_parm *lps_param) 2626 { 2627 struct sk_buff *skb; 2628 int ret; 2629 2630 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2631 if (!skb) { 2632 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2633 return -ENOMEM; 2634 } 2635 skb_put(skb, H2C_LPS_PARM_LEN); 2636 2637 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2638 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2639 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2640 SET_LPS_PARM_RLBM(skb->data, 1); 2641 SET_LPS_PARM_SMARTPS(skb->data, 1); 2642 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2643 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2644 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2645 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2646 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2647 2648 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2649 H2C_CAT_MAC, 2650 H2C_CL_MAC_PS, 2651 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2652 H2C_LPS_PARM_LEN); 2653 2654 ret = rtw89_h2c_tx(rtwdev, skb, false); 2655 if (ret) { 2656 rtw89_err(rtwdev, "failed to send h2c\n"); 2657 goto fail; 2658 } 2659 2660 return 0; 2661 fail: 2662 dev_kfree_skb_any(skb); 2663 2664 return ret; 2665 } 2666 2667 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2668 { 2669 const struct rtw89_chip_info *chip = rtwdev->chip; 2670 const struct rtw89_chan *chan; 2671 struct rtw89_vif_link *rtwvif_link; 2672 struct rtw89_h2c_lps_ch_info *h2c; 2673 u32 len = sizeof(*h2c); 2674 unsigned int link_id; 2675 struct sk_buff *skb; 2676 bool no_chan = true; 2677 u8 phy_idx; 2678 u32 done; 2679 int ret; 2680 2681 if (chip->chip_gen != RTW89_CHIP_BE) 2682 return 0; 2683 2684 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2685 if (!skb) { 2686 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2687 return -ENOMEM; 2688 } 2689 skb_put(skb, len); 2690 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2691 2692 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2693 phy_idx = rtwvif_link->phy_idx; 2694 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2695 continue; 2696 2697 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2698 no_chan = false; 2699 2700 h2c->info[phy_idx].central_ch = chan->channel; 2701 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2702 h2c->info[phy_idx].band = chan->band_type; 2703 h2c->info[phy_idx].bw = chan->band_width; 2704 } 2705 2706 if (no_chan) { 2707 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2708 ret = -ENOENT; 2709 goto fail; 2710 } 2711 2712 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2713 2714 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2715 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2716 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2717 2718 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2719 ret = rtw89_h2c_tx(rtwdev, skb, false); 2720 if (ret) { 2721 rtw89_err(rtwdev, "failed to send h2c\n"); 2722 goto fail; 2723 } 2724 2725 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2726 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2727 if (ret) 2728 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2729 2730 return 0; 2731 fail: 2732 dev_kfree_skb_any(skb); 2733 2734 return ret; 2735 } 2736 2737 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2738 struct rtw89_vif *rtwvif) 2739 { 2740 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2741 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2742 const struct rtw89_chip_info *chip = rtwdev->chip; 2743 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2744 struct rtw89_vif_link *rtwvif_link; 2745 const struct rtw89_chan *chan; 2746 u8 bw_idx = RTW89_BB_BW_20_40; 2747 u32 len = sizeof(*h2c); 2748 unsigned int link_id; 2749 struct sk_buff *skb; 2750 u8 gain_band; 2751 u32 done; 2752 u8 path; 2753 int ret; 2754 int i; 2755 2756 if (chip->chip_gen != RTW89_CHIP_BE) 2757 return 0; 2758 2759 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2760 if (!skb) { 2761 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2762 return -ENOMEM; 2763 } 2764 skb_put(skb, len); 2765 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2766 2767 h2c->fmt_id = 0x1; 2768 2769 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2770 2771 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2772 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2773 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2774 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2775 2776 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2777 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2778 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2779 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2780 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2781 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2782 else 2783 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2784 2785 /* Fill BW20 RX gain table for beacon mode */ 2786 for (i = 0; i < TIA_GAIN_NUM; i++) { 2787 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2788 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2789 } 2790 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2791 gain->lna_gain[gain_band][bw_idx][path], 2792 LNA_GAIN_NUM); 2793 } 2794 2795 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2796 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2797 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2798 2799 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2800 ret = rtw89_h2c_tx(rtwdev, skb, false); 2801 if (ret) { 2802 rtw89_err(rtwdev, "failed to send h2c\n"); 2803 goto fail; 2804 } 2805 2806 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2807 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2808 if (ret) 2809 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2810 2811 return 0; 2812 fail: 2813 dev_kfree_skb_any(skb); 2814 2815 return ret; 2816 } 2817 2818 #define H2C_P2P_ACT_LEN 20 2819 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2820 struct rtw89_vif_link *rtwvif_link, 2821 struct ieee80211_bss_conf *bss_conf, 2822 struct ieee80211_p2p_noa_desc *desc, 2823 u8 act, u8 noa_id) 2824 { 2825 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2826 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2827 struct sk_buff *skb; 2828 u8 *cmd; 2829 int ret; 2830 2831 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2832 if (!skb) { 2833 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2834 return -ENOMEM; 2835 } 2836 skb_put(skb, 
H2C_P2P_ACT_LEN); 2837 cmd = skb->data; 2838 2839 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2840 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2841 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2842 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2843 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2844 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2845 if (desc) { 2846 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2847 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2848 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2849 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2850 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2851 } 2852 2853 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2854 H2C_CAT_MAC, H2C_CL_MAC_PS, 2855 H2C_FUNC_P2P_ACT, 0, 0, 2856 H2C_P2P_ACT_LEN); 2857 2858 ret = rtw89_h2c_tx(rtwdev, skb, false); 2859 if (ret) { 2860 rtw89_err(rtwdev, "failed to send h2c\n"); 2861 goto fail; 2862 } 2863 2864 return 0; 2865 fail: 2866 dev_kfree_skb_any(skb); 2867 2868 return ret; 2869 } 2870 2871 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2872 struct sk_buff *skb) 2873 { 2874 const struct rtw89_chip_info *chip = rtwdev->chip; 2875 struct rtw89_hal *hal = &rtwdev->hal; 2876 u8 ntx_path; 2877 u8 map_b; 2878 2879 if (chip->rf_path_num == 1) { 2880 ntx_path = RF_A; 2881 map_b = 0; 2882 } else { 2883 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2884 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2885 } 2886 2887 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2888 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2889 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2890 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2891 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2892 } 2893 2894 #define H2C_CMC_TBL_LEN 68 2895 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2896 struct rtw89_vif_link *rtwvif_link, 2897 struct rtw89_sta_link *rtwsta_link) 2898 { 2899 const struct rtw89_chip_info *chip = rtwdev->chip; 2900 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2901 struct sk_buff *skb; 2902 int ret; 2903 2904 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2905 if (!skb) { 2906 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2907 return -ENOMEM; 2908 } 2909 skb_put(skb, H2C_CMC_TBL_LEN); 2910 SET_CTRL_INFO_MACID(skb->data, macid); 2911 SET_CTRL_INFO_OPERATION(skb->data, 1); 2912 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2913 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2914 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2915 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2916 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2917 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2918 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2919 } 2920 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2921 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2922 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2923 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2924 2925 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2926 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2927 chip->h2c_cctl_func_id, 0, 1, 2928 H2C_CMC_TBL_LEN); 2929 2930 ret = rtw89_h2c_tx(rtwdev, skb, false); 2931 if (ret) { 2932 rtw89_err(rtwdev, "failed to send h2c\n"); 2933 goto fail; 2934 } 2935 2936 return 0; 2937 fail: 2938 dev_kfree_skb_any(skb); 2939 2940 return ret; 2941 } 2942 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2943 2944 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2945 struct rtw89_vif_link *rtwvif_link, 2946 struct rtw89_sta_link *rtwsta_link) 2947 { 2948 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2949 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2950 u32 len = sizeof(*h2c); 2951 struct sk_buff *skb; 2952 int ret; 2953 2954 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2955 if (!skb) { 2956 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2957 return -ENOMEM; 2958 } 2959 skb_put(skb, len); 2960 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2961 2962 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2963 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2964 2965 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2966 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2967 2968 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2969 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2970 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2971 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2972 2973 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2974 2975 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2976 2977 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2978 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2979 2980 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2981 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2982 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2983 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2984 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2985 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2986 2987 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2988 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2989 2990 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2991 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2992 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2993 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2994 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2995 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2996 2997 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2998 2999 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 3000 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3001 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3002 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3003 3004 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3005 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3006 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3007 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3008 3009 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3010 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3011 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3012 len); 3013 3014 ret = rtw89_h2c_tx(rtwdev, skb, false); 3015 if (ret) { 3016 rtw89_err(rtwdev, "failed to send h2c\n"); 3017 goto fail; 3018 } 3019 3020 return 0; 3021 fail: 3022 dev_kfree_skb_any(skb); 3023 3024 return ret; 3025 } 3026 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3027 3028 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3029 struct ieee80211_link_sta *link_sta, 3030 u8 *pads) 3031 { 3032 bool ppe_th; 3033 u8 ppe16, ppe8; 3034 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3035 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3036 u8 ru_bitmap; 3037 u8 n, idx, sh; 3038 u16 ppe; 3039 int i; 3040 3041 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3042 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3043 if (!ppe_th) { 3044 u8 pad; 3045 3046 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3047 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3048 3049 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 3050 pads[i] = pad; 3051 3052 return; 3053 } 3054 3055 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3056 n = hweight8(ru_bitmap); 3057 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3058 3059 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3060 if (!(ru_bitmap & BIT(i))) { 3061 pads[i] = 1; 3062 continue; 3063 } 3064 3065 idx = n >> 3; 3066 sh = n & 7; 3067 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3068 3069 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3070 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3071 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3072 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3073 3074 if (ppe16 != 7 && ppe8 == 7) 3075 pads[i] = RTW89_PE_DURATION_16; 3076 else if (ppe8 != 7) 3077 pads[i] = RTW89_PE_DURATION_8; 3078 else 3079 pads[i] = RTW89_PE_DURATION_0; 3080 } 3081 } 3082 3083 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3084 struct rtw89_vif_link *rtwvif_link, 3085 struct rtw89_sta_link *rtwsta_link) 3086 { 3087 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3088 const struct rtw89_chip_info *chip = rtwdev->chip; 3089 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3090 rtwvif_link->chanctx_idx); 3091 struct ieee80211_link_sta *link_sta; 3092 struct sk_buff *skb; 3093 u8 pads[RTW89_PPE_BW_NUM]; 3094 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3095 u16 lowest_rate; 3096 int ret; 3097 3098 memset(pads, 0, sizeof(pads)); 3099 3100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3101 if (!skb) { 3102 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3103 return -ENOMEM; 3104 } 3105 3106 rcu_read_lock(); 3107 3108 if (rtwsta_link) 3109 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3110 3111 if (rtwsta_link && link_sta->he_cap.has_he) 3112 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3113 3114 if (vif->p2p) 3115 lowest_rate = RTW89_HW_RATE_OFDM6; 3116 else if (chan->band_type == RTW89_BAND_2G) 3117 lowest_rate = RTW89_HW_RATE_CCK1; 3118 else 3119 lowest_rate = RTW89_HW_RATE_OFDM6; 3120 3121 skb_put(skb, H2C_CMC_TBL_LEN); 3122 SET_CTRL_INFO_MACID(skb->data, mac_id); 3123 SET_CTRL_INFO_OPERATION(skb->data, 1); 3124 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3125 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3126 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3127 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3128 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3129 if (vif->type == NL80211_IFTYPE_STATION) 3130 SET_CMC_TBL_ULDL(skb->data, 1); 3131 else 3132 SET_CMC_TBL_ULDL(skb->data, 0); 3133 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3134 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3135 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3136 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3137 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3138 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3139 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3140 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3141 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3142 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3143 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3144 } 3145 if (rtwsta_link) 3146 
SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3147 link_sta->he_cap.has_he); 3148 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3149 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3150 3151 rcu_read_unlock(); 3152 3153 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3154 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3155 chip->h2c_cctl_func_id, 0, 1, 3156 H2C_CMC_TBL_LEN); 3157 3158 ret = rtw89_h2c_tx(rtwdev, skb, false); 3159 if (ret) { 3160 rtw89_err(rtwdev, "failed to send h2c\n"); 3161 goto fail; 3162 } 3163 3164 return 0; 3165 fail: 3166 dev_kfree_skb_any(skb); 3167 3168 return ret; 3169 } 3170 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3171 3172 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3173 struct ieee80211_link_sta *link_sta, 3174 u8 *pads) 3175 { 3176 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3177 u16 ppe_thres_hdr; 3178 u8 ppe16, ppe8; 3179 u8 n, idx, sh; 3180 u8 ru_bitmap; 3181 bool ppe_th; 3182 u16 ppe; 3183 int i; 3184 3185 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3186 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3187 if (!ppe_th) { 3188 u8 pad; 3189 3190 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3191 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3192 3193 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3194 pads[i] = pad; 3195 3196 return; 3197 } 3198 3199 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3200 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3201 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3202 n = hweight8(ru_bitmap); 3203 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3204 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3205 3206 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3207 if (!(ru_bitmap & BIT(i))) { 3208 pads[i] = 1; 3209 continue; 3210 } 3211 3212 idx = n >> 3; 3213 sh = n & 7; 3214 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3215 3216 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3217 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3218 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3219 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3220 3221 if (ppe16 != 7 && ppe8 == 7) 3222 pads[i] = RTW89_PE_DURATION_16_20; 3223 else if (ppe8 != 7) 3224 pads[i] = RTW89_PE_DURATION_8; 3225 else 3226 pads[i] = RTW89_PE_DURATION_0; 3227 } 3228 } 3229 3230 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3231 struct rtw89_vif_link *rtwvif_link, 3232 struct rtw89_sta_link *rtwsta_link) 3233 { 3234 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3235 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3236 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3237 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3238 struct ieee80211_bss_conf *bss_conf; 3239 struct ieee80211_link_sta *link_sta; 3240 u8 pads[RTW89_PPE_BW_NUM]; 3241 u32 len = sizeof(*h2c); 3242 struct sk_buff *skb; 3243 u16 lowest_rate; 3244 int ret; 3245 3246 memset(pads, 0, sizeof(pads)); 3247 3248 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3249 if (!skb) { 3250 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3251 return -ENOMEM; 3252 } 3253 3254 rcu_read_lock(); 3255 3256 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3257 3258 if (rtwsta_link) { 3259 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3260 3261 if (link_sta->eht_cap.has_eht) 3262 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3263 else if (link_sta->he_cap.has_he) 3264 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3265 } 3266 3267 if (vif->p2p) 3268 lowest_rate = RTW89_HW_RATE_OFDM6; 3269 else if (chan->band_type == RTW89_BAND_2G) 3270 lowest_rate = RTW89_HW_RATE_CCK1; 3271 else 3272 lowest_rate = RTW89_HW_RATE_OFDM6; 3273 3274 skb_put(skb, len); 3275 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3276 3277 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3278 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3279 3280 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3281 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3282 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3283 CCTLINFO_G7_W0_DISDATAFB); 3284 3285 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3286 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3287 3288 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3289 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3290 3291 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3292 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3293 3294 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3295 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3296 3297 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3298 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3299 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3300 } 3301 3302 if (bss_conf->eht_support) { 3303 u16 punct = bss_conf->chanreq.oper.punctured; 3304 3305 h2c->w4 |= le32_encode_bits(~punct, 3306 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3307 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3308 } 3309 3310 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3311 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3312 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3313 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3314 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3315 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3316 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3317 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3318 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3319 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3320 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3321 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3322 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3323 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3324 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3325 3326 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3327 CCTLINFO_G7_W6_ULDL); 3328 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3329 3330 if (rtwsta_link) { 3331 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3332 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3333 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3334 } 3335 3336 rcu_read_unlock(); 3337 3338 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3339 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3340 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3341 len); 3342 3343 ret = rtw89_h2c_tx(rtwdev, skb, false); 3344 if (ret) { 3345 rtw89_err(rtwdev, "failed to send h2c\n"); 3346 goto fail; 3347 } 3348 3349 return 0; 3350 fail: 3351 dev_kfree_skb_any(skb); 3352 3353 return ret; 3354 } 3355 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3356 3357 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3358 struct rtw89_vif_link *rtwvif_link, 3359 struct rtw89_sta_link *rtwsta_link) 3360 { 3361 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3362 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3363 u32 len = sizeof(*h2c); 3364 struct sk_buff *skb; 3365 u16 agg_num = 0; 3366 u8 ba_bmap = 0; 3367 int ret; 3368 u8 tid; 3369 3370 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3371 if (!skb) { 3372 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3373 return -ENOMEM; 3374 } 3375 skb_put(skb, len); 3376 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3377 3378 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3379 if (agg_num == 0) 3380 agg_num = rtwsta->ampdu_params[tid].agg_num; 3381 else 3382 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3383 } 3384 3385 if (agg_num <= 0x20) 3386 ba_bmap = 3; 3387 else if (agg_num > 0x20 && agg_num <= 0x40) 3388 ba_bmap = 0; 3389 else if (agg_num > 0x40 && agg_num <= 0x80) 3390 ba_bmap = 1; 3391 else if (agg_num > 0x80 && agg_num <= 0x100) 3392 ba_bmap = 2; 3393 else if (agg_num > 0x100 && agg_num <= 0x200) 3394 ba_bmap = 4; 3395 else if (agg_num > 0x200 && agg_num <= 0x400) 3396 ba_bmap = 5; 3397 3398 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3399 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3400 3401 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3402 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3403 3404 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3405 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3406 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3407 len); 3408 3409 ret = rtw89_h2c_tx(rtwdev, skb, false); 3410 if (ret) { 3411 rtw89_err(rtwdev, "failed to send h2c\n"); 3412 goto fail; 3413 } 3414 3415 return 0; 3416 fail: 3417 dev_kfree_skb_any(skb); 3418 3419 return ret; 3420 } 3421 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3422 3423 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3424 struct rtw89_sta_link *rtwsta_link) 3425 { 3426 const struct rtw89_chip_info *chip = rtwdev->chip; 3427 struct sk_buff *skb; 3428 int ret; 3429 3430 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3431 if (!skb) { 3432 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3433 return -ENOMEM; 3434 } 3435 skb_put(skb, H2C_CMC_TBL_LEN); 3436 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3437 SET_CTRL_INFO_OPERATION(skb->data, 1); 3438 if (rtwsta_link->cctl_tx_time) { 3439 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3440 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3441 } 3442 if (rtwsta_link->cctl_tx_retry_limit) { 3443 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3444 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3445 } 3446 
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);

int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd_be *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct
rtw89_vif_link *rtwvif_link, 3670 struct rtw89_sta_link *rtwsta_link, 3671 enum rtw89_upd_mode upd_mode) 3672 { 3673 struct sk_buff *skb; 3674 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3675 u8 self_role; 3676 int ret; 3677 3678 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3679 if (rtwsta_link) 3680 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3681 else 3682 self_role = rtwvif_link->self_role; 3683 } else { 3684 self_role = rtwvif_link->self_role; 3685 } 3686 3687 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3688 if (!skb) { 3689 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3690 return -ENOMEM; 3691 } 3692 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3693 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3694 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3695 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3696 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3697 3698 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3699 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3700 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3701 H2C_ROLE_MAINTAIN_LEN); 3702 3703 ret = rtw89_h2c_tx(rtwdev, skb, false); 3704 if (ret) { 3705 rtw89_err(rtwdev, "failed to send h2c\n"); 3706 goto fail; 3707 } 3708 3709 return 0; 3710 fail: 3711 dev_kfree_skb_any(skb); 3712 3713 return ret; 3714 } 3715 3716 static enum rtw89_fw_sta_type 3717 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3718 struct rtw89_sta_link *rtwsta_link) 3719 { 3720 struct ieee80211_bss_conf *bss_conf; 3721 struct ieee80211_link_sta *link_sta; 3722 enum rtw89_fw_sta_type type; 3723 3724 rcu_read_lock(); 3725 3726 if (!rtwsta_link) 3727 goto by_vif; 3728 3729 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3730 3731 if (link_sta->eht_cap.has_eht) 3732 type = RTW89_FW_BE_STA; 3733 else if (link_sta->he_cap.has_he) 3734 type = RTW89_FW_AX_STA; 3735 else 3736 type = RTW89_FW_N_AC_STA; 3737 3738 goto out; 3739 3740 by_vif: 3741 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3742 3743 if (bss_conf->eht_support) 3744 type = RTW89_FW_BE_STA; 3745 else if (bss_conf->he_support) 3746 type = RTW89_FW_AX_STA; 3747 else 3748 type = RTW89_FW_N_AC_STA; 3749 3750 out: 3751 rcu_read_unlock(); 3752 3753 return type; 3754 } 3755 3756 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3757 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3758 { 3759 struct sk_buff *skb; 3760 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3761 u8 self_role = rtwvif_link->self_role; 3762 enum rtw89_fw_sta_type sta_type; 3763 u8 net_type = rtwvif_link->net_type; 3764 struct rtw89_h2c_join_v1 *h2c_v1; 3765 struct rtw89_h2c_join *h2c; 3766 u32 len = sizeof(*h2c); 3767 bool format_v1 = false; 3768 int ret; 3769 3770 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3771 len = sizeof(*h2c_v1); 3772 format_v1 = true; 3773 } 3774 3775 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3776 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3777 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3778 } 3779 3780 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3781 if (!skb) { 3782 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3783 return -ENOMEM; 3784 } 3785 skb_put(skb, len); 3786 h2c = (struct rtw89_h2c_join *)skb->data; 3787 3788 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3789 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3790 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3791 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3792 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3793 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3794 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3795 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3796 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3797 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3798 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3799 le32_encode_bits(rtwvif_link->wifi_role, 3800 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3801 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3802 3803 if (!format_v1) 3804 goto done; 3805 3806 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3807 3808 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3809 3810 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3811 h2c_v1->w2 = 0; 3812 3813 done: 3814 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3815 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3816 H2C_FUNC_MAC_JOININFO, 0, 1, 3817 len); 3818 3819 ret = rtw89_h2c_tx(rtwdev, skb, false); 3820 if (ret) { 3821 rtw89_err(rtwdev, "failed to send h2c\n"); 3822 goto fail; 3823 } 3824 3825 return 0; 3826 fail: 3827 dev_kfree_skb_any(skb); 3828 3829 return ret; 3830 } 3831 3832 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3833 { 3834 struct rtw89_h2c_notify_dbcc *h2c; 3835 u32 len = sizeof(*h2c); 3836 struct sk_buff *skb; 3837 int ret; 3838 3839 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3840 if (!skb) { 3841 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3842 return -ENOMEM; 3843 } 3844 skb_put(skb, len); 3845 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3846 3847 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3848 3849 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3850 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3851 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3852 len); 3853 3854 ret = rtw89_h2c_tx(rtwdev, skb, false); 3855 if (ret) { 3856 rtw89_err(rtwdev, "failed to send h2c\n"); 3857 goto fail; 3858 } 3859 3860 return 0; 3861 fail: 3862 dev_kfree_skb_any(skb); 3863 3864 return ret; 3865 } 3866 3867 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3868 bool pause) 3869 { 3870 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3871 struct rtw89_fw_macid_pause_grp *h2c; 3872 __le32 set = cpu_to_le32(BIT(sh)); 3873 u8 h2c_macid_pause_id; 3874 struct sk_buff *skb; 3875 u32 len; 3876 int ret; 3877 3878 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3879 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3880 len = sizeof(*h2c_new); 3881 } else { 3882 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3883 len = sizeof(*h2c); 3884 } 3885 3886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3887 if (!skb) { 3888 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3889 return -ENOMEM; 3890 } 3891 skb_put(skb, len); 3892 3893 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 
3894 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 3895 3896 h2c_new->n[0].pause_mask_grp[grp] = set; 3897 h2c_new->n[0].sleep_mask_grp[grp] = set; 3898 if (pause) { 3899 h2c_new->n[0].pause_grp[grp] = set; 3900 h2c_new->n[0].sleep_grp[grp] = set; 3901 } 3902 } else { 3903 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3904 3905 h2c->mask_grp[grp] = set; 3906 if (pause) 3907 h2c->pause_grp[grp] = set; 3908 } 3909 3910 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3911 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3912 h2c_macid_pause_id, 1, 0, 3913 len); 3914 3915 ret = rtw89_h2c_tx(rtwdev, skb, false); 3916 if (ret) { 3917 rtw89_err(rtwdev, "failed to send h2c\n"); 3918 goto fail; 3919 } 3920 3921 return 0; 3922 fail: 3923 dev_kfree_skb_any(skb); 3924 3925 return ret; 3926 } 3927 3928 #define H2C_EDCA_LEN 12 3929 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3930 u8 ac, u32 val) 3931 { 3932 struct sk_buff *skb; 3933 int ret; 3934 3935 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3936 if (!skb) { 3937 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3938 return -ENOMEM; 3939 } 3940 skb_put(skb, H2C_EDCA_LEN); 3941 RTW89_SET_EDCA_SEL(skb->data, 0); 3942 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 3943 RTW89_SET_EDCA_WMM(skb->data, 0); 3944 RTW89_SET_EDCA_AC(skb->data, ac); 3945 RTW89_SET_EDCA_PARAM(skb->data, val); 3946 3947 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3948 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3949 H2C_FUNC_USR_EDCA, 0, 1, 3950 H2C_EDCA_LEN); 3951 3952 ret = rtw89_h2c_tx(rtwdev, skb, false); 3953 if (ret) { 3954 rtw89_err(rtwdev, "failed to send h2c\n"); 3955 goto fail; 3956 } 3957 3958 return 0; 3959 fail: 3960 dev_kfree_skb_any(skb); 3961 3962 return ret; 3963 } 3964 3965 #define H2C_TSF32_TOGL_LEN 4 3966 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 3967 struct rtw89_vif_link *rtwvif_link, 3968 bool en) 3969 { 3970 struct sk_buff *skb; 3971 u16 early_us = en ? 
2000 : 0; 3972 u8 *cmd; 3973 int ret; 3974 3975 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3976 if (!skb) { 3977 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3978 return -ENOMEM; 3979 } 3980 skb_put(skb, H2C_TSF32_TOGL_LEN); 3981 cmd = skb->data; 3982 3983 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 3984 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3985 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 3986 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3987 3988 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3989 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3990 H2C_FUNC_TSF32_TOGL, 0, 0, 3991 H2C_TSF32_TOGL_LEN); 3992 3993 ret = rtw89_h2c_tx(rtwdev, skb, false); 3994 if (ret) { 3995 rtw89_err(rtwdev, "failed to send h2c\n"); 3996 goto fail; 3997 } 3998 3999 return 0; 4000 fail: 4001 dev_kfree_skb_any(skb); 4002 4003 return ret; 4004 } 4005 4006 #define H2C_OFLD_CFG_LEN 8 4007 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4008 { 4009 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4010 struct sk_buff *skb; 4011 int ret; 4012 4013 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4014 if (!skb) { 4015 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4016 return -ENOMEM; 4017 } 4018 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4019 4020 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4021 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4022 H2C_FUNC_OFLD_CFG, 0, 1, 4023 H2C_OFLD_CFG_LEN); 4024 4025 ret = rtw89_h2c_tx(rtwdev, skb, false); 4026 if (ret) { 4027 rtw89_err(rtwdev, "failed to send h2c\n"); 4028 goto fail; 4029 } 4030 4031 return 0; 4032 fail: 4033 dev_kfree_skb_any(skb); 4034 4035 return ret; 4036 } 4037 4038 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4039 { 4040 struct rtw89_h2c_tx_duty *h2c; 4041 u32 len = sizeof(*h2c); 4042 struct sk_buff *skb; 4043 u16 pause, active; 4044 int ret; 4045 4046 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4047 if (!skb) { 4048 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4049 return -ENOMEM; 4050 } 4051 4052 skb_put(skb, len); 4053 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4054 4055 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4056 4057 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4058 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4059 } else { 4060 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4061 pause = 100 - active; 4062 4063 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4064 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4065 } 4066 4067 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4068 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4069 H2C_FUNC_TX_DUTY, 0, 0, len); 4070 4071 ret = rtw89_h2c_tx(rtwdev, skb, false); 4072 if (ret) { 4073 rtw89_err(rtwdev, "failed to send h2c\n"); 4074 goto fail; 4075 } 4076 4077 return 0; 4078 fail: 4079 dev_kfree_skb_any(skb); 4080 4081 return ret; 4082 } 4083 4084 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4085 struct rtw89_vif_link *rtwvif_link, 4086 bool connect) 4087 { 4088 struct ieee80211_bss_conf *bss_conf; 4089 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4090 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4091 struct rtw89_h2c_bcnfltr *h2c; 4092 u32 len = sizeof(*h2c); 4093 struct sk_buff *skb; 4094 int ret; 4095 4096 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4097 return -EINVAL; 4098 4099 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4100 return -EINVAL; 
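/* CQM hysteresis and threshold come from the link's bss_conf; read them
 * under RCU and keep the driver defaults set above when mac80211 leaves
 * them at zero.
 */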
4101 4102 rcu_read_lock(); 4103 4104 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4105 4106 if (bss_conf->cqm_rssi_hyst) 4107 hyst = bss_conf->cqm_rssi_hyst; 4108 if (bss_conf->cqm_rssi_thold) 4109 thold = bss_conf->cqm_rssi_thold; 4110 4111 rcu_read_unlock(); 4112 4113 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4114 if (!skb) { 4115 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4116 return -ENOMEM; 4117 } 4118 4119 skb_put(skb, len); 4120 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4121 4122 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4123 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4124 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4125 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4126 RTW89_H2C_BCNFLTR_W0_MODE) | 4127 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 4128 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4129 le32_encode_bits(thold + MAX_RSSI, 4130 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4131 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4132 4133 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4134 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4135 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4136 4137 ret = rtw89_h2c_tx(rtwdev, skb, false); 4138 if (ret) { 4139 rtw89_err(rtwdev, "failed to send h2c\n"); 4140 goto fail; 4141 } 4142 4143 return 0; 4144 fail: 4145 dev_kfree_skb_any(skb); 4146 4147 return ret; 4148 } 4149 4150 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4151 struct rtw89_rx_phy_ppdu *phy_ppdu) 4152 { 4153 struct rtw89_h2c_ofld_rssi *h2c; 4154 u32 len = sizeof(*h2c); 4155 struct sk_buff *skb; 4156 s8 rssi; 4157 int ret; 4158 4159 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4160 return -EINVAL; 4161 4162 if (!phy_ppdu) 4163 return -EINVAL; 4164 4165 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4166 if (!skb) { 4167 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4168 return -ENOMEM; 4169 } 4170 4171 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4172 skb_put(skb, len); 4173 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4174 4175 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4176 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4177 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4178 4179 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4180 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4181 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4182 4183 ret = rtw89_h2c_tx(rtwdev, skb, false); 4184 if (ret) { 4185 rtw89_err(rtwdev, "failed to send h2c\n"); 4186 goto fail; 4187 } 4188 4189 return 0; 4190 fail: 4191 dev_kfree_skb_any(skb); 4192 4193 return ret; 4194 } 4195 4196 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4197 { 4198 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4199 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4200 struct rtw89_h2c_ofld *h2c; 4201 u32 len = sizeof(*h2c); 4202 struct sk_buff *skb; 4203 int ret; 4204 4205 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4206 return -EINVAL; 4207 4208 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4209 if (!skb) { 4210 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4211 return -ENOMEM; 4212 } 4213 4214 skb_put(skb, len); 4215 h2c = (struct rtw89_h2c_ofld *)skb->data; 4216 4217 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4218 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4219 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4220 4221 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4222 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4223 H2C_FUNC_OFLD_TP, 0, 1, len); 4224 4225 ret = rtw89_h2c_tx(rtwdev, skb, false); 4226 if (ret) { 4227 rtw89_err(rtwdev, "failed to send h2c\n"); 4228 goto fail; 4229 } 4230 4231 return 0; 4232 fail: 4233 dev_kfree_skb_any(skb); 4234 4235 return ret; 4236 } 4237 4238 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4239 { 4240 const struct rtw89_chip_info *chip = rtwdev->chip; 4241 struct rtw89_h2c_ra_v1 *h2c_v1; 4242 struct rtw89_h2c_ra *h2c; 4243 u32 len = sizeof(*h2c); 4244 bool format_v1 = false; 4245 struct sk_buff *skb; 4246 int ret; 4247 4248 if (chip->chip_gen == RTW89_CHIP_BE) { 4249 len = sizeof(*h2c_v1); 4250 format_v1 = true; 4251 } 4252 4253 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4254 if (!skb) { 4255 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4256 return -ENOMEM; 4257 } 4258 skb_put(skb, len); 4259 h2c = (struct rtw89_h2c_ra *)skb->data; 4260 rtw89_debug(rtwdev, RTW89_DBG_RA, 4261 #if defined(__linux__) 4262 "ra cmd msk: %llx ", ra->ra_mask); 4263 #elif defined(__FreeBSD__) 4264 "ra cmd msk: %jx ", (uintmax_t)ra->ra_mask); 4265 #endif 4266 4267 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4268 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4269 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4270 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4271 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4272 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4273 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4274 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4275 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4276 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4277 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4278 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4279 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4280 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4281 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4282 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4283 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4284 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4285 4286 if (!format_v1) 4287 goto csi; 4288 4289 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4290 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4291 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4292 4293 csi: 4294 if (!csi) 4295 goto done; 4296 4297 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4298 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4299 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4300 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4301 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4302 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4303 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4304 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4305 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4306 4307 done: 4308 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4309 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4310 
H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4311 len); 4312 4313 ret = rtw89_h2c_tx(rtwdev, skb, false); 4314 if (ret) { 4315 rtw89_err(rtwdev, "failed to send h2c\n"); 4316 goto fail; 4317 } 4318 4319 return 0; 4320 fail: 4321 dev_kfree_skb_any(skb); 4322 4323 return ret; 4324 } 4325 4326 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4327 { 4328 struct rtw89_btc *btc = &rtwdev->btc; 4329 struct rtw89_btc_dm *dm = &btc->dm; 4330 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4331 struct rtw89_btc_module *module = &init_info->module; 4332 struct rtw89_btc_ant_info *ant = &module->ant; 4333 struct rtw89_h2c_cxinit *h2c; 4334 u32 len = sizeof(*h2c); 4335 struct sk_buff *skb; 4336 int ret; 4337 4338 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4339 if (!skb) { 4340 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4341 return -ENOMEM; 4342 } 4343 skb_put(skb, len); 4344 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4345 4346 h2c->hdr.type = type; 4347 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4348 4349 h2c->ant_type = ant->type; 4350 h2c->ant_num = ant->num; 4351 h2c->ant_iso = ant->isolation; 4352 h2c->ant_info = 4353 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4354 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4355 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4356 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4357 4358 h2c->mod_rfe = module->rfe_type; 4359 h2c->mod_cv = module->cv; 4360 h2c->mod_info = 4361 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4362 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4363 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4364 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4365 h2c->mod_adie_kt = module->kt_ver_adie; 4366 h2c->wl_gch = init_info->wl_guard_ch; 4367 4368 h2c->info = 4369 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4370 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4371 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4372 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4373 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4374 4375 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4376 H2C_CAT_OUTSRC, BTFC_SET, 4377 SET_DRV_INFO, 0, 0, 4378 len); 4379 4380 ret = rtw89_h2c_tx(rtwdev, skb, false); 4381 if (ret) { 4382 rtw89_err(rtwdev, "failed to send h2c\n"); 4383 goto fail; 4384 } 4385 4386 return 0; 4387 fail: 4388 dev_kfree_skb_any(skb); 4389 4390 return ret; 4391 } 4392 4393 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4394 { 4395 struct rtw89_btc *btc = &rtwdev->btc; 4396 struct rtw89_btc_dm *dm = &btc->dm; 4397 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4398 struct rtw89_h2c_cxinit_v7 *h2c; 4399 u32 len = sizeof(*h2c); 4400 struct sk_buff *skb; 4401 int ret; 4402 4403 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4404 if (!skb) { 4405 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4406 return -ENOMEM; 4407 } 4408 skb_put(skb, len); 4409 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4410 4411 h2c->hdr.type = type; 4412 h2c->hdr.ver = btc->ver->fcxinit; 4413 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4414 h2c->init = *init_info; 4415 4416 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4417 H2C_CAT_OUTSRC, BTFC_SET, 4418 SET_DRV_INFO, 0, 0, 4419 
len); 4420 4421 ret = rtw89_h2c_tx(rtwdev, skb, false); 4422 if (ret) { 4423 rtw89_err(rtwdev, "failed to send h2c\n"); 4424 goto fail; 4425 } 4426 4427 return 0; 4428 fail: 4429 dev_kfree_skb_any(skb); 4430 4431 return ret; 4432 } 4433 4434 #define PORT_DATA_OFFSET 4 4435 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4436 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4437 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4438 4439 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4440 { 4441 struct rtw89_btc *btc = &rtwdev->btc; 4442 const struct rtw89_btc_ver *ver = btc->ver; 4443 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4444 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4445 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4446 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4447 struct sk_buff *skb; 4448 u32 len; 4449 u8 offset = 0; 4450 u8 *cmd; 4451 int ret; 4452 int i; 4453 4454 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4455 4456 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4457 if (!skb) { 4458 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4459 return -ENOMEM; 4460 } 4461 skb_put(skb, len); 4462 cmd = skb->data; 4463 4464 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4465 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4466 4467 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4468 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4469 4470 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4471 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4472 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4473 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4474 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4475 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4476 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4477 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4478 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4479 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4480 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4481 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4482 4483 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4484 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4485 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4486 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4487 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4488 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4489 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4490 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4491 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4492 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4493 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4494 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4495 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4496 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4497 } 4498 4499 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4500 H2C_CAT_OUTSRC, BTFC_SET, 4501 SET_DRV_INFO, 0, 0, 4502 len); 4503 4504 ret = rtw89_h2c_tx(rtwdev, skb, false); 4505 if (ret) { 4506 rtw89_err(rtwdev, "failed to send h2c\n"); 4507 goto fail; 4508 } 4509 4510 return 0; 4511 fail: 4512 dev_kfree_skb_any(skb); 4513 4514 return ret; 4515 } 4516 4517 #define 
H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4518 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4519 4520 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4521 { 4522 struct rtw89_btc *btc = &rtwdev->btc; 4523 const struct rtw89_btc_ver *ver = btc->ver; 4524 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4525 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4526 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4527 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4528 struct sk_buff *skb; 4529 u32 len; 4530 u8 *cmd, offset; 4531 int ret; 4532 int i; 4533 4534 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4535 4536 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4537 if (!skb) { 4538 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4539 return -ENOMEM; 4540 } 4541 skb_put(skb, len); 4542 cmd = skb->data; 4543 4544 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4545 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4546 4547 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4548 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4549 4550 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4551 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4552 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4553 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4554 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4555 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4556 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4557 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4558 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4559 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4560 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4561 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4562 4563 offset = PORT_DATA_OFFSET; 4564 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4565 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4566 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4567 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4568 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4569 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4570 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4571 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4572 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4573 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4574 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4575 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4576 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4577 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4578 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4579 } 4580 4581 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4582 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4583 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4584 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4585 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4586 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4587 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4588 4589 
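/* Payload is complete: the per-port entries are followed by a DBCC
 * block occupying the last H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN bytes,
 * filled above via 'offset'. All that remains is the H2C header and
 * transmission.
 */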
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4590 H2C_CAT_OUTSRC, BTFC_SET, 4591 SET_DRV_INFO, 0, 0, 4592 len); 4593 4594 ret = rtw89_h2c_tx(rtwdev, skb, false); 4595 if (ret) { 4596 rtw89_err(rtwdev, "failed to send h2c\n"); 4597 goto fail; 4598 } 4599 4600 return 0; 4601 fail: 4602 dev_kfree_skb_any(skb); 4603 4604 return ret; 4605 } 4606 4607 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4608 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4609 4610 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4611 { 4612 struct rtw89_btc *btc = &rtwdev->btc; 4613 const struct rtw89_btc_ver *ver = btc->ver; 4614 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4615 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4616 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4617 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4618 struct sk_buff *skb; 4619 u32 len; 4620 u8 *cmd, offset; 4621 int ret; 4622 int i; 4623 4624 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4625 4626 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4627 if (!skb) { 4628 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4629 return -ENOMEM; 4630 } 4631 skb_put(skb, len); 4632 cmd = skb->data; 4633 4634 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4635 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4636 4637 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4638 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4639 4640 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4641 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4642 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4643 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4644 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4645 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4646 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4647 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4648 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4649 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4650 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4651 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4652 4653 offset = PORT_DATA_OFFSET; 4654 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4655 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4656 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4657 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4658 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4659 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4660 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4661 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4662 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4663 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4664 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4665 } 4666 4667 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4668 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4669 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4670 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4671 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4672 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4673 
RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4674 4675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4676 H2C_CAT_OUTSRC, BTFC_SET, 4677 SET_DRV_INFO, 0, 0, 4678 len); 4679 4680 ret = rtw89_h2c_tx(rtwdev, skb, false); 4681 if (ret) { 4682 rtw89_err(rtwdev, "failed to send h2c\n"); 4683 goto fail; 4684 } 4685 4686 return 0; 4687 fail: 4688 dev_kfree_skb_any(skb); 4689 4690 return ret; 4691 } 4692 4693 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4694 { 4695 struct rtw89_btc *btc = &rtwdev->btc; 4696 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4697 struct rtw89_h2c_cxrole_v7 *h2c; 4698 u32 len = sizeof(*h2c); 4699 struct sk_buff *skb; 4700 int ret; 4701 4702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4703 if (!skb) { 4704 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4705 return -ENOMEM; 4706 } 4707 skb_put(skb, len); 4708 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4709 4710 h2c->hdr.type = type; 4711 h2c->hdr.ver = btc->ver->fwlrole; 4712 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4713 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4714 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4715 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4716 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4717 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4718 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4719 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4720 4721 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4722 H2C_CAT_OUTSRC, BTFC_SET, 4723 SET_DRV_INFO, 0, 0, 4724 len); 4725 4726 ret = rtw89_h2c_tx(rtwdev, skb, false); 4727 if (ret) { 4728 rtw89_err(rtwdev, "failed to send h2c\n"); 4729 goto fail; 4730 } 4731 4732 return 0; 4733 fail: 4734 dev_kfree_skb_any(skb); 4735 4736 return ret; 4737 } 4738 4739 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4740 { 4741 struct rtw89_btc *btc = &rtwdev->btc; 4742 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4743 struct rtw89_h2c_cxrole_v8 *h2c; 4744 u32 len = sizeof(*h2c); 4745 struct sk_buff *skb; 4746 int ret; 4747 4748 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4749 if (!skb) { 4750 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4751 return -ENOMEM; 4752 } 4753 skb_put(skb, len); 4754 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4755 4756 h2c->hdr.type = type; 4757 h2c->hdr.ver = btc->ver->fwlrole; 4758 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4759 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4760 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4761 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4762 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4763 4764 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4765 H2C_CAT_OUTSRC, BTFC_SET, 4766 SET_DRV_INFO, 0, 0, 4767 len); 4768 4769 ret = rtw89_h2c_tx(rtwdev, skb, false); 4770 if (ret) { 4771 rtw89_err(rtwdev, "failed to send h2c\n"); 4772 goto fail; 4773 } 4774 4775 return 0; 4776 fail: 4777 dev_kfree_skb_any(skb); 4778 4779 return ret; 4780 } 4781 4782 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4783 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4784 { 4785 struct rtw89_btc *btc = &rtwdev->btc; 4786 const struct rtw89_btc_ver *ver = btc->ver; 4787 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4788 struct sk_buff *skb; 4789 u8 *cmd; 4790 int ret; 4791 4792 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4793 
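/* H2C_LEN_CXDRVINFO_CTRL is a fixed-size payload: the CX driver-info
 * header plus four bytes of control flags (manual, igno_bt,
 * always_freerun and, for fcxctrl version 0, trace_step).
 */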
if (!skb) { 4794 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4795 return -ENOMEM; 4796 } 4797 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4798 cmd = skb->data; 4799 4800 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4801 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4802 4803 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4804 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4805 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4806 if (ver->fcxctrl == 0) 4807 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4808 4809 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4810 H2C_CAT_OUTSRC, BTFC_SET, 4811 SET_DRV_INFO, 0, 0, 4812 H2C_LEN_CXDRVINFO_CTRL); 4813 4814 ret = rtw89_h2c_tx(rtwdev, skb, false); 4815 if (ret) { 4816 rtw89_err(rtwdev, "failed to send h2c\n"); 4817 goto fail; 4818 } 4819 4820 return 0; 4821 fail: 4822 dev_kfree_skb_any(skb); 4823 4824 return ret; 4825 } 4826 4827 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4828 { 4829 struct rtw89_btc *btc = &rtwdev->btc; 4830 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4831 struct rtw89_h2c_cxctrl_v7 *h2c; 4832 u32 len = sizeof(*h2c); 4833 struct sk_buff *skb; 4834 int ret; 4835 4836 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4837 if (!skb) { 4838 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4839 return -ENOMEM; 4840 } 4841 skb_put(skb, len); 4842 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4843 4844 h2c->hdr.type = type; 4845 h2c->hdr.ver = btc->ver->fcxctrl; 4846 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4847 h2c->ctrl = *ctrl; 4848 4849 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4850 H2C_CAT_OUTSRC, BTFC_SET, 4851 SET_DRV_INFO, 0, 0, len); 4852 4853 ret = rtw89_h2c_tx(rtwdev, skb, false); 4854 if (ret) { 4855 rtw89_err(rtwdev, "failed to send h2c\n"); 4856 goto fail; 4857 } 4858 4859 return 0; 4860 fail: 4861 dev_kfree_skb_any(skb); 4862 4863 return ret; 4864 } 4865 4866 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4867 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4868 { 4869 struct rtw89_btc *btc = &rtwdev->btc; 4870 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4871 struct sk_buff *skb; 4872 u8 *cmd; 4873 int ret; 4874 4875 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4876 if (!skb) { 4877 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4878 return -ENOMEM; 4879 } 4880 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4881 cmd = skb->data; 4882 4883 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4884 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4885 4886 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4887 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4888 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4889 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4890 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4891 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4892 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4893 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4894 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4895 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4896 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4897 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4898 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4899 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4900 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4901 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4902 
RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4903 4904 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4905 H2C_CAT_OUTSRC, BTFC_SET, 4906 SET_DRV_INFO, 0, 0, 4907 H2C_LEN_CXDRVINFO_TRX); 4908 4909 ret = rtw89_h2c_tx(rtwdev, skb, false); 4910 if (ret) { 4911 rtw89_err(rtwdev, "failed to send h2c\n"); 4912 goto fail; 4913 } 4914 4915 return 0; 4916 fail: 4917 dev_kfree_skb_any(skb); 4918 4919 return ret; 4920 } 4921 4922 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4923 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4924 { 4925 struct rtw89_btc *btc = &rtwdev->btc; 4926 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4927 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4928 struct sk_buff *skb; 4929 u8 *cmd; 4930 int ret; 4931 4932 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4933 if (!skb) { 4934 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4935 return -ENOMEM; 4936 } 4937 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4938 cmd = skb->data; 4939 4940 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4941 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4942 4943 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4944 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4945 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4946 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4947 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4948 4949 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4950 H2C_CAT_OUTSRC, BTFC_SET, 4951 SET_DRV_INFO, 0, 0, 4952 H2C_LEN_CXDRVINFO_RFK); 4953 4954 ret = rtw89_h2c_tx(rtwdev, skb, false); 4955 if (ret) { 4956 rtw89_err(rtwdev, "failed to send h2c\n"); 4957 goto fail; 4958 } 4959 4960 return 0; 4961 fail: 4962 dev_kfree_skb_any(skb); 4963 4964 return ret; 4965 } 4966 4967 #define H2C_LEN_PKT_OFLD 4 4968 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4969 { 4970 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4971 struct sk_buff *skb; 4972 unsigned int cond; 4973 u8 *cmd; 4974 int ret; 4975 4976 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4977 if (!skb) { 4978 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4979 return -ENOMEM; 4980 } 4981 skb_put(skb, H2C_LEN_PKT_OFLD); 4982 cmd = skb->data; 4983 4984 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4985 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4986 4987 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4988 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4989 H2C_FUNC_PACKET_OFLD, 1, 1, 4990 H2C_LEN_PKT_OFLD); 4991 4992 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4993 4994 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4995 if (ret < 0) { 4996 rtw89_debug(rtwdev, RTW89_DBG_FW, 4997 "failed to del pkt ofld: id %d, ret %d\n", 4998 id, ret); 4999 return ret; 5000 } 5001 5002 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 5003 return 0; 5004 } 5005 5006 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5007 struct sk_buff *skb_ofld) 5008 { 5009 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5010 struct sk_buff *skb; 5011 unsigned int cond; 5012 u8 *cmd; 5013 u8 alloc_id; 5014 int ret; 5015 5016 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5017 RTW89_MAX_PKT_OFLD_NUM); 5018 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5019 return -ENOSPC; 5020 5021 *id = alloc_id; 5022 5023 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5024 if (!skb) { 5025 
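/* Allocation failed: report it and release the offload id reserved
 * from the pkt_offload bitmap above before returning.
 */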
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5026 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5027 return -ENOMEM; 5028 } 5029 skb_put(skb, H2C_LEN_PKT_OFLD); 5030 cmd = skb->data; 5031 5032 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5033 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5034 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5035 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5036 5037 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5038 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5039 H2C_FUNC_PACKET_OFLD, 1, 1, 5040 H2C_LEN_PKT_OFLD + skb_ofld->len); 5041 5042 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5043 5044 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5045 if (ret < 0) { 5046 rtw89_debug(rtwdev, RTW89_DBG_FW, 5047 "failed to add pkt ofld: id %d, ret %d\n", 5048 alloc_id, ret); 5049 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5050 return ret; 5051 } 5052 5053 return 0; 5054 } 5055 5056 static 5057 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 5058 struct list_head *chan_list) 5059 { 5060 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5061 struct rtw89_h2c_chinfo_elem *elem; 5062 struct rtw89_mac_chinfo *ch_info; 5063 struct rtw89_h2c_chinfo *h2c; 5064 struct sk_buff *skb; 5065 unsigned int cond; 5066 int skb_len; 5067 int ret; 5068 5069 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5070 5071 skb_len = struct_size(h2c, elem, ch_num); 5072 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5073 if (!skb) { 5074 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5075 return -ENOMEM; 5076 } 5077 skb_put(skb, sizeof(*h2c)); 5078 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5079 5080 h2c->ch_num = ch_num; 5081 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5082 5083 list_for_each_entry(ch_info, chan_list, list) { 5084 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5085 5086 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5087 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5088 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5089 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5090 5091 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5092 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5093 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5094 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5095 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5096 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5097 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5098 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5099 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5100 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5101 5102 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5103 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5104 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5105 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5106 5107 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5108 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5109 le32_encode_bits(ch_info->pkt_id[6], 
RTW89_H2C_CHINFO_W3_PKT6) | 5110 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5111 } 5112 5113 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5114 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5115 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5116 5117 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5118 5119 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5120 if (ret) { 5121 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5122 return ret; 5123 } 5124 5125 return 0; 5126 } 5127 5128 static 5129 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5130 struct list_head *chan_list, 5131 struct rtw89_vif_link *rtwvif_link) 5132 { 5133 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5134 struct rtw89_h2c_chinfo_elem_be *elem; 5135 struct rtw89_mac_chinfo_be *ch_info; 5136 struct rtw89_h2c_chinfo_be *h2c; 5137 struct sk_buff *skb; 5138 unsigned int cond; 5139 u8 ver = U8_MAX; 5140 int skb_len; 5141 int ret; 5142 5143 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5144 5145 skb_len = struct_size(h2c, elem, ch_num); 5146 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5147 if (!skb) { 5148 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5149 return -ENOMEM; 5150 } 5151 5152 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5153 ver = 0; 5154 5155 skb_put(skb, sizeof(*h2c)); 5156 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5157 5158 h2c->ch_num = ch_num; 5159 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5160 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5161 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5162 5163 list_for_each_entry(ch_info, chan_list, list) { 5164 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5165 5166 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5167 le32_encode_bits(ch_info->central_ch, 5168 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5169 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5170 5171 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5172 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5173 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5174 le32_encode_bits(ch_info->pause_data, 5175 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5176 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5177 le32_encode_bits(ch_info->rand_seq_num, 5178 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5179 le32_encode_bits(ch_info->notify_action, 5180 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5181 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5182 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5183 le32_encode_bits(ch_info->leave_crit, 5184 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5185 le32_encode_bits(ch_info->chkpt_timer, 5186 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5187 5188 elem->w2 = le32_encode_bits(ch_info->leave_time, 5189 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5190 le32_encode_bits(ch_info->leave_th, 5191 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5192 le32_encode_bits(ch_info->tx_pkt_ctrl, 5193 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5194 5195 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5196 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5197 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5198 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5199 5200 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5201 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5202 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5203 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5204 5205 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5206 le32_encode_bits(ch_info->fw_probe0_ssids, 5207 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5208 5209 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5210 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5211 le32_encode_bits(ch_info->fw_probe0_bssids, 5212 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5213 if (ver == 0) 5214 elem->w0 |= 5215 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5216 else 5217 elem->w7 = le32_encode_bits(ch_info->period, 5218 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5219 } 5220 5221 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5222 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5223 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5224 5225 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5226 5227 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5228 if (ret) { 5229 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5230 return ret; 5231 } 5232 5233 return 0; 5234 } 5235 5236 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5237 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5238 struct rtw89_scan_option *option, 5239 struct rtw89_vif_link *rtwvif_link, 5240 bool wowlan) 5241 { 5242 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5243 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5244 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5245 struct rtw89_h2c_scanofld *h2c; 5246 u32 len = sizeof(*h2c); 5247 struct sk_buff *skb; 5248 unsigned int cond; 5249 u64 tsf = 0; 5250 int ret; 5251 5252 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5253 if (!skb) { 5254 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5255 return -ENOMEM; 5256 } 5257 skb_put(skb, len); 5258 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5259 5260 if (option->delay) { 5261 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5262 if (ret) { 5263 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5264 scan_mode = RTW89_SCAN_IMMEDIATE; 5265 } else { 5266 scan_mode = RTW89_SCAN_DELAY; 5267 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5268 } 5269 } 5270 5271 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5272 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5273 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5274 le32_encode_bits(option->enable, 
RTW89_H2C_SCANOFLD_W0_OPERATION); 5275 5276 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5277 le32_encode_bits(option->target_ch_mode, 5278 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5279 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5280 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5281 5282 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5283 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5284 5285 if (option->target_ch_mode) { 5286 h2c->w1 |= le32_encode_bits(op->band_width, 5287 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5288 le32_encode_bits(op->primary_channel, 5289 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5290 le32_encode_bits(op->channel, 5291 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5292 h2c->w0 |= le32_encode_bits(op->band_type, 5293 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5294 } 5295 5296 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5297 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5298 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5299 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5300 5301 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5302 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5303 H2C_FUNC_SCANOFLD, 1, 1, 5304 len); 5305 5306 if (option->enable) 5307 cond = RTW89_SCANOFLD_WAIT_COND_START; 5308 else 5309 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5310 5311 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5312 if (ret) { 5313 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5314 return ret; 5315 } 5316 5317 return 0; 5318 } 5319 5320 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5321 struct rtw89_scan_option *option) 5322 { 5323 struct ieee80211_supported_band *sband; 5324 struct ieee80211_channel *chan; 5325 u8 i, idx; 5326 5327 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5328 if (!sband) { 5329 option->prohib_chan = U64_MAX; 5330 return; 5331 } 5332 5333 for (i = 0; i < sband->n_channels; i++) { 5334 chan = &sband->channels[i]; 5335 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5336 idx = (chan->hw_value - 1) / 4; 5337 option->prohib_chan |= BIT(idx); 5338 } 5339 } 5340 } 5341 5342 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5343 struct rtw89_scan_option *option, 5344 struct rtw89_vif_link *rtwvif_link, 5345 bool wowlan) 5346 { 5347 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5348 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5349 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5350 struct cfg80211_scan_request *req = rtwvif->scan_req; 5351 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5352 struct rtw89_chan *op = &scan_info->op_chan; 5353 struct rtw89_h2c_scanofld_be_opch *opch; 5354 struct rtw89_pktofld_info *pkt_info; 5355 struct rtw89_h2c_scanofld_be *h2c; 5356 struct sk_buff *skb; 5357 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5358 u8 opch_size = sizeof(*opch) * option->num_opch; 5359 u8 probe_id[NUM_NL80211_BANDS]; 5360 u8 cfg_len = sizeof(*h2c); 5361 unsigned int cond; 5362 u8 ver = U8_MAX; 5363 #if defined(__linux__) 5364 void *ptr; 5365 #elif defined(__FreeBSD__) 5366 u8 *ptr; 5367 #endif 5368 int ret; 5369 u32 len; 5370 u8 i; 5371 5372 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5373 5374 len = cfg_len + macc_role_size + opch_size; 5375 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5376 if (!skb) { 5377 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5378 return -ENOMEM; 5379 } 5380 5381 skb_put(skb, len); 5382 h2c = (struct 
rtw89_h2c_scanofld_be *)skb->data; 5383 ptr = skb->data; 5384 5385 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5386 5387 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5388 ver = 0; 5389 5390 if (!wowlan) { 5391 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5392 if (pkt_info->wildcard_6ghz) { 5393 /* Provide wildcard as template */ 5394 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5395 break; 5396 } 5397 } 5398 } 5399 5400 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5401 le32_encode_bits(option->scan_mode, 5402 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5403 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5404 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5405 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5406 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5407 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5408 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5409 5410 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5411 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5412 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5413 5414 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5415 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5416 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5417 5418 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5419 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5420 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5421 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5422 5423 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5424 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5425 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5426 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5427 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5428 5429 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5430 5431 h2c->w6 = le32_encode_bits(option->prohib_chan, 5432 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5433 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5434 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5435 if (!wowlan && req->no_cck) { 5436 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5437 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5438 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5439 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5440 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5441 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5442 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5443 } 5444 5445 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5446 cfg_len = offsetofend(typeof(*h2c), w8); 5447 goto flex_member; 5448 } 5449 5450 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5451 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5452 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5453 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5454 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5455 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5456 5457 flex_member: 5458 ptr += cfg_len; 5459 5460 for (i = 0; i < option->num_macc_role; i++) { 5461 #if defined(__linux__) 5462 macc_role = ptr; 5463 #elif defined(__FreeBSD__) 5464 macc_role = (void *)ptr; 5465 #endif 5466 macc_role->w0 = 
5467 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5468 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5469 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5470 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5471 ptr += sizeof(*macc_role); 5472 } 5473 5474 for (i = 0; i < option->num_opch; i++) { 5475 #if defined(__linux__) 5476 opch = ptr; 5477 #elif defined(__FreeBSD__) 5478 opch = (void *)ptr; 5479 #endif 5480 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5481 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5482 le32_encode_bits(option->band, 5483 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5484 le32_encode_bits(rtwvif_link->port, 5485 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5486 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5487 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5488 le32_encode_bits(true, 5489 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5490 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5491 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5492 5493 opch->w1 = le32_encode_bits(op->band_type, 5494 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5495 le32_encode_bits(op->band_width, 5496 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5497 le32_encode_bits(0x3, 5498 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5499 le32_encode_bits(op->primary_channel, 5500 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5501 le32_encode_bits(op->channel, 5502 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5503 5504 opch->w2 = le32_encode_bits(0, 5505 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5506 le32_encode_bits(0, 5507 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5508 le32_encode_bits(2, 5509 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5510 5511 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5512 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5513 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5514 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5515 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5516 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5517 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5518 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5519 5520 if (ver == 0) 5521 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5522 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5523 else 5524 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5525 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5526 ptr += sizeof(*opch); 5527 } 5528 5529 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5530 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5531 H2C_FUNC_SCANOFLD_BE, 1, 1, 5532 len); 5533 5534 if (option->enable) 5535 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5536 else 5537 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5538 5539 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5540 if (ret) { 5541 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5542 return ret; 5543 } 5544 5545 return 0; 5546 } 5547 5548 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5549 struct rtw89_fw_h2c_rf_reg_info *info, 5550 u16 len, u8 page) 5551 { 5552 struct sk_buff *skb; 5553 u8 class = info->rf_path == RF_PATH_A ? 
5554 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5555 int ret; 5556 5557 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5558 if (!skb) { 5559 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5560 return -ENOMEM; 5561 } 5562 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5563 5564 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5565 H2C_CAT_OUTSRC, class, page, 0, 0, 5566 len); 5567 5568 ret = rtw89_h2c_tx(rtwdev, skb, false); 5569 if (ret) { 5570 rtw89_err(rtwdev, "failed to send h2c\n"); 5571 goto fail; 5572 } 5573 5574 return 0; 5575 fail: 5576 dev_kfree_skb_any(skb); 5577 5578 return ret; 5579 } 5580 5581 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5582 { 5583 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5584 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5585 struct sk_buff *skb; 5586 int ret; 5587 u8 idx; 5588 5589 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5590 if (!skb) { 5591 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5592 return -ENOMEM; 5593 } 5594 skb_put(skb, sizeof(*mccch)); 5595 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5596 5597 idx = rfk_mcc->table_idx; 5598 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5599 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5600 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5601 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5602 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5603 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5604 5605 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5606 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5607 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5608 sizeof(*mccch)); 5609 5610 ret = rtw89_h2c_tx(rtwdev, skb, false); 5611 if (ret) { 5612 rtw89_err(rtwdev, "failed to send h2c\n"); 5613 goto fail; 5614 } 5615 5616 return 0; 5617 fail: 5618 dev_kfree_skb_any(skb); 5619 5620 return ret; 5621 } 5622 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5623 5624 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5625 enum rtw89_phy_idx phy_idx) 5626 { 5627 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5628 struct rtw89_fw_h2c_rfk_pre_info_common *common; 5629 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5630 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 5631 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5632 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5633 u32 len = sizeof(*h2c); 5634 struct sk_buff *skb; 5635 u8 ver = U8_MAX; 5636 u8 tbl, path; 5637 u32 val32; 5638 int ret; 5639 5640 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 5641 len = sizeof(*h2c_v1); 5642 ver = 1; 5643 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5644 len = sizeof(*h2c_v0); 5645 ver = 0; 5646 } 5647 5648 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5649 if (!skb) { 5650 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5651 return -ENOMEM; 5652 } 5653 skb_put(skb, len); 5654 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5655 common = &h2c->base_v1.common; 5656 5657 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5658 5659 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5660 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5661 5662 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5663 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5664 common->dbcc.ch[path][tbl] = 5665 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5666 common->dbcc.band[path][tbl] = 5667 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5668 } 5669 } 5670 5671 
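	/* Descriptive note (derived from the loop below): for each RF path,
	 * report which RFK table entry is currently selected. Its channel and
	 * band are always filled in; its bandwidth is filled only for layouts
	 * newer than v1, since the v0/v1 formats carry no per-path bandwidth
	 * field.
	 */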
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5672 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5673 5674 common->tbl.cur_ch[path] = 5675 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5676 common->tbl.cur_band[path] = 5677 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5678 5679 if (ver <= 1) 5680 continue; 5681 5682 h2c->cur_bandwidth[path] = 5683 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5684 } 5685 5686 common->phy_idx = cpu_to_le32(phy_idx); 5687 5688 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5689 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5690 5691 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5692 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5693 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5694 5695 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5696 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5697 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5698 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5699 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5700 h2c_v0->rfmod0 = cpu_to_le32(val32); 5701 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5702 h2c_v0->rfmod1 = cpu_to_le32(val32); 5703 5704 if (rtw89_is_mlo_1_1(rtwdev)) 5705 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5706 5707 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5708 5709 goto done; 5710 } 5711 5712 if (rtw89_is_mlo_1_1(rtwdev)) { 5713 h2c_v1 = &h2c->base_v1; 5714 h2c_v1->mlo_1_1 = cpu_to_le32(1); 5715 } 5716 done: 5717 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5718 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5719 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5720 len); 5721 5722 ret = rtw89_h2c_tx(rtwdev, skb, false); 5723 if (ret) { 5724 rtw89_err(rtwdev, "failed to send h2c\n"); 5725 goto fail; 5726 } 5727 5728 return 0; 5729 fail: 5730 dev_kfree_skb_any(skb); 5731 5732 return ret; 5733 } 5734 5735 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5736 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5737 { 5738 struct rtw89_hal *hal = &rtwdev->hal; 5739 struct rtw89_h2c_rf_tssi *h2c; 5740 u32 len = sizeof(*h2c); 5741 struct sk_buff *skb; 5742 int ret; 5743 5744 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5745 if (!skb) { 5746 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5747 return -ENOMEM; 5748 } 5749 skb_put(skb, len); 5750 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5751 5752 h2c->len = cpu_to_le16(len); 5753 h2c->phy = phy_idx; 5754 h2c->ch = chan->channel; 5755 h2c->bw = chan->band_width; 5756 h2c->band = chan->band_type; 5757 h2c->hwtx_en = true; 5758 h2c->cv = hal->cv; 5759 h2c->tssi_mode = tssi_mode; 5760 5761 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5762 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5763 5764 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5765 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5766 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5767 5768 ret = rtw89_h2c_tx(rtwdev, skb, false); 5769 if (ret) { 5770 rtw89_err(rtwdev, "failed to send h2c\n"); 5771 goto fail; 5772 } 5773 5774 return 0; 5775 fail: 5776 dev_kfree_skb_any(skb); 5777 5778 return ret; 5779 } 5780 5781 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5782 const struct rtw89_chan *chan) 5783 { 5784 struct rtw89_h2c_rf_iqk *h2c; 5785 u32 len = sizeof(*h2c); 5786 struct sk_buff *skb; 5787 int ret; 5788 5789 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5790 if (!skb) { 5791 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5792 return -ENOMEM; 5793 } 5794 skb_put(skb, len); 5795 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5796 5797 h2c->phy_idx = cpu_to_le32(phy_idx); 5798 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5799 5800 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5801 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5802 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5803 5804 ret = rtw89_h2c_tx(rtwdev, skb, false); 5805 if (ret) { 5806 rtw89_err(rtwdev, "failed to send h2c\n"); 5807 goto fail; 5808 } 5809 5810 return 0; 5811 fail: 5812 dev_kfree_skb_any(skb); 5813 5814 return ret; 5815 } 5816 5817 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5818 const struct rtw89_chan *chan) 5819 { 5820 struct rtw89_h2c_rf_dpk *h2c; 5821 u32 len = sizeof(*h2c); 5822 struct sk_buff *skb; 5823 int ret; 5824 5825 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5826 if (!skb) { 5827 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5828 return -ENOMEM; 5829 } 5830 skb_put(skb, len); 5831 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5832 5833 h2c->len = len; 5834 h2c->phy = phy_idx; 5835 h2c->dpk_enable = true; 5836 h2c->kpath = RF_AB; 5837 h2c->cur_band = chan->band_type; 5838 h2c->cur_bw = chan->band_width; 5839 h2c->cur_ch = chan->channel; 5840 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5841 5842 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5843 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5844 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5845 5846 ret = rtw89_h2c_tx(rtwdev, skb, false); 5847 if (ret) { 5848 rtw89_err(rtwdev, "failed to send h2c\n"); 5849 goto fail; 5850 } 5851 5852 return 0; 5853 fail: 5854 dev_kfree_skb_any(skb); 5855 5856 return ret; 5857 } 5858 5859 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5860 const struct rtw89_chan *chan) 5861 { 5862 struct rtw89_hal *hal = &rtwdev->hal; 5863 struct rtw89_h2c_rf_txgapk *h2c; 5864 u32 len = sizeof(*h2c); 5865 struct sk_buff *skb; 5866 int ret; 5867 5868 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5869 if (!skb) { 5870 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5871 return -ENOMEM; 5872 } 5873 skb_put(skb, len); 5874 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5875 5876 h2c->len = len; 5877 h2c->ktype = 2; 5878 h2c->phy = phy_idx; 5879 h2c->kpath = RF_AB; 5880 h2c->band = chan->band_type; 5881 h2c->bw = chan->band_width; 5882 h2c->ch = chan->channel; 5883 h2c->cv = hal->cv; 5884 5885 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5886 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5887 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5888 5889 ret = rtw89_h2c_tx(rtwdev, skb, false); 5890 if (ret) { 5891 rtw89_err(rtwdev, "failed to send h2c\n"); 5892 goto fail; 5893 } 5894 5895 return 0; 5896 fail: 5897 dev_kfree_skb_any(skb); 5898 5899 return ret; 5900 } 5901 5902 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5903 const struct rtw89_chan *chan) 5904 { 5905 struct rtw89_h2c_rf_dack *h2c; 5906 u32 len = sizeof(*h2c); 5907 struct sk_buff *skb; 5908 int ret; 5909 5910 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5911 if (!skb) { 5912 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5913 return -ENOMEM; 5914 } 5915 skb_put(skb, len); 5916 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5917 5918 h2c->len = cpu_to_le32(len); 5919 h2c->phy = cpu_to_le32(phy_idx); 5920 h2c->type = 
cpu_to_le32(0); 5921 5922 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5923 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5924 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5925 5926 ret = rtw89_h2c_tx(rtwdev, skb, false); 5927 if (ret) { 5928 rtw89_err(rtwdev, "failed to send h2c\n"); 5929 goto fail; 5930 } 5931 5932 return 0; 5933 fail: 5934 dev_kfree_skb_any(skb); 5935 5936 return ret; 5937 } 5938 5939 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5940 const struct rtw89_chan *chan, bool is_chl_k) 5941 { 5942 struct rtw89_h2c_rf_rxdck_v0 *v0; 5943 struct rtw89_h2c_rf_rxdck *h2c; 5944 u32 len = sizeof(*h2c); 5945 struct sk_buff *skb; 5946 int ver = -1; 5947 int ret; 5948 5949 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 5950 len = sizeof(*v0); 5951 ver = 0; 5952 } 5953 5954 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5955 if (!skb) { 5956 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5957 return -ENOMEM; 5958 } 5959 skb_put(skb, len); 5960 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 5961 5962 v0->len = len; 5963 v0->phy = phy_idx; 5964 v0->is_afe = false; 5965 v0->kpath = RF_AB; 5966 v0->cur_band = chan->band_type; 5967 v0->cur_bw = chan->band_width; 5968 v0->cur_ch = chan->channel; 5969 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5970 5971 if (ver == 0) 5972 goto hdr; 5973 5974 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5975 h2c->is_chl_k = is_chl_k; 5976 5977 hdr: 5978 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5979 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5980 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5981 5982 ret = rtw89_h2c_tx(rtwdev, skb, false); 5983 if (ret) { 5984 rtw89_err(rtwdev, "failed to send h2c\n"); 5985 goto fail; 5986 } 5987 5988 return 0; 5989 fail: 5990 dev_kfree_skb_any(skb); 5991 5992 return ret; 5993 } 5994 5995 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5996 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5997 bool rack, bool dack) 5998 { 5999 struct sk_buff *skb; 6000 int ret; 6001 6002 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6003 if (!skb) { 6004 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6005 return -ENOMEM; 6006 } 6007 skb_put_data(skb, buf, len); 6008 6009 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6010 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6011 len); 6012 6013 ret = rtw89_h2c_tx(rtwdev, skb, false); 6014 if (ret) { 6015 rtw89_err(rtwdev, "failed to send h2c\n"); 6016 goto fail; 6017 } 6018 6019 return 0; 6020 fail: 6021 dev_kfree_skb_any(skb); 6022 6023 return ret; 6024 } 6025 6026 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6027 { 6028 struct sk_buff *skb; 6029 int ret; 6030 6031 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6032 if (!skb) { 6033 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6034 return -ENOMEM; 6035 } 6036 skb_put_data(skb, buf, len); 6037 6038 ret = rtw89_h2c_tx(rtwdev, skb, false); 6039 if (ret) { 6040 rtw89_err(rtwdev, "failed to send h2c\n"); 6041 goto fail; 6042 } 6043 6044 return 0; 6045 fail: 6046 dev_kfree_skb_any(skb); 6047 6048 return ret; 6049 } 6050 6051 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6052 { 6053 struct rtw89_early_h2c *early_h2c; 6054 6055 lockdep_assert_held(&rtwdev->mutex); 6056 6057 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6058 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 6059 } 6060 } 6061 6062 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6063 { 
6064 struct rtw89_early_h2c *early_h2c, *tmp; 6065 6066 mutex_lock(&rtwdev->mutex); 6067 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6068 list_del(&early_h2c->list); 6069 kfree(early_h2c->h2c); 6070 kfree(early_h2c); 6071 } 6072 mutex_unlock(&rtwdev->mutex); 6073 } 6074 6075 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6076 { 6077 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6078 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6079 6080 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6081 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6082 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6083 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6084 } 6085 6086 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6087 struct sk_buff *c2h) 6088 { 6089 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6090 u8 category = attr->category; 6091 u8 class = attr->class; 6092 u8 func = attr->func; 6093 6094 switch (category) { 6095 default: 6096 return false; 6097 case RTW89_C2H_CAT_MAC: 6098 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6099 case RTW89_C2H_CAT_OUTSRC: 6100 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6101 } 6102 } 6103 6104 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6105 { 6106 rtw89_fw_c2h_parse_attr(c2h); 6107 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6108 goto enqueue; 6109 6110 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6111 dev_kfree_skb_any(c2h); 6112 return; 6113 6114 enqueue: 6115 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6116 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 6117 } 6118 6119 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6120 struct sk_buff *skb) 6121 { 6122 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6123 u8 category = attr->category; 6124 u8 class = attr->class; 6125 u8 func = attr->func; 6126 u16 len = attr->len; 6127 bool dump = true; 6128 6129 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6130 return; 6131 6132 switch (category) { 6133 case RTW89_C2H_CAT_TEST: 6134 break; 6135 case RTW89_C2H_CAT_MAC: 6136 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6137 if (class == RTW89_MAC_C2H_CLASS_INFO && 6138 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6139 dump = false; 6140 break; 6141 case RTW89_C2H_CAT_OUTSRC: 6142 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6143 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6144 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6145 else 6146 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6147 break; 6148 } 6149 6150 if (dump) 6151 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6152 } 6153 6154 void rtw89_fw_c2h_work(struct work_struct *work) 6155 { 6156 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6157 c2h_work); 6158 struct sk_buff *skb, *tmp; 6159 6160 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6161 skb_unlink(skb, &rtwdev->c2h_queue); 6162 mutex_lock(&rtwdev->mutex); 6163 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6164 mutex_unlock(&rtwdev->mutex); 6165 dev_kfree_skb_any(skb); 6166 } 6167 } 6168 6169 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6170 struct rtw89_mac_h2c_info *info) 6171 { 6172 const struct rtw89_chip_info *chip = rtwdev->chip; 6173 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6174 const u32 *h2c_reg = chip->h2c_regs; 6175 u8 i, val, len; 6176 int ret; 6177 6178 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 
5000, false, 6179 rtwdev, chip->h2c_ctrl_reg); 6180 if (ret) { 6181 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6182 return ret; 6183 } 6184 6185 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6186 sizeof(info->u.h2creg[0])); 6187 6188 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6189 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6190 6191 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6192 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6193 6194 fw_info->h2c_counter++; 6195 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6196 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6197 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6198 6199 return 0; 6200 } 6201 6202 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6203 struct rtw89_mac_c2h_info *info) 6204 { 6205 const struct rtw89_chip_info *chip = rtwdev->chip; 6206 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6207 const u32 *c2h_reg = chip->c2h_regs; 6208 u32 ret; 6209 u8 i, val; 6210 6211 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6212 6213 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6214 RTW89_C2H_TIMEOUT, false, rtwdev, 6215 chip->c2h_ctrl_reg); 6216 if (ret) { 6217 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6218 return ret; 6219 } 6220 6221 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6222 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6223 6224 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6225 6226 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6227 info->content_len = 6228 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6229 RTW89_C2HREG_HDR_LEN; 6230 6231 fw_info->c2h_counter++; 6232 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6233 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6234 6235 return 0; 6236 } 6237 6238 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6239 struct rtw89_mac_h2c_info *h2c_info, 6240 struct rtw89_mac_c2h_info *c2h_info) 6241 { 6242 u32 ret; 6243 6244 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6245 lockdep_assert_held(&rtwdev->mutex); 6246 6247 if (!h2c_info && !c2h_info) 6248 return -EINVAL; 6249 6250 if (!h2c_info) 6251 goto recv_c2h; 6252 6253 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6254 if (ret) 6255 return ret; 6256 6257 recv_c2h: 6258 if (!c2h_info) 6259 return 0; 6260 6261 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6262 if (ret) 6263 return ret; 6264 6265 return 0; 6266 } 6267 6268 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6269 { 6270 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6271 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6272 return; 6273 } 6274 6275 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6276 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6277 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6278 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6279 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6280 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6281 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6282 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6283 6284 rtw89_fw_prog_cnt_dump(rtwdev); 6285 } 6286 6287 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6288 { 6289 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6290 struct rtw89_pktofld_info *info, *tmp; 6291 u8 idx; 6292 6293 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6294 if 
(!(rtwdev->chip->support_bands & BIT(idx))) 6295 continue; 6296 6297 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6298 if (test_bit(info->id, rtwdev->pkt_offload)) 6299 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6300 list_del(&info->list); 6301 kfree(info); 6302 } 6303 } 6304 } 6305 6306 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6307 struct cfg80211_scan_request *req, 6308 struct rtw89_pktofld_info *info, 6309 enum nl80211_band band, u8 ssid_idx) 6310 { 6311 if (band != NL80211_BAND_6GHZ) 6312 return false; 6313 6314 if (req->ssids[ssid_idx].ssid_len) { 6315 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6316 req->ssids[ssid_idx].ssid_len); 6317 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6318 return false; 6319 } else { 6320 info->wildcard_6ghz = true; 6321 return true; 6322 } 6323 } 6324 6325 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6326 struct rtw89_vif_link *rtwvif_link, 6327 struct sk_buff *skb, u8 ssid_idx) 6328 { 6329 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6330 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6331 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6332 struct cfg80211_scan_request *req = rtwvif->scan_req; 6333 struct rtw89_pktofld_info *info; 6334 struct sk_buff *new; 6335 int ret = 0; 6336 u8 band; 6337 6338 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6339 if (!(rtwdev->chip->support_bands & BIT(band))) 6340 continue; 6341 6342 new = skb_copy(skb, GFP_KERNEL); 6343 if (!new) { 6344 ret = -ENOMEM; 6345 goto out; 6346 } 6347 skb_put_data(new, ies->ies[band], ies->len[band]); 6348 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6349 6350 info = kzalloc(sizeof(*info), GFP_KERNEL); 6351 if (!info) { 6352 ret = -ENOMEM; 6353 kfree_skb(new); 6354 goto out; 6355 } 6356 6357 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6358 6359 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6360 if (ret) { 6361 kfree_skb(new); 6362 kfree(info); 6363 goto out; 6364 } 6365 6366 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6367 kfree_skb(new); 6368 } 6369 out: 6370 return ret; 6371 } 6372 6373 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6374 struct rtw89_vif_link *rtwvif_link) 6375 { 6376 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6377 struct cfg80211_scan_request *req = rtwvif->scan_req; 6378 struct sk_buff *skb; 6379 u8 num = req->n_ssids, i; 6380 int ret; 6381 6382 for (i = 0; i < num; i++) { 6383 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6384 req->ssids[i].ssid, 6385 req->ssids[i].ssid_len, 6386 req->ie_len); 6387 if (!skb) 6388 return -ENOMEM; 6389 6390 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6391 kfree_skb(skb); 6392 6393 if (ret) 6394 return ret; 6395 } 6396 6397 return 0; 6398 } 6399 6400 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6401 struct ieee80211_scan_ies *ies, 6402 struct cfg80211_scan_request *req, 6403 struct rtw89_mac_chinfo *ch_info) 6404 { 6405 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6406 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6407 struct cfg80211_scan_6ghz_params *params; 6408 struct rtw89_pktofld_info *info, *tmp; 6409 struct ieee80211_hdr *hdr; 6410 struct sk_buff *skb; 6411 bool found; 6412 int ret = 0; 6413 u8 i; 6414 6415 if (!req->n_6ghz_params) 6416 return 0; 6417 6418 for (i = 0; i < req->n_6ghz_params; i++) { 6419 params = &req->scan_6ghz_params[i]; 6420 
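		/* Descriptive note (derived from the checks below): build directed
		 * probe requests for RNR-reported BSSIDs. Only params aimed at the
		 * channel entry being configured are considered, BSSIDs that already
		 * have an offloaded packet are skipped, and a new entry extends the
		 * dwell time so the directed probe can be answered.
		 */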
6421 if (req->channels[params->channel_idx]->hw_value != 6422 ch_info->pri_ch) 6423 continue; 6424 6425 found = false; 6426 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6427 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6428 found = true; 6429 break; 6430 } 6431 } 6432 if (found) 6433 continue; 6434 6435 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6436 NULL, 0, req->ie_len); 6437 if (!skb) 6438 return -ENOMEM; 6439 6440 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6441 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6442 hdr = (struct ieee80211_hdr *)skb->data; 6443 ether_addr_copy(hdr->addr3, params->bssid); 6444 6445 info = kzalloc(sizeof(*info), GFP_KERNEL); 6446 if (!info) { 6447 ret = -ENOMEM; 6448 kfree_skb(skb); 6449 goto out; 6450 } 6451 6452 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6453 if (ret) { 6454 kfree_skb(skb); 6455 kfree(info); 6456 goto out; 6457 } 6458 6459 ether_addr_copy(info->bssid, params->bssid); 6460 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6461 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6462 6463 ch_info->tx_pkt = true; 6464 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6465 6466 kfree_skb(skb); 6467 } 6468 6469 out: 6470 return ret; 6471 } 6472 6473 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6474 int chan_type, int ssid_num, 6475 struct rtw89_mac_chinfo *ch_info) 6476 { 6477 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6478 struct rtw89_pktofld_info *info; 6479 u8 probe_count = 0; 6480 6481 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6482 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6483 ch_info->bw = RTW89_SCAN_WIDTH; 6484 ch_info->tx_pkt = true; 6485 ch_info->cfg_tx_pwr = false; 6486 ch_info->tx_pwr_idx = 0; 6487 ch_info->tx_null = false; 6488 ch_info->pause_data = false; 6489 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6490 6491 if (ssid_num) { 6492 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6493 if (info->channel_6ghz && 6494 ch_info->pri_ch != info->channel_6ghz) 6495 continue; 6496 else if (info->channel_6ghz && probe_count != 0) 6497 ch_info->period += RTW89_CHANNEL_TIME_6G; 6498 6499 if (info->wildcard_6ghz) 6500 continue; 6501 6502 ch_info->pkt_id[probe_count++] = info->id; 6503 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6504 break; 6505 } 6506 ch_info->num_pkt = probe_count; 6507 } 6508 6509 switch (chan_type) { 6510 case RTW89_CHAN_DFS: 6511 if (ch_info->ch_band != RTW89_BAND_6G) 6512 ch_info->period = max_t(u8, ch_info->period, 6513 RTW89_DFS_CHAN_TIME); 6514 ch_info->dwell_time = RTW89_DWELL_TIME; 6515 break; 6516 case RTW89_CHAN_ACTIVE: 6517 break; 6518 default: 6519 rtw89_err(rtwdev, "Channel type out of bound\n"); 6520 } 6521 } 6522 6523 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6524 int ssid_num, 6525 struct rtw89_mac_chinfo *ch_info) 6526 { 6527 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6528 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6529 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6530 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6531 struct cfg80211_scan_request *req = rtwvif->scan_req; 6532 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6533 struct rtw89_pktofld_info *info; 6534 u8 band, probe_count = 0; 6535 int ret; 6536 6537 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6538 ch_info->dfs_ch = chan_type == 
RTW89_CHAN_DFS; 6539 ch_info->bw = RTW89_SCAN_WIDTH; 6540 ch_info->tx_pkt = true; 6541 ch_info->cfg_tx_pwr = false; 6542 ch_info->tx_pwr_idx = 0; 6543 ch_info->tx_null = false; 6544 ch_info->pause_data = false; 6545 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6546 6547 if (ch_info->ch_band == RTW89_BAND_6G) { 6548 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6549 !ch_info->is_psc) { 6550 ch_info->tx_pkt = false; 6551 if (!req->duration_mandatory) 6552 ch_info->period -= RTW89_DWELL_TIME_6G; 6553 } 6554 } 6555 6556 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6557 if (ret) 6558 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6559 6560 if (ssid_num) { 6561 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6562 6563 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6564 if (info->channel_6ghz && 6565 ch_info->pri_ch != info->channel_6ghz) 6566 continue; 6567 else if (info->channel_6ghz && probe_count != 0) 6568 ch_info->period += RTW89_CHANNEL_TIME_6G; 6569 6570 if (info->wildcard_6ghz) 6571 continue; 6572 6573 ch_info->pkt_id[probe_count++] = info->id; 6574 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6575 break; 6576 } 6577 ch_info->num_pkt = probe_count; 6578 } 6579 6580 switch (chan_type) { 6581 case RTW89_CHAN_OPERATE: 6582 ch_info->central_ch = op->channel; 6583 ch_info->pri_ch = op->primary_channel; 6584 ch_info->ch_band = op->band_type; 6585 ch_info->bw = op->band_width; 6586 ch_info->tx_null = true; 6587 ch_info->num_pkt = 0; 6588 break; 6589 case RTW89_CHAN_DFS: 6590 if (ch_info->ch_band != RTW89_BAND_6G) 6591 ch_info->period = max_t(u8, ch_info->period, 6592 RTW89_DFS_CHAN_TIME); 6593 ch_info->dwell_time = RTW89_DWELL_TIME; 6594 ch_info->pause_data = true; 6595 break; 6596 case RTW89_CHAN_ACTIVE: 6597 ch_info->pause_data = true; 6598 break; 6599 default: 6600 rtw89_err(rtwdev, "Channel type out of bound\n"); 6601 } 6602 } 6603 6604 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6605 int ssid_num, 6606 struct rtw89_mac_chinfo_be *ch_info) 6607 { 6608 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6609 struct rtw89_pktofld_info *info; 6610 u8 probe_count = 0, i; 6611 6612 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6613 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6614 ch_info->bw = RTW89_SCAN_WIDTH; 6615 ch_info->tx_null = false; 6616 ch_info->pause_data = false; 6617 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6618 6619 if (ssid_num) { 6620 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6621 ch_info->pkt_id[probe_count++] = info->id; 6622 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6623 break; 6624 } 6625 } 6626 6627 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6628 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6629 6630 switch (chan_type) { 6631 case RTW89_CHAN_DFS: 6632 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6633 ch_info->dwell_time = RTW89_DWELL_TIME; 6634 break; 6635 case RTW89_CHAN_ACTIVE: 6636 break; 6637 default: 6638 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6639 break; 6640 } 6641 } 6642 6643 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6644 int ssid_num, 6645 struct rtw89_mac_chinfo_be *ch_info) 6646 { 6647 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6648 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6649 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6650 struct cfg80211_scan_request *req = rtwvif->scan_req; 6651 struct rtw89_pktofld_info *info; 6652 u8 band, 
probe_count = 0, i; 6653 6654 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6655 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6656 ch_info->bw = RTW89_SCAN_WIDTH; 6657 ch_info->tx_null = false; 6658 ch_info->pause_data = false; 6659 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6660 6661 if (ssid_num) { 6662 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6663 6664 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6665 if (info->channel_6ghz && 6666 ch_info->pri_ch != info->channel_6ghz) 6667 continue; 6668 6669 if (info->wildcard_6ghz) 6670 continue; 6671 6672 ch_info->pkt_id[probe_count++] = info->id; 6673 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6674 break; 6675 } 6676 } 6677 6678 if (ch_info->ch_band == RTW89_BAND_6G) { 6679 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6680 !ch_info->is_psc) { 6681 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6682 if (!req->duration_mandatory) 6683 ch_info->period -= RTW89_DWELL_TIME_6G; 6684 } 6685 } 6686 6687 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6688 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6689 6690 switch (chan_type) { 6691 case RTW89_CHAN_DFS: 6692 if (ch_info->ch_band != RTW89_BAND_6G) 6693 ch_info->period = 6694 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6695 ch_info->dwell_time = RTW89_DWELL_TIME; 6696 ch_info->pause_data = true; 6697 break; 6698 case RTW89_CHAN_ACTIVE: 6699 ch_info->pause_data = true; 6700 break; 6701 default: 6702 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6703 break; 6704 } 6705 } 6706 6707 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6708 struct rtw89_vif_link *rtwvif_link) 6709 { 6710 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6711 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6712 struct rtw89_mac_chinfo *ch_info, *tmp; 6713 struct ieee80211_channel *channel; 6714 struct list_head chan_list; 6715 int list_len; 6716 enum rtw89_chan_type type; 6717 int ret = 0; 6718 u32 idx; 6719 6720 INIT_LIST_HEAD(&chan_list); 6721 for (idx = 0, list_len = 0; 6722 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6723 idx++, list_len++) { 6724 channel = nd_config->channels[idx]; 6725 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6726 if (!ch_info) { 6727 ret = -ENOMEM; 6728 goto out; 6729 } 6730 6731 ch_info->period = RTW89_CHANNEL_TIME; 6732 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6733 ch_info->central_ch = channel->hw_value; 6734 ch_info->pri_ch = channel->hw_value; 6735 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6736 6737 if (channel->flags & 6738 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6739 type = RTW89_CHAN_DFS; 6740 else 6741 type = RTW89_CHAN_ACTIVE; 6742 6743 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6744 list_add_tail(&ch_info->list, &chan_list); 6745 } 6746 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6747 6748 out: 6749 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6750 list_del(&ch_info->list); 6751 kfree(ch_info); 6752 } 6753 6754 return ret; 6755 } 6756 6757 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6758 struct rtw89_vif_link *rtwvif_link, bool connected) 6759 { 6760 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6761 struct cfg80211_scan_request *req = rtwvif->scan_req; 6762 struct rtw89_mac_chinfo *ch_info, *tmp; 6763 struct ieee80211_channel *channel; 6764 struct list_head chan_list; 6765 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6766 int 
list_len, off_chan_time = 0; 6767 enum rtw89_chan_type type; 6768 int ret = 0; 6769 u32 idx; 6770 6771 INIT_LIST_HEAD(&chan_list); 6772 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6773 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6774 idx++, list_len++) { 6775 channel = req->channels[idx]; 6776 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6777 if (!ch_info) { 6778 ret = -ENOMEM; 6779 goto out; 6780 } 6781 6782 if (req->duration) 6783 ch_info->period = req->duration; 6784 else if (channel->band == NL80211_BAND_6GHZ) 6785 ch_info->period = RTW89_CHANNEL_TIME_6G + 6786 RTW89_DWELL_TIME_6G; 6787 else 6788 ch_info->period = RTW89_CHANNEL_TIME; 6789 6790 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6791 ch_info->central_ch = channel->hw_value; 6792 ch_info->pri_ch = channel->hw_value; 6793 ch_info->rand_seq_num = random_seq; 6794 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6795 6796 if (channel->flags & 6797 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6798 type = RTW89_CHAN_DFS; 6799 else 6800 type = RTW89_CHAN_ACTIVE; 6801 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6802 6803 if (connected && 6804 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6805 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6806 if (!tmp) { 6807 ret = -ENOMEM; 6808 kfree(ch_info); 6809 goto out; 6810 } 6811 6812 type = RTW89_CHAN_OPERATE; 6813 tmp->period = req->duration_mandatory ? 6814 req->duration : RTW89_CHANNEL_TIME; 6815 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6816 list_add_tail(&tmp->list, &chan_list); 6817 off_chan_time = 0; 6818 list_len++; 6819 } 6820 list_add_tail(&ch_info->list, &chan_list); 6821 off_chan_time += ch_info->period; 6822 } 6823 rtwdev->scan_info.last_chan_idx = idx; 6824 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6825 6826 out: 6827 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6828 list_del(&ch_info->list); 6829 kfree(ch_info); 6830 } 6831 6832 return ret; 6833 } 6834 6835 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6836 struct rtw89_vif_link *rtwvif_link) 6837 { 6838 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6839 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6840 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6841 struct ieee80211_channel *channel; 6842 struct list_head chan_list; 6843 enum rtw89_chan_type type; 6844 int list_len, ret; 6845 u32 idx; 6846 6847 INIT_LIST_HEAD(&chan_list); 6848 6849 for (idx = 0, list_len = 0; 6850 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6851 idx++, list_len++) { 6852 channel = nd_config->channels[idx]; 6853 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6854 if (!ch_info) { 6855 ret = -ENOMEM; 6856 goto out; 6857 } 6858 6859 ch_info->period = RTW89_CHANNEL_TIME; 6860 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6861 ch_info->central_ch = channel->hw_value; 6862 ch_info->pri_ch = channel->hw_value; 6863 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6864 6865 if (channel->flags & 6866 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6867 type = RTW89_CHAN_DFS; 6868 else 6869 type = RTW89_CHAN_ACTIVE; 6870 6871 rtw89_pno_scan_add_chan_be(rtwdev, type, 6872 nd_config->n_match_sets, ch_info); 6873 list_add_tail(&ch_info->list, &chan_list); 6874 } 6875 6876 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6877 rtwvif_link); 6878 6879 out: 6880 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6881 list_del(&ch_info->list); 6882 
kfree(ch_info); 6883 } 6884 6885 return ret; 6886 } 6887 6888 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6889 struct rtw89_vif_link *rtwvif_link, bool connected) 6890 { 6891 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6892 struct cfg80211_scan_request *req = rtwvif->scan_req; 6893 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6894 struct ieee80211_channel *channel; 6895 struct list_head chan_list; 6896 enum rtw89_chan_type type; 6897 int list_len, ret; 6898 bool random_seq; 6899 u32 idx; 6900 6901 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6902 INIT_LIST_HEAD(&chan_list); 6903 6904 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6905 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6906 idx++, list_len++) { 6907 channel = req->channels[idx]; 6908 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6909 if (!ch_info) { 6910 ret = -ENOMEM; 6911 goto out; 6912 } 6913 6914 if (req->duration) 6915 ch_info->period = req->duration; 6916 else if (channel->band == NL80211_BAND_6GHZ) 6917 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6918 else 6919 ch_info->period = RTW89_CHANNEL_TIME; 6920 6921 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6922 ch_info->central_ch = channel->hw_value; 6923 ch_info->pri_ch = channel->hw_value; 6924 ch_info->rand_seq_num = random_seq; 6925 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6926 6927 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6928 type = RTW89_CHAN_DFS; 6929 else 6930 type = RTW89_CHAN_ACTIVE; 6931 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6932 6933 list_add_tail(&ch_info->list, &chan_list); 6934 } 6935 6936 rtwdev->scan_info.last_chan_idx = idx; 6937 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6938 rtwvif_link); 6939 6940 out: 6941 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6942 list_del(&ch_info->list); 6943 kfree(ch_info); 6944 } 6945 6946 return ret; 6947 } 6948 6949 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6950 struct rtw89_vif_link *rtwvif_link, bool connected) 6951 { 6952 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6953 int ret; 6954 6955 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 6956 if (ret) { 6957 #if defined(__linux__) 6958 rtw89_err(rtwdev, "Update probe request failed\n"); 6959 #elif defined(__FreeBSD__) 6960 rtw89_err(rtwdev, "Update probe request failed: ret %d\n", ret); 6961 #endif 6962 goto out; 6963 } 6964 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 6965 out: 6966 return ret; 6967 } 6968 6969 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 6970 struct rtw89_vif_link *rtwvif_link, 6971 struct ieee80211_scan_request *scan_req) 6972 { 6973 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6974 struct cfg80211_scan_request *req = &scan_req->req; 6975 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 6976 rtwvif_link->chanctx_idx); 6977 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6978 u32 rx_fltr = rtwdev->hal.rx_fltr; 6979 u8 mac_addr[ETH_ALEN]; 6980 u32 reg; 6981 6982 /* clone op and keep it during scan */ 6983 rtwdev->scan_info.op_chan = *chan; 6984 6985 rtwdev->scan_info.scanning_vif = rtwvif_link; 6986 rtwdev->scan_info.last_chan_idx = 0; 6987 rtwdev->scan_info.abort = false; 6988 rtwvif->scan_ies = &scan_req->ies; 6989 rtwvif->scan_req = req; 6990 ieee80211_stop_queues(rtwdev->hw); 6991 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 6992 6993 if (req->flags & 
NL80211_SCAN_FLAG_RANDOM_ADDR) 6994 get_random_mask_addr(mac_addr, req->mac_addr, 6995 req->mac_addr_mask); 6996 else 6997 ether_addr_copy(mac_addr, rtwvif_link->mac_addr); 6998 rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true); 6999 7000 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 7001 rx_fltr &= ~B_AX_A_BC; 7002 rx_fltr &= ~B_AX_A_A1_MATCH; 7003 7004 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 7005 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr); 7006 7007 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 7008 } 7009 7010 struct rtw89_hw_scan_complete_cb_data { 7011 struct rtw89_vif_link *rtwvif_link; 7012 bool aborted; 7013 }; 7014 7015 static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data) 7016 { 7017 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7018 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7019 struct rtw89_hw_scan_complete_cb_data *cb_data = data; 7020 struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link; 7021 struct cfg80211_scan_info info = { 7022 .aborted = cb_data->aborted, 7023 }; 7024 struct rtw89_vif *rtwvif; 7025 u32 reg; 7026 7027 if (!rtwvif_link) 7028 return -EINVAL; 7029 7030 rtwvif = rtwvif_link->rtwvif; 7031 7032 reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx); 7033 rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr); 7034 7035 rtw89_core_scan_complete(rtwdev, rtwvif_link, true); 7036 ieee80211_scan_completed(rtwdev->hw, &info); 7037 ieee80211_wake_queues(rtwdev->hw); 7038 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true); 7039 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 7040 7041 rtw89_release_pkt_list(rtwdev); 7042 rtwvif->scan_req = NULL; 7043 rtwvif->scan_ies = NULL; 7044 scan_info->last_chan_idx = 0; 7045 scan_info->scanning_vif = NULL; 7046 scan_info->abort = false; 7047 7048 return 0; 7049 } 7050 7051 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, 7052 struct rtw89_vif_link *rtwvif_link, 7053 bool aborted) 7054 { 7055 struct rtw89_hw_scan_complete_cb_data cb_data = { 7056 .rtwvif_link = rtwvif_link, 7057 .aborted = aborted, 7058 }; 7059 const struct rtw89_chanctx_cb_parm cb_parm = { 7060 .cb = rtw89_hw_scan_complete_cb, 7061 .data = &cb_data, 7062 .caller = __func__, 7063 }; 7064 7065 /* The things here needs to be done after setting channel (for coex) 7066 * and before proceeding entity mode (for MCC). So, pass a callback 7067 * of them for the right sequence rather than doing them directly. 7068 */ 7069 rtw89_chanctx_proceed(rtwdev, &cb_parm); 7070 } 7071 7072 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, 7073 struct rtw89_vif_link *rtwvif_link) 7074 { 7075 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 7076 int ret; 7077 7078 scan_info->abort = true; 7079 7080 ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false); 7081 if (ret) 7082 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 7083 7084 /* Indicate ieee80211_scan_completed() before returning, which is safe 7085 * because scan abort command always waits for completion of 7086 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 7087 * work properly. 
7088 */ 7089 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 7090 } 7091 7092 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 7093 { 7094 struct rtw89_vif_link *rtwvif_link; 7095 struct rtw89_vif *rtwvif; 7096 unsigned int link_id; 7097 7098 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 7099 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 7100 /* This variable implies connected or during attempt to connect */ 7101 if (!is_zero_ether_addr(rtwvif_link->bssid)) 7102 return true; 7103 } 7104 } 7105 7106 return false; 7107 } 7108 7109 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 7110 struct rtw89_vif_link *rtwvif_link, 7111 bool enable) 7112 { 7113 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7114 struct rtw89_scan_option opt = {0}; 7115 bool connected; 7116 int ret = 0; 7117 7118 if (!rtwvif_link) 7119 return -EINVAL; 7120 7121 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 7122 opt.enable = enable; 7123 opt.target_ch_mode = connected; 7124 if (enable) { 7125 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 7126 if (ret) 7127 goto out; 7128 } 7129 7130 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 7131 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 7132 opt.scan_mode = RTW89_SCAN_MODE_SA; 7133 opt.band = rtwvif_link->mac_idx; 7134 opt.num_macc_role = 0; 7135 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 7136 opt.num_opch = connected ? 1 : 0; 7137 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 7138 } 7139 7140 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7141 out: 7142 return ret; 7143 } 7144 7145 #define H2C_FW_CPU_EXCEPTION_LEN 4 7146 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7147 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7148 { 7149 struct sk_buff *skb; 7150 int ret; 7151 7152 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7153 if (!skb) { 7154 rtw89_err(rtwdev, 7155 "failed to alloc skb for fw cpu exception\n"); 7156 return -ENOMEM; 7157 } 7158 7159 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7160 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7161 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7162 7163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7164 H2C_CAT_TEST, 7165 H2C_CL_FW_STATUS_TEST, 7166 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7167 H2C_FW_CPU_EXCEPTION_LEN); 7168 7169 ret = rtw89_h2c_tx(rtwdev, skb, false); 7170 if (ret) { 7171 rtw89_err(rtwdev, "failed to send h2c\n"); 7172 goto fail; 7173 } 7174 7175 return 0; 7176 7177 fail: 7178 dev_kfree_skb_any(skb); 7179 return ret; 7180 } 7181 7182 #define H2C_PKT_DROP_LEN 24 7183 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7184 const struct rtw89_pkt_drop_params *params) 7185 { 7186 struct sk_buff *skb; 7187 int ret; 7188 7189 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7190 if (!skb) { 7191 rtw89_err(rtwdev, 7192 "failed to alloc skb for packet drop\n"); 7193 return -ENOMEM; 7194 } 7195 7196 switch (params->sel) { 7197 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7198 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7199 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7200 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7201 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7202 break; 7203 default: 7204 rtw89_debug(rtwdev, RTW89_DBG_FW, 7205 "H2C of pkt drop might not fully support sel: %d yet\n", 7206 params->sel); 7207 break; 7208 } 7209 7210 skb_put(skb, H2C_PKT_DROP_LEN); 7211 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7212 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7213 
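	/* The remaining fields scope the drop request: MAC band, port, MBSSID
	 * index, the TF/TRS role info, and the four macid_band_sel words.
	 */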
RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7214 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7215 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7216 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7217 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7218 params->macid_band_sel[0]); 7219 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7220 params->macid_band_sel[1]); 7221 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7222 params->macid_band_sel[2]); 7223 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7224 params->macid_band_sel[3]); 7225 7226 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7227 H2C_CAT_MAC, 7228 H2C_CL_MAC_FW_OFLD, 7229 H2C_FUNC_PKT_DROP, 0, 0, 7230 H2C_PKT_DROP_LEN); 7231 7232 ret = rtw89_h2c_tx(rtwdev, skb, false); 7233 if (ret) { 7234 rtw89_err(rtwdev, "failed to send h2c\n"); 7235 goto fail; 7236 } 7237 7238 return 0; 7239 7240 fail: 7241 dev_kfree_skb_any(skb); 7242 return ret; 7243 } 7244 7245 #define H2C_KEEP_ALIVE_LEN 4 7246 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7247 bool enable) 7248 { 7249 struct sk_buff *skb; 7250 u8 pkt_id = 0; 7251 int ret; 7252 7253 if (enable) { 7254 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7255 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7256 &pkt_id); 7257 if (ret) 7258 return -EPERM; 7259 } 7260 7261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7262 if (!skb) { 7263 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7264 return -ENOMEM; 7265 } 7266 7267 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7268 7269 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7270 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7271 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7272 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7273 7274 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7275 H2C_CAT_MAC, 7276 H2C_CL_MAC_WOW, 7277 H2C_FUNC_KEEP_ALIVE, 0, 1, 7278 H2C_KEEP_ALIVE_LEN); 7279 7280 ret = rtw89_h2c_tx(rtwdev, skb, false); 7281 if (ret) { 7282 rtw89_err(rtwdev, "failed to send h2c\n"); 7283 goto fail; 7284 } 7285 7286 return 0; 7287 7288 fail: 7289 dev_kfree_skb_any(skb); 7290 7291 return ret; 7292 } 7293 7294 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7295 bool enable) 7296 { 7297 struct rtw89_h2c_arp_offload *h2c; 7298 u32 len = sizeof(*h2c); 7299 struct sk_buff *skb; 7300 u8 pkt_id = 0; 7301 int ret; 7302 7303 if (enable) { 7304 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7305 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7306 &pkt_id); 7307 if (ret) 7308 return ret; 7309 } 7310 7311 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7312 if (!skb) { 7313 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7314 return -ENOMEM; 7315 } 7316 7317 skb_put(skb, len); 7318 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7319 7320 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7321 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7322 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7323 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7324 7325 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7326 H2C_CAT_MAC, 7327 H2C_CL_MAC_WOW, 7328 H2C_FUNC_ARP_OFLD, 0, 1, 7329 len); 7330 7331 ret = rtw89_h2c_tx(rtwdev, skb, false); 7332 if (ret) { 7333 rtw89_err(rtwdev, "failed to send h2c\n"); 7334 goto fail; 7335 } 7336 7337 return 0; 7338 7339 fail: 7340 
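	/* The skb was not consumed by a successful send, so release it here
	 * before returning the error.
	 */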
dev_kfree_skb_any(skb); 7341 7342 return ret; 7343 } 7344 7345 #define H2C_DISCONNECT_DETECT_LEN 8 7346 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 7347 struct rtw89_vif_link *rtwvif_link, bool enable) 7348 { 7349 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7350 struct sk_buff *skb; 7351 u8 macid = rtwvif_link->mac_id; 7352 int ret; 7353 7354 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 7355 if (!skb) { 7356 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7357 return -ENOMEM; 7358 } 7359 7360 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 7361 7362 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 7363 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 7364 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 7365 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 7366 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 7367 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 7368 } 7369 7370 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7371 H2C_CAT_MAC, 7372 H2C_CL_MAC_WOW, 7373 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 7374 H2C_DISCONNECT_DETECT_LEN); 7375 7376 ret = rtw89_h2c_tx(rtwdev, skb, false); 7377 if (ret) { 7378 rtw89_err(rtwdev, "failed to send h2c\n"); 7379 goto fail; 7380 } 7381 7382 return 0; 7383 7384 fail: 7385 dev_kfree_skb_any(skb); 7386 7387 return ret; 7388 } 7389 7390 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7391 bool enable) 7392 { 7393 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7394 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7395 struct rtw89_h2c_cfg_nlo *h2c; 7396 u32 len = sizeof(*h2c); 7397 struct sk_buff *skb; 7398 int ret, i; 7399 7400 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7401 if (!skb) { 7402 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 7403 return -ENOMEM; 7404 } 7405 7406 skb_put(skb, len); 7407 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 7408 7409 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 7410 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 7411 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 7412 7413 if (enable) { 7414 h2c->nlo_cnt = nd_config->n_match_sets; 7415 for (i = 0 ; i < nd_config->n_match_sets; i++) { 7416 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 7417 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 7418 nd_config->match_sets[i].ssid.ssid_len); 7419 } 7420 } 7421 7422 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7423 H2C_CAT_MAC, 7424 H2C_CL_MAC_WOW, 7425 H2C_FUNC_NLO, 0, 1, 7426 len); 7427 7428 ret = rtw89_h2c_tx(rtwdev, skb, false); 7429 if (ret) { 7430 rtw89_err(rtwdev, "failed to send h2c\n"); 7431 goto fail; 7432 } 7433 7434 return 0; 7435 7436 fail: 7437 dev_kfree_skb_any(skb); 7438 return ret; 7439 } 7440 7441 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7442 bool enable) 7443 { 7444 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7445 struct rtw89_h2c_wow_global *h2c; 7446 u8 macid = rtwvif_link->mac_id; 7447 u32 len = sizeof(*h2c); 7448 struct sk_buff *skb; 7449 int ret; 7450 7451 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7452 if (!skb) { 7453 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 7454 return -ENOMEM; 7455 } 7456 7457 skb_put(skb, len); 7458 h2c = (struct rtw89_h2c_wow_global *)skb->data; 7459 7460 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 7461 le32_encode_bits(macid, 
RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7462 le32_encode_bits(rtw_wow->ptk_alg, 7463 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7464 le32_encode_bits(rtw_wow->gtk_alg, 7465 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7466 h2c->key_info = rtw_wow->key_info; 7467 7468 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7469 H2C_CAT_MAC, 7470 H2C_CL_MAC_WOW, 7471 H2C_FUNC_WOW_GLOBAL, 0, 1, 7472 len); 7473 7474 ret = rtw89_h2c_tx(rtwdev, skb, false); 7475 if (ret) { 7476 rtw89_err(rtwdev, "failed to send h2c\n"); 7477 goto fail; 7478 } 7479 7480 return 0; 7481 7482 fail: 7483 dev_kfree_skb_any(skb); 7484 7485 return ret; 7486 } 7487 7488 #define H2C_WAKEUP_CTRL_LEN 4 7489 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7490 struct rtw89_vif_link *rtwvif_link, 7491 bool enable) 7492 { 7493 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7494 struct sk_buff *skb; 7495 u8 macid = rtwvif_link->mac_id; 7496 int ret; 7497 7498 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7499 if (!skb) { 7500 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7501 return -ENOMEM; 7502 } 7503 7504 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7505 7506 if (rtw_wow->pattern_cnt) 7507 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7508 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7509 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7510 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7511 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7512 7513 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7514 7515 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7516 H2C_CAT_MAC, 7517 H2C_CL_MAC_WOW, 7518 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7519 H2C_WAKEUP_CTRL_LEN); 7520 7521 ret = rtw89_h2c_tx(rtwdev, skb, false); 7522 if (ret) { 7523 rtw89_err(rtwdev, "failed to send h2c\n"); 7524 goto fail; 7525 } 7526 7527 return 0; 7528 7529 fail: 7530 dev_kfree_skb_any(skb); 7531 7532 return ret; 7533 } 7534 7535 #define H2C_WOW_CAM_UPD_LEN 24 7536 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7537 struct rtw89_wow_cam_info *cam_info) 7538 { 7539 struct sk_buff *skb; 7540 int ret; 7541 7542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7543 if (!skb) { 7544 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n"); 7545 return -ENOMEM; 7546 } 7547 7548 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7549 7550 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7551 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7552 if (cam_info->valid) { 7553 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7554 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7555 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7556 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7557 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7558 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7559 cam_info->negative_pattern_match); 7560 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7561 cam_info->skip_mac_hdr); 7562 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7563 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7564 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7565 } 7566 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7567 7568 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7569 H2C_CAT_MAC, 7570 H2C_CL_MAC_WOW, 7571 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7572 H2C_WOW_CAM_UPD_LEN); 7573 7574 ret = rtw89_h2c_tx(rtwdev, skb, false); 7575 if (ret) { 7576 rtw89_err(rtwdev, "failed to send h2c\n");
7577 goto fail; 7578 } 7579 7580 return 0; 7581 fail: 7582 dev_kfree_skb_any(skb); 7583 7584 return ret; 7585 } 7586 7587 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7588 struct rtw89_vif_link *rtwvif_link, 7589 bool enable) 7590 { 7591 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7592 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7593 struct rtw89_h2c_wow_gtk_ofld *h2c; 7594 u8 macid = rtwvif_link->mac_id; 7595 u32 len = sizeof(*h2c); 7596 u8 pkt_id_sa_query = 0; 7597 struct sk_buff *skb; 7598 u8 pkt_id_eapol = 0; 7599 int ret; 7600 7601 if (!rtw_wow->gtk_alg) 7602 return 0; 7603 7604 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7605 if (!skb) { 7606 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7607 return -ENOMEM; 7608 } 7609 7610 skb_put(skb, len); 7611 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7612 7613 if (!enable) 7614 goto hdr; 7615 7616 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7617 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7618 &pkt_id_eapol); 7619 if (ret) 7620 goto fail; 7621 7622 if (gtk_info->igtk_keyid) { 7623 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7624 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7625 &pkt_id_sa_query); 7626 if (ret) 7627 goto fail; 7628 } 7629 7630 /* not support TKIP yet */ 7631 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7632 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7633 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 7634 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7635 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7636 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7637 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7638 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7639 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7640 h2c->gtk_info = rtw_wow->gtk_info; 7641 7642 hdr: 7643 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7644 H2C_CAT_MAC, 7645 H2C_CL_MAC_WOW, 7646 H2C_FUNC_GTK_OFLD, 0, 1, 7647 len); 7648 7649 ret = rtw89_h2c_tx(rtwdev, skb, false); 7650 if (ret) { 7651 rtw89_err(rtwdev, "failed to send h2c\n"); 7652 goto fail; 7653 } 7654 return 0; 7655 fail: 7656 dev_kfree_skb_any(skb); 7657 7658 return ret; 7659 } 7660 7661 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7662 bool enable) 7663 { 7664 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7665 struct rtw89_h2c_fwips *h2c; 7666 u32 len = sizeof(*h2c); 7667 struct sk_buff *skb; 7668 7669 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7670 if (!skb) { 7671 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7672 return -ENOMEM; 7673 } 7674 skb_put(skb, len); 7675 h2c = (struct rtw89_h2c_fwips *)skb->data; 7676 7677 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7678 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7679 7680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7681 H2C_CAT_MAC, 7682 H2C_CL_MAC_PS, 7683 H2C_FUNC_IPS_CFG, 0, 1, 7684 len); 7685 7686 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7687 } 7688 7689 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7690 { 7691 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7692 struct rtw89_h2c_wow_aoac *h2c; 7693 u32 len = sizeof(*h2c); 7694 struct sk_buff *skb; 7695 7696 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7697 if (!skb) { 7698 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7699 return -ENOMEM; 7700 } 7701 
7702 skb_put(skb, len); 7703 7704 /* This H2C only notifies firmware to generate an AOAC report C2H; 7705 * no parameters are needed. 7706 */ 7707 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7708 H2C_CAT_MAC, 7709 H2C_CL_MAC_WOW, 7710 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7711 len); 7712 7713 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 7714 } 7715 7716 /* Return < 0 if failures happen while waiting for the condition. 7717 * Return 0 when waiting for the condition succeeds. 7718 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7719 * where 1 means during SER. 7720 */ 7721 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7722 struct rtw89_wait_info *wait, unsigned int cond) 7723 { 7724 int ret; 7725 7726 ret = rtw89_h2c_tx(rtwdev, skb, false); 7727 if (ret) { 7728 rtw89_err(rtwdev, "failed to send h2c\n"); 7729 dev_kfree_skb_any(skb); 7730 return -EBUSY; 7731 } 7732 7733 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7734 return 1; 7735 7736 return rtw89_wait_for_cond(wait, cond); 7737 } 7738 7739 #define H2C_ADD_MCC_LEN 16 7740 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7741 const struct rtw89_fw_mcc_add_req *p) 7742 { 7743 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7744 struct sk_buff *skb; 7745 unsigned int cond; 7746 7747 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7748 if (!skb) { 7749 rtw89_err(rtwdev, 7750 "failed to alloc skb for add mcc\n"); 7751 return -ENOMEM; 7752 } 7753 7754 skb_put(skb, H2C_ADD_MCC_LEN); 7755 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7756 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7757 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7758 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7759 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7760 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7761 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7762 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7763 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7764 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7765 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7766 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7767 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7768 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7769 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7770 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7771 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7772 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7773 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7774 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7775 7776 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7777 H2C_CAT_MAC, 7778 H2C_CL_MCC, 7779 H2C_FUNC_ADD_MCC, 0, 0, 7780 H2C_ADD_MCC_LEN); 7781 7782 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7783 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7784 } 7785 7786 #define H2C_START_MCC_LEN 12 7787 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7788 const struct rtw89_fw_mcc_start_req *p) 7789 { 7790 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7791 struct sk_buff *skb; 7792 unsigned int cond; 7793 7794 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7795 if (!skb)
{ 7796 rtw89_err(rtwdev, 7797 "failed to alloc skb for start mcc\n"); 7798 return -ENOMEM; 7799 } 7800 7801 skb_put(skb, H2C_START_MCC_LEN); 7802 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7803 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7804 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7805 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7806 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7807 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7808 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 7809 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7810 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7811 7812 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7813 H2C_CAT_MAC, 7814 H2C_CL_MCC, 7815 H2C_FUNC_START_MCC, 0, 0, 7816 H2C_START_MCC_LEN); 7817 7818 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7819 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7820 } 7821 7822 #define H2C_STOP_MCC_LEN 4 7823 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7824 bool prev_groups) 7825 { 7826 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7827 struct sk_buff *skb; 7828 unsigned int cond; 7829 7830 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7831 if (!skb) { 7832 rtw89_err(rtwdev, 7833 "failed to alloc skb for stop mcc\n"); 7834 return -ENOMEM; 7835 } 7836 7837 skb_put(skb, H2C_STOP_MCC_LEN); 7838 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7839 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7840 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7841 7842 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7843 H2C_CAT_MAC, 7844 H2C_CL_MCC, 7845 H2C_FUNC_STOP_MCC, 0, 0, 7846 H2C_STOP_MCC_LEN); 7847 7848 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7849 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7850 } 7851 7852 #define H2C_DEL_MCC_GROUP_LEN 4 7853 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7854 bool prev_groups) 7855 { 7856 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7857 struct sk_buff *skb; 7858 unsigned int cond; 7859 7860 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7861 if (!skb) { 7862 rtw89_err(rtwdev, 7863 "failed to alloc skb for del mcc group\n"); 7864 return -ENOMEM; 7865 } 7866 7867 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7868 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7869 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7870 7871 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7872 H2C_CAT_MAC, 7873 H2C_CL_MCC, 7874 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7875 H2C_DEL_MCC_GROUP_LEN); 7876 7877 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7878 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7879 } 7880 7881 #define H2C_RESET_MCC_GROUP_LEN 4 7882 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7883 { 7884 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7885 struct sk_buff *skb; 7886 unsigned int cond; 7887 7888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7889 if (!skb) { 7890 rtw89_err(rtwdev, 7891 "failed to alloc skb for reset mcc group\n"); 7892 return -ENOMEM; 7893 } 7894 7895 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7896 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7897 7898 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7899 H2C_CAT_MAC, 7900 H2C_CL_MCC, 7901 
H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7902 H2C_RESET_MCC_GROUP_LEN); 7903 7904 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7905 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7906 } 7907 7908 #define H2C_MCC_REQ_TSF_LEN 4 7909 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7910 const struct rtw89_fw_mcc_tsf_req *req, 7911 struct rtw89_mac_mcc_tsf_rpt *rpt) 7912 { 7913 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7914 struct rtw89_mac_mcc_tsf_rpt *tmp; 7915 struct sk_buff *skb; 7916 unsigned int cond; 7917 int ret; 7918 7919 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 7920 if (!skb) { 7921 rtw89_err(rtwdev, 7922 "failed to alloc skb for mcc req tsf\n"); 7923 return -ENOMEM; 7924 } 7925 7926 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7927 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7928 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7929 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7930 7931 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7932 H2C_CAT_MAC, 7933 H2C_CL_MCC, 7934 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7935 H2C_MCC_REQ_TSF_LEN); 7936 7937 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7938 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7939 if (ret) 7940 return ret; 7941 7942 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7943 *rpt = *tmp; 7944 7945 return 0; 7946 } 7947 7948 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7949 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7950 u8 *bitmap) 7951 { 7952 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7953 struct sk_buff *skb; 7954 unsigned int cond; 7955 u8 map_len; 7956 u8 h2c_len; 7957 7958 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7959 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7960 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7962 if (!skb) { 7963 rtw89_err(rtwdev, 7964 "failed to alloc skb for mcc macid bitmap\n"); 7965 return -ENOMEM; 7966 } 7967 7968 skb_put(skb, h2c_len); 7969 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7970 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7971 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7972 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7973 7974 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7975 H2C_CAT_MAC, 7976 H2C_CL_MCC, 7977 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7978 h2c_len); 7979 7980 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7981 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7982 } 7983 7984 #define H2C_MCC_SYNC_LEN 4 7985 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7986 u8 target, u8 offset) 7987 { 7988 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7989 struct sk_buff *skb; 7990 unsigned int cond; 7991 7992 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7993 if (!skb) { 7994 rtw89_err(rtwdev, 7995 "failed to alloc skb for mcc sync\n"); 7996 return -ENOMEM; 7997 } 7998 7999 skb_put(skb, H2C_MCC_SYNC_LEN); 8000 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 8001 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 8002 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 8003 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 8004 8005 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8006 H2C_CAT_MAC, 8007 H2C_CL_MCC, 8008 H2C_FUNC_MCC_SYNC, 0, 0, 8009 H2C_MCC_SYNC_LEN); 8010 8011 cond = RTW89_MCC_WAIT_COND(group, 
H2C_FUNC_MCC_SYNC); 8012 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8013 } 8014 8015 #define H2C_MCC_SET_DURATION_LEN 20 8016 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 8017 const struct rtw89_fw_mcc_duration *p) 8018 { 8019 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8020 struct sk_buff *skb; 8021 unsigned int cond; 8022 8023 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 8024 if (!skb) { 8025 rtw89_err(rtwdev, 8026 "failed to alloc skb for mcc set duration\n"); 8027 return -ENOMEM; 8028 } 8029 8030 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 8031 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 8032 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 8033 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 8034 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 8035 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 8036 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 8037 p->start_tsf_low); 8038 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 8039 p->start_tsf_high); 8040 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 8041 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 8042 8043 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8044 H2C_CAT_MAC, 8045 H2C_CL_MCC, 8046 H2C_FUNC_MCC_SET_DURATION, 0, 0, 8047 H2C_MCC_SET_DURATION_LEN); 8048 8049 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 8050 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8051 } 8052 8053 static 8054 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 8055 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 8056 struct rtw89_h2c_mrc_add_slot *slot_h2c) 8057 { 8058 bool fill_h2c = !!slot_h2c; 8059 unsigned int i; 8060 8061 if (!fill_h2c) 8062 goto calc_len; 8063 8064 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 8065 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 8066 le32_encode_bits(slot_arg->courtesy_en, 8067 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 8068 le32_encode_bits(slot_arg->role_num, 8069 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 8070 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 8071 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 8072 le32_encode_bits(slot_arg->courtesy_target, 8073 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 8074 8075 for (i = 0; i < slot_arg->role_num; i++) { 8076 slot_h2c->roles[i].w0 = 8077 le32_encode_bits(slot_arg->roles[i].macid, 8078 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 8079 le32_encode_bits(slot_arg->roles[i].role_type, 8080 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 8081 le32_encode_bits(slot_arg->roles[i].is_master, 8082 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 8083 le32_encode_bits(slot_arg->roles[i].en_tx_null, 8084 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 8085 le32_encode_bits(false, 8086 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 8087 le32_encode_bits(false, 8088 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 8089 slot_h2c->roles[i].w1 = 8090 le32_encode_bits(slot_arg->roles[i].central_ch, 8091 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 8092 le32_encode_bits(slot_arg->roles[i].primary_ch, 8093 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 8094 le32_encode_bits(slot_arg->roles[i].bw, 8095 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 8096 le32_encode_bits(slot_arg->roles[i].band, 8097 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 8098 le32_encode_bits(slot_arg->roles[i].null_early, 8099 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 8100 le32_encode_bits(false, 8101 
RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 8102 le32_encode_bits(true, 8103 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 8104 slot_h2c->roles[i].macid_main_bitmap = 8105 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 8106 slot_h2c->roles[i].macid_paired_bitmap = 8107 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 8108 } 8109 8110 calc_len: 8111 return struct_size(slot_h2c, roles, slot_arg->role_num); 8112 } 8113 8114 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8115 const struct rtw89_fw_mrc_add_arg *arg) 8116 { 8117 struct rtw89_h2c_mrc_add *h2c_head; 8118 struct sk_buff *skb; 8119 unsigned int i; 8120 #if defined(__linux__) 8121 void *tmp; 8122 #elif defined(__FreeBSD__) 8123 u8 *tmp; 8124 #endif 8125 u32 len; 8126 int ret; 8127 8128 len = sizeof(*h2c_head); 8129 for (i = 0; i < arg->slot_num; i++) 8130 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8131 8132 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8133 if (!skb) { 8134 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8135 return -ENOMEM; 8136 } 8137 8138 skb_put(skb, len); 8139 tmp = skb->data; 8140 8141 #if defined(__linux__) 8142 h2c_head = tmp; 8143 #elif defined(__FreeBSD__) 8144 h2c_head = (void *)tmp; 8145 #endif 8146 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8147 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8148 le32_encode_bits(arg->sch_type, 8149 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8150 le32_encode_bits(arg->slot_num, 8151 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8152 le32_encode_bits(arg->btc_in_sch, 8153 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8154 8155 tmp += sizeof(*h2c_head); 8156 for (i = 0; i < arg->slot_num; i++) 8157 #if defined(__linux__) 8158 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8159 #elif defined(__FreeBSD__) 8160 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], (void *)tmp); 8161 #endif 8162 8163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8164 H2C_CAT_MAC, 8165 H2C_CL_MRC, 8166 H2C_FUNC_ADD_MRC, 0, 0, 8167 len); 8168 8169 ret = rtw89_h2c_tx(rtwdev, skb, false); 8170 if (ret) { 8171 rtw89_err(rtwdev, "failed to send h2c\n"); 8172 dev_kfree_skb_any(skb); 8173 return -EBUSY; 8174 } 8175 8176 return 0; 8177 } 8178 8179 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8180 const struct rtw89_fw_mrc_start_arg *arg) 8181 { 8182 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8183 struct rtw89_h2c_mrc_start *h2c; 8184 u32 len = sizeof(*h2c); 8185 struct sk_buff *skb; 8186 unsigned int cond; 8187 8188 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8189 if (!skb) { 8190 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8191 return -ENOMEM; 8192 } 8193 8194 skb_put(skb, len); 8195 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8196 8197 h2c->w0 = le32_encode_bits(arg->sch_idx, 8198 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8199 le32_encode_bits(arg->old_sch_idx, 8200 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8201 le32_encode_bits(arg->action, 8202 RTW89_H2C_MRC_START_W0_ACTION); 8203 8204 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8205 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8206 8207 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8208 H2C_CAT_MAC, 8209 H2C_CL_MRC, 8210 H2C_FUNC_START_MRC, 0, 0, 8211 len); 8212 8213 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8214 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8215 } 8216 8217 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8218 { 8219 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8220 struct rtw89_h2c_mrc_del *h2c; 8221 u32 len 
= sizeof(*h2c); 8222 struct sk_buff *skb; 8223 unsigned int cond; 8224 8225 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8226 if (!skb) { 8227 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8228 return -ENOMEM; 8229 } 8230 8231 skb_put(skb, len); 8232 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8233 8234 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8235 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8236 8237 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8238 H2C_CAT_MAC, 8239 H2C_CL_MRC, 8240 H2C_FUNC_DEL_MRC, 0, 0, 8241 len); 8242 8243 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8244 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8245 } 8246 8247 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8248 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8249 struct rtw89_mac_mrc_tsf_rpt *rpt) 8250 { 8251 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8252 struct rtw89_h2c_mrc_req_tsf *h2c; 8253 struct rtw89_mac_mrc_tsf_rpt *tmp; 8254 struct sk_buff *skb; 8255 unsigned int i; 8256 u32 len; 8257 int ret; 8258 8259 len = struct_size(h2c, infos, arg->num); 8260 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8261 if (!skb) { 8262 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8263 return -ENOMEM; 8264 } 8265 8266 skb_put(skb, len); 8267 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8268 8269 h2c->req_tsf_num = arg->num; 8270 for (i = 0; i < arg->num; i++) 8271 h2c->infos[i] = 8272 u8_encode_bits(arg->infos[i].band, 8273 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8274 u8_encode_bits(arg->infos[i].port, 8275 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8276 8277 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8278 H2C_CAT_MAC, 8279 H2C_CL_MRC, 8280 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8281 len); 8282 8283 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8284 if (ret) 8285 return ret; 8286 8287 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8288 *rpt = *tmp; 8289 8290 return 0; 8291 } 8292 8293 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8294 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8295 { 8296 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8297 u32 len = sizeof(*h2c); 8298 struct sk_buff *skb; 8299 int ret; 8300 8301 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8302 if (!skb) { 8303 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8304 return -ENOMEM; 8305 } 8306 8307 skb_put(skb, len); 8308 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8309 8310 h2c->w0 = le32_encode_bits(arg->sch_idx, 8311 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8312 le32_encode_bits(arg->action, 8313 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8314 le32_encode_bits(arg->macid, 8315 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8316 h2c->w1 = le32_encode_bits(arg->client_macid, 8317 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8318 8319 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8320 H2C_CAT_MAC, 8321 H2C_CL_MRC, 8322 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8323 len); 8324 8325 ret = rtw89_h2c_tx(rtwdev, skb, false); 8326 if (ret) { 8327 rtw89_err(rtwdev, "failed to send h2c\n"); 8328 dev_kfree_skb_any(skb); 8329 return -EBUSY; 8330 } 8331 8332 return 0; 8333 } 8334 8335 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8336 const struct rtw89_fw_mrc_sync_arg *arg) 8337 { 8338 struct rtw89_h2c_mrc_sync *h2c; 8339 u32 len = sizeof(*h2c); 8340 struct sk_buff *skb; 8341 int ret; 8342 8343 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8344 if (!skb) { 8345 rtw89_err(rtwdev, "failed to alloc skb 
for mrc sync\n"); 8346 return -ENOMEM; 8347 } 8348 8349 skb_put(skb, len); 8350 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8351 8352 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8353 le32_encode_bits(arg->src.port, 8354 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8355 le32_encode_bits(arg->src.band, 8356 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8357 le32_encode_bits(arg->dest.port, 8358 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8359 le32_encode_bits(arg->dest.band, 8360 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8361 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8362 8363 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8364 H2C_CAT_MAC, 8365 H2C_CL_MRC, 8366 H2C_FUNC_MRC_SYNC, 0, 0, 8367 len); 8368 8369 ret = rtw89_h2c_tx(rtwdev, skb, false); 8370 if (ret) { 8371 rtw89_err(rtwdev, "failed to send h2c\n"); 8372 dev_kfree_skb_any(skb); 8373 return -EBUSY; 8374 } 8375 8376 return 0; 8377 } 8378 8379 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8380 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8381 { 8382 struct rtw89_h2c_mrc_upd_duration *h2c; 8383 struct sk_buff *skb; 8384 unsigned int i; 8385 u32 len; 8386 int ret; 8387 8388 len = struct_size(h2c, slots, arg->slot_num); 8389 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8390 if (!skb) { 8391 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8392 return -ENOMEM; 8393 } 8394 8395 skb_put(skb, len); 8396 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8397 8398 h2c->w0 = le32_encode_bits(arg->sch_idx, 8399 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8400 le32_encode_bits(arg->slot_num, 8401 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8402 le32_encode_bits(false, 8403 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8404 8405 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8406 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8407 8408 for (i = 0; i < arg->slot_num; i++) { 8409 h2c->slots[i] = 8410 le32_encode_bits(arg->slots[i].slot_idx, 8411 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8412 le32_encode_bits(arg->slots[i].duration, 8413 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8414 } 8415 8416 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8417 H2C_CAT_MAC, 8418 H2C_CL_MRC, 8419 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8420 len); 8421 8422 ret = rtw89_h2c_tx(rtwdev, skb, false); 8423 if (ret) { 8424 rtw89_err(rtwdev, "failed to send h2c\n"); 8425 dev_kfree_skb_any(skb); 8426 return -EBUSY; 8427 } 8428 8429 return 0; 8430 } 8431 8432 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8433 { 8434 struct rtw89_h2c_ap_info *h2c; 8435 u32 len = sizeof(*h2c); 8436 struct sk_buff *skb; 8437 int ret; 8438 8439 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8440 if (!skb) { 8441 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8442 return -ENOMEM; 8443 } 8444 8445 skb_put(skb, len); 8446 h2c = (struct rtw89_h2c_ap_info *)skb->data; 8447 8448 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8449 8450 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8451 H2C_CAT_MAC, 8452 H2C_CL_AP, 8453 H2C_FUNC_AP_INFO, 0, 0, 8454 len); 8455 8456 ret = rtw89_h2c_tx(rtwdev, skb, false); 8457 if (ret) { 8458 rtw89_err(rtwdev, "failed to send h2c\n"); 8459 dev_kfree_skb_any(skb); 8460 return -EBUSY; 8461 } 8462 8463 return 0; 8464 } 8465 8466 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8467 { 8468 int ret; 8469 8470 if (en) { 8471 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8472 return 0; 8473 } else { 8474 if 
(!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8475 return 0; 8476 } 8477 8478 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8479 if (ret) { 8480 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8481 return ret; 8482 8483 /* During recovery, neither driver nor stack has full error 8484 * handling, so show a warning, but return 0 with refcount 8485 * increased normally. It can avoid underflow when calling 8486 * with @en == false later. 8487 */ 8488 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8489 } 8490 8491 if (en) 8492 refcount_set(&rtwdev->refcount_ap_info, 1); 8493 8494 return 0; 8495 } 8496 8497 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8498 { 8499 static const u8 zeros[U8_MAX] = {}; 8500 8501 return memcmp(ext_ptr, zeros, ext_len) == 0; 8502 } 8503 8504 #if defined(__linux__) 8505 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8506 ({ \ 8507 u8 __var_sz = sizeof(*(e)); \ 8508 bool __accept; \ 8509 if (__var_sz >= (ent_sz)) \ 8510 __accept = true; \ 8511 else \ 8512 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8513 (ent_sz) - __var_sz);\ 8514 __accept; \ 8515 }) 8516 #elif defined(__FreeBSD__) 8517 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8518 ({ \ 8519 u8 __var_sz = sizeof(*(e)); \ 8520 bool __accept; \ 8521 if (__var_sz >= (ent_sz)) \ 8522 __accept = true; \ 8523 else \ 8524 __accept = __fw_txpwr_entry_zero_ext((const u8 *)(cursor) + __var_sz,\ 8525 (ent_sz) - __var_sz);\ 8526 __accept; \ 8527 }) 8528 #endif 8529 8530 static bool 8531 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8532 const void *cursor, 8533 const struct rtw89_txpwr_conf *conf) 8534 { 8535 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8536 return false; 8537 8538 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8539 return false; 8540 8541 switch (e->rs) { 8542 case RTW89_RS_CCK: 8543 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8544 return false; 8545 break; 8546 case RTW89_RS_OFDM: 8547 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8548 return false; 8549 break; 8550 case RTW89_RS_MCS: 8551 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8552 e->nss >= RTW89_NSS_NUM || 8553 e->ofdma >= RTW89_OFDMA_NUM) 8554 return false; 8555 break; 8556 case RTW89_RS_HEDCM: 8557 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8558 e->nss >= RTW89_NSS_HEDCM_NUM || 8559 e->ofdma >= RTW89_OFDMA_NUM) 8560 return false; 8561 break; 8562 case RTW89_RS_OFFSET: 8563 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8564 return false; 8565 break; 8566 default: 8567 return false; 8568 } 8569 8570 return true; 8571 } 8572 8573 static 8574 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8575 const struct rtw89_txpwr_table *tbl) 8576 { 8577 const struct rtw89_txpwr_conf *conf = tbl->data; 8578 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8579 struct rtw89_txpwr_byrate *byr_head; 8580 struct rtw89_rate_desc desc = {}; 8581 #if defined(__linux__) 8582 const void *cursor; 8583 #elif defined(__FreeBSD__) 8584 const u8 *cursor; 8585 #endif 8586 u32 data; 8587 s8 *byr; 8588 int i; 8589 8590 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8591 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8592 continue; 8593 8594 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8595 data = le32_to_cpu(entry.data); 8596 desc.ofdma = entry.ofdma; 8597 desc.nss = entry.nss; 8598 desc.rs = entry.rs; 8599 8600 for (i = 0; i < entry.len; i++, data >>= 8) { 8601 desc.idx = entry.shf + i; 8602 byr = 
rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8603 *byr = data & 0xff; 8604 } 8605 } 8606 } 8607 8608 static bool 8609 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8610 const void *cursor, 8611 const struct rtw89_txpwr_conf *conf) 8612 { 8613 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8614 return false; 8615 8616 if (e->bw >= RTW89_2G_BW_NUM) 8617 return false; 8618 if (e->nt >= RTW89_NTX_NUM) 8619 return false; 8620 if (e->rs >= RTW89_RS_LMT_NUM) 8621 return false; 8622 if (e->bf >= RTW89_BF_NUM) 8623 return false; 8624 if (e->regd >= RTW89_REGD_NUM) 8625 return false; 8626 if (e->ch_idx >= RTW89_2G_CH_NUM) 8627 return false; 8628 8629 return true; 8630 } 8631 8632 static 8633 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8634 { 8635 const struct rtw89_txpwr_conf *conf = &data->conf; 8636 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8637 #if defined(__linux__) 8638 const void *cursor; 8639 #elif defined(__FreeBSD__) 8640 const u8 *cursor; 8641 #endif 8642 8643 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8644 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8645 continue; 8646 8647 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8648 [entry.ch_idx] = entry.v; 8649 } 8650 } 8651 8652 static bool 8653 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8654 const void *cursor, 8655 const struct rtw89_txpwr_conf *conf) 8656 { 8657 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8658 return false; 8659 8660 if (e->bw >= RTW89_5G_BW_NUM) 8661 return false; 8662 if (e->nt >= RTW89_NTX_NUM) 8663 return false; 8664 if (e->rs >= RTW89_RS_LMT_NUM) 8665 return false; 8666 if (e->bf >= RTW89_BF_NUM) 8667 return false; 8668 if (e->regd >= RTW89_REGD_NUM) 8669 return false; 8670 if (e->ch_idx >= RTW89_5G_CH_NUM) 8671 return false; 8672 8673 return true; 8674 } 8675 8676 static 8677 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8678 { 8679 const struct rtw89_txpwr_conf *conf = &data->conf; 8680 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8681 #if defined(__linux__) 8682 const void *cursor; 8683 #elif defined(__FreeBSD__) 8684 const u8 *cursor; 8685 #endif 8686 8687 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8688 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8689 continue; 8690 8691 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8692 [entry.ch_idx] = entry.v; 8693 } 8694 } 8695 8696 static bool 8697 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8698 const void *cursor, 8699 const struct rtw89_txpwr_conf *conf) 8700 { 8701 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8702 return false; 8703 8704 if (e->bw >= RTW89_6G_BW_NUM) 8705 return false; 8706 if (e->nt >= RTW89_NTX_NUM) 8707 return false; 8708 if (e->rs >= RTW89_RS_LMT_NUM) 8709 return false; 8710 if (e->bf >= RTW89_BF_NUM) 8711 return false; 8712 if (e->regd >= RTW89_REGD_NUM) 8713 return false; 8714 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8715 return false; 8716 if (e->ch_idx >= RTW89_6G_CH_NUM) 8717 return false; 8718 8719 return true; 8720 } 8721 8722 static 8723 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8724 { 8725 const struct rtw89_txpwr_conf *conf = &data->conf; 8726 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8727 #if defined(__linux__) 8728 const void *cursor; 8729 #elif defined(__FreeBSD__) 8730 const u8 *cursor; 8731 #endif 8732 8733 
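/* Walk the firmware-provided table entry by entry, skip anything that fails
 * validation, and store each value indexed by bandwidth, ntx, rate section,
 * beamforming, regulatory domain, 6 GHz power type and channel.
 */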
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8734 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8735 continue; 8736 8737 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8738 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8739 } 8740 } 8741 8742 static bool 8743 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8744 const void *cursor, 8745 const struct rtw89_txpwr_conf *conf) 8746 { 8747 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8748 return false; 8749 8750 if (e->ru >= RTW89_RU_NUM) 8751 return false; 8752 if (e->nt >= RTW89_NTX_NUM) 8753 return false; 8754 if (e->regd >= RTW89_REGD_NUM) 8755 return false; 8756 if (e->ch_idx >= RTW89_2G_CH_NUM) 8757 return false; 8758 8759 return true; 8760 } 8761 8762 static 8763 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8764 { 8765 const struct rtw89_txpwr_conf *conf = &data->conf; 8766 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8767 #if defined(__linux__) 8768 const void *cursor; 8769 #elif defined(__FreeBSD__) 8770 const u8 *cursor; 8771 #endif 8772 8773 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8774 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 8775 continue; 8776 8777 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8778 } 8779 } 8780 8781 static bool 8782 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8783 const void *cursor, 8784 const struct rtw89_txpwr_conf *conf) 8785 { 8786 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8787 return false; 8788 8789 if (e->ru >= RTW89_RU_NUM) 8790 return false; 8791 if (e->nt >= RTW89_NTX_NUM) 8792 return false; 8793 if (e->regd >= RTW89_REGD_NUM) 8794 return false; 8795 if (e->ch_idx >= RTW89_5G_CH_NUM) 8796 return false; 8797 8798 return true; 8799 } 8800 8801 static 8802 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8803 { 8804 const struct rtw89_txpwr_conf *conf = &data->conf; 8805 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8806 #if defined(__linux__) 8807 const void *cursor; 8808 #elif defined(__FreeBSD__) 8809 const u8 *cursor; 8810 #endif 8811 8812 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8813 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8814 continue; 8815 8816 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8817 } 8818 } 8819 8820 static bool 8821 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8822 const void *cursor, 8823 const struct rtw89_txpwr_conf *conf) 8824 { 8825 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8826 return false; 8827 8828 if (e->ru >= RTW89_RU_NUM) 8829 return false; 8830 if (e->nt >= RTW89_NTX_NUM) 8831 return false; 8832 if (e->regd >= RTW89_REGD_NUM) 8833 return false; 8834 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8835 return false; 8836 if (e->ch_idx >= RTW89_6G_CH_NUM) 8837 return false; 8838 8839 return true; 8840 } 8841 8842 static 8843 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8844 { 8845 const struct rtw89_txpwr_conf *conf = &data->conf; 8846 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8847 #if defined(__linux__) 8848 const void *cursor; 8849 #elif defined(__FreeBSD__) 8850 const u8 *cursor; 8851 #endif 8852 8853 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8854 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8855 continue; 8856 8857 
data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8858 [entry.ch_idx] = entry.v; 8859 } 8860 } 8861 8862 static bool 8863 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 8864 const void *cursor, 8865 const struct rtw89_txpwr_conf *conf) 8866 { 8867 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8868 return false; 8869 8870 if (e->band >= RTW89_BAND_NUM) 8871 return false; 8872 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8873 return false; 8874 if (e->regd >= RTW89_REGD_NUM) 8875 return false; 8876 8877 return true; 8878 } 8879 8880 static 8881 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8882 { 8883 const struct rtw89_txpwr_conf *conf = &data->conf; 8884 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8885 #if defined(__linux__) 8886 const void *cursor; 8887 #elif defined(__FreeBSD__) 8888 const u8 *cursor; 8889 #endif 8890 8891 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8892 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8893 continue; 8894 8895 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8896 } 8897 } 8898 8899 static bool 8900 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8901 const void *cursor, 8902 const struct rtw89_txpwr_conf *conf) 8903 { 8904 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8905 return false; 8906 8907 if (e->band >= RTW89_BAND_NUM) 8908 return false; 8909 if (e->regd >= RTW89_REGD_NUM) 8910 return false; 8911 8912 return true; 8913 } 8914 8915 static 8916 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8917 { 8918 const struct rtw89_txpwr_conf *conf = &data->conf; 8919 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8920 #if defined(__linux__) 8921 const void *cursor; 8922 #elif defined(__FreeBSD__) 8923 const u8 *cursor; 8924 #endif 8925 8926 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8927 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8928 continue; 8929 8930 data->v[entry.band][entry.regd] = entry.v; 8931 } 8932 } 8933 8934 const struct rtw89_rfe_parms * 8935 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8936 const struct rtw89_rfe_parms *init) 8937 { 8938 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8939 struct rtw89_rfe_parms *parms; 8940 8941 if (!rfe_data) 8942 return init; 8943 8944 parms = &rfe_data->rfe_parms; 8945 if (init) 8946 *parms = *init; 8947 8948 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8949 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8950 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8951 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8952 parms->byr_tbl = &rfe_data->byrate.tbl; 8953 } 8954 8955 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8956 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8957 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8958 } 8959 8960 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8961 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8962 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8963 } 8964 8965 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8966 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8967 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8968 } 8969 8970 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8971 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8972 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8973 } 8974 8975 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8976 
rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8977 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8978 } 8979 8980 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8981 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8982 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8983 } 8984 8985 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8986 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8987 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8988 } 8989 8990 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8991 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8992 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8993 } 8994 8995 return parms; 8996 } 8997
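/* Illustrative sketch (not part of the driver): the comment above
 * rtw89_h2c_tx_and_wait() documents a three-way return convention, and a
 * hypothetical caller that wants to tell a real failure apart from a wait
 * that was intentionally skipped (e.g. during SER recovery) could look
 * roughly like the function below. The name example_reset_mcc_group() is
 * made up for illustration; callers in this file generally just return the
 * value as-is.
 *
 *	static int example_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group)
 *	{
 *		int ret;
 *
 *		ret = rtw89_fw_h2c_reset_mcc_group(rtwdev, group);
 *		if (ret < 0)
 *			return ret;	// H2C tx failed or FW never acked
 *		if (ret > 0)		// wait skipped by design, e.g. SER
 *			rtw89_debug(rtwdev, RTW89_DBG_FW,
 *				    "reset of MCC group %d skipped\n", group);
 *		return 0;
 *	}
 */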