1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 struct rtw89_eapol_2_of_2 { 19 u8 gtkbody[14]; 20 u8 key_des_ver; 21 u8 rsvd[92]; 22 } __packed; 23 24 struct rtw89_sa_query { 25 u8 category; 26 u8 action; 27 } __packed; 28 29 struct rtw89_arp_rsp { 30 u8 llc_hdr[sizeof(rfc1042_header)]; 31 __be16 llc_type; 32 struct arphdr arp_hdr; 33 u8 sender_hw[ETH_ALEN]; 34 __be32 sender_ip; 35 u8 target_hw[ETH_ALEN]; 36 __be32 target_ip; 37 } __packed; 38 39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 40 41 union rtw89_fw_element_arg { 42 size_t offset; 43 enum rtw89_rf_path rf_path; 44 enum rtw89_fw_type fw_type; 45 }; 46 47 struct rtw89_fw_element_handler { 48 int (*fn)(struct rtw89_dev *rtwdev, 49 const struct rtw89_fw_element_hdr *elm, 50 const union rtw89_fw_element_arg arg); 51 const union rtw89_fw_element_arg arg; 52 const char *name; 53 }; 54 55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 56 struct sk_buff *skb); 57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 58 struct rtw89_wait_info *wait, unsigned int cond); 59 static int __parse_security_section(struct rtw89_dev *rtwdev, 60 struct rtw89_fw_bin_info *info, 61 struct rtw89_fw_hdr_section_info *section_info, 62 const void *content, 63 u32 *mssc_len); 64 65 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 66 bool header) 67 { 68 struct sk_buff *skb; 69 u32 header_len = 0; 70 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 71 72 if (header) 73 header_len = H2C_HEADER_LEN; 74 75 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 76 if (!skb) 77 return NULL; 78 skb_reserve(skb, header_len + h2c_desc_size); 79 memset(skb->data, 0, len); 80 81 return skb; 82 } 83 84 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 85 { 86 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 87 } 88 89 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 90 { 91 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 92 } 93 94 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 95 { 96 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 97 u8 val; 98 int ret; 99 100 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 101 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 102 1, FWDL_WAIT_CNT, false, rtwdev, type); 103 if (ret) { 104 switch (val) { 105 case RTW89_FWDL_CHECKSUM_FAIL: 106 rtw89_err(rtwdev, "fw checksum fail\n"); 107 return -EINVAL; 108 109 case RTW89_FWDL_SECURITY_FAIL: 110 rtw89_err(rtwdev, "fw security fail\n"); 111 return -EINVAL; 112 113 case RTW89_FWDL_CV_NOT_MATCH: 114 rtw89_err(rtwdev, "fw cv not match\n"); 115 return -EINVAL; 116 117 default: 118 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 119 return -EBUSY; 120 } 121 } 122 123 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 124 125 return 0; 126 } 127 128 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 129 struct rtw89_fw_bin_info *info) 130 { 131 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 132 const struct rtw89_chip_info *chip = rtwdev->chip; 133 struct rtw89_fw_hdr_section_info *section_info; 134 struct 
rtw89_fw_secure *sec = &rtwdev->fw.sec; 135 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 136 const struct rtw89_fw_hdr_section *section; 137 const u8 *fw_end = fw + len; 138 const u8 *bin; 139 u32 base_hdr_len; 140 u32 mssc_len; 141 int ret; 142 u32 i; 143 144 if (!info) 145 return -EINVAL; 146 147 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 148 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 149 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 150 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 151 152 if (info->dynamic_hdr_en) { 153 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 154 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 155 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 156 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 157 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 158 return -EINVAL; 159 } 160 } else { 161 info->hdr_len = base_hdr_len; 162 info->dynamic_hdr_len = 0; 163 } 164 165 bin = fw + info->hdr_len; 166 167 /* jump to section header */ 168 section_info = info->section_info; 169 for (i = 0; i < info->section_num; i++) { 170 section = &fw_hdr->sections[i]; 171 section_info->type = 172 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 173 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 174 175 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 176 section_info->len += FWDL_SECTION_CHKSUM_LEN; 177 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 178 section_info->dladdr = 179 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 180 section_info->addr = bin; 181 182 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 183 section_info->mssc = 184 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 185 186 ret = __parse_security_section(rtwdev, info, section_info, 187 bin, &mssc_len); 188 if (ret) 189 return ret; 190 191 if (sec->secure_boot && chip->chip_id == RTL8852B) 192 section_info->len_override = 960; 193 } else { 194 section_info->mssc = 0; 195 mssc_len = 0; 196 } 197 198 rtw89_debug(rtwdev, RTW89_DBG_FW, 199 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 200 i, section_info->type, section_info->len, 201 section_info->mssc, mssc_len, bin - fw); 202 rtw89_debug(rtwdev, RTW89_DBG_FW, 203 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 204 section_info->ignore, section_info->key_addr, 205 section_info->key_addr ? 
206 section_info->key_addr - section_info->addr : 0, 207 section_info->key_len, section_info->key_idx); 208 209 bin += section_info->len + mssc_len; 210 section_info++; 211 } 212 213 if (fw_end != bin) { 214 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 215 return -EINVAL; 216 } 217 218 return 0; 219 } 220 221 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 222 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 223 u32 rmp_tbl_size, u32 *key_idx) 224 { 225 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 226 u32 sel_byte_idx; 227 u32 mss_sel_idx; 228 u8 sel_bit_idx; 229 int i; 230 231 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 232 if (!mss_hdr->defen) 233 return -ENOENT; 234 235 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 236 sec->mss_key_num; 237 } else { 238 if (mss_hdr->defen) 239 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 240 else 241 mss_sel_idx = 0; 242 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 243 le16_to_cpu(mss_hdr->msscust_max) + 244 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 245 sec->mss_key_num; 246 } 247 248 sel_byte_idx = mss_sel_idx >> 3; 249 sel_bit_idx = mss_sel_idx & 0x7; 250 251 if (sel_byte_idx >= rmp_tbl_size) 252 return -EFAULT; 253 254 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 255 return -ENOENT; 256 257 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 258 259 for (i = 0; i < sel_byte_idx; i++) 260 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 261 262 return 0; 263 } 264 265 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 266 struct rtw89_fw_bin_info *info, 267 struct rtw89_fw_hdr_section_info *section_info, 268 const void *content, 269 u32 *mssc_len) 270 { 271 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 272 const union rtw89_fw_section_mssc_content *section_content = content; 273 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 274 u32 rmp_tbl_size; 275 u32 key_sign_len; 276 u32 real_key_idx; 277 u32 sb_sel_ver; 278 int ret; 279 280 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 281 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 282 return -ENOENT; 283 } 284 285 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 286 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 287 le16_to_cpu(mss_hdr->msscust_max) * 288 mss_hdr->mssdev_max) >> 3; 289 if (mss_hdr->defen) 290 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 291 } else { 292 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupported:%X\n", 293 mss_hdr->rmpfmt); 294 return -EINVAL; 295 } 296 297 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 298 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 299 rmp_tbl_size, (int)sizeof(*mss_hdr), 300 le32_to_cpu(mss_hdr->key_raw_offset)); 301 return -EINVAL; 302 } 303 304 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 305 if (!key_sign_len) 306 key_sign_len = 512; 307 308 if (info->dsp_checksum) 309 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 310 311 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 312 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 313 314 if (!sec->secure_boot) 315 goto out; 316 317 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 318 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 319 goto ignore; 320 321 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 322 if (ret) 323 goto ignore; 324 325 section_info->key_addr = content + 
section_info->len + 326 le32_to_cpu(mss_hdr->key_raw_offset) + 327 key_sign_len * real_key_idx; 328 section_info->key_len = key_sign_len; 329 section_info->key_idx = real_key_idx; 330 331 out: 332 if (info->secure_section_exist) { 333 section_info->ignore = true; 334 return 0; 335 } 336 337 info->secure_section_exist = true; 338 339 return 0; 340 341 ignore: 342 section_info->ignore = true; 343 344 return 0; 345 } 346 347 static int __parse_security_section(struct rtw89_dev *rtwdev, 348 struct rtw89_fw_bin_info *info, 349 struct rtw89_fw_hdr_section_info *section_info, 350 const void *content, 351 u32 *mssc_len) 352 { 353 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 354 int ret; 355 356 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 357 ret = __parse_formatted_mssc(rtwdev, info, section_info, 358 content, mssc_len); 359 if (ret) 360 return -EINVAL; 361 } else { 362 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 363 if (info->dsp_checksum) 364 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 365 366 if (sec->secure_boot) { 367 if (sec->mss_idx >= section_info->mssc) 368 return -EFAULT; 369 section_info->key_addr = content + section_info->len + 370 sec->mss_idx * FWDL_SECURITY_SIGLEN; 371 section_info->key_len = FWDL_SECURITY_SIGLEN; 372 } 373 374 info->secure_section_exist = true; 375 } 376 377 return 0; 378 } 379 380 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 381 struct rtw89_fw_bin_info *info) 382 { 383 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 384 struct rtw89_fw_hdr_section_info *section_info; 385 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 386 const struct rtw89_fw_hdr_section_v1 *section; 387 const u8 *fw_end = fw + len; 388 const u8 *bin; 389 u32 base_hdr_len; 390 u32 mssc_len; 391 int ret; 392 u32 i; 393 394 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 395 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 396 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 397 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 398 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 399 400 if (info->dynamic_hdr_en) { 401 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 402 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 403 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 404 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 405 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 406 return -EINVAL; 407 } 408 } else { 409 info->hdr_len = base_hdr_len; 410 info->dynamic_hdr_len = 0; 411 } 412 413 bin = fw + info->hdr_len; 414 415 /* jump to section header */ 416 section_info = info->section_info; 417 for (i = 0; i < info->section_num; i++) { 418 section = &fw_hdr->sections[i]; 419 420 section_info->type = 421 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 422 section_info->len = 423 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 424 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 425 section_info->len += FWDL_SECTION_CHKSUM_LEN; 426 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 427 section_info->dladdr = 428 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 429 section_info->addr = bin; 430 431 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 432 section_info->mssc = 433 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 434 435 
ret = __parse_security_section(rtwdev, info, section_info, 436 bin, &mssc_len); 437 if (ret) 438 return ret; 439 } else { 440 section_info->mssc = 0; 441 mssc_len = 0; 442 } 443 444 rtw89_debug(rtwdev, RTW89_DBG_FW, 445 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 446 i, section_info->type, section_info->len, 447 section_info->mssc, mssc_len, bin - fw); 448 rtw89_debug(rtwdev, RTW89_DBG_FW, 449 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 450 section_info->ignore, section_info->key_addr, 451 section_info->key_addr ? 452 section_info->key_addr - section_info->addr : 0, 453 section_info->key_len, section_info->key_idx); 454 455 bin += section_info->len + mssc_len; 456 section_info++; 457 } 458 459 if (fw_end != bin) { 460 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 461 return -EINVAL; 462 } 463 464 if (!info->secure_section_exist) 465 rtw89_warn(rtwdev, "no firmware secure section\n"); 466 467 return 0; 468 } 469 470 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 471 const struct rtw89_fw_suit *fw_suit, 472 struct rtw89_fw_bin_info *info) 473 { 474 const u8 *fw = fw_suit->data; 475 u32 len = fw_suit->size; 476 477 if (!fw || !len) { 478 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 479 return -ENOENT; 480 } 481 482 switch (fw_suit->hdr_ver) { 483 case 0: 484 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 485 case 1: 486 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 487 default: 488 return -ENOENT; 489 } 490 } 491 492 static int rtw89_mfw_validate_hdr(struct rtw89_dev *rtwdev, 493 const struct firmware *firmware, 494 const struct rtw89_mfw_hdr *mfw_hdr) 495 { 496 const void *mfw = firmware->data; 497 u32 mfw_len = firmware->size; 498 u8 fw_nr = mfw_hdr->fw_nr; 499 const void *ptr; 500 501 if (fw_nr == 0) { 502 rtw89_err(rtwdev, "mfw header has no fw entry\n"); 503 return -ENOENT; 504 } 505 506 ptr = &mfw_hdr->info[fw_nr]; 507 508 if (ptr > mfw + mfw_len) { 509 rtw89_err(rtwdev, "mfw header out of address\n"); 510 return -EFAULT; 511 } 512 513 return 0; 514 } 515 516 static 517 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 518 struct rtw89_fw_suit *fw_suit, bool nowarn) 519 { 520 struct rtw89_fw_info *fw_info = &rtwdev->fw; 521 const struct firmware *firmware = fw_info->req.firmware; 522 const u8 *mfw = firmware->data; 523 u32 mfw_len = firmware->size; 524 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 525 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 526 int ret; 527 int i; 528 529 if (mfw_hdr->sig != RTW89_MFW_SIG) { 530 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 531 /* legacy firmware supports the normal type only */ 532 if (type != RTW89_FW_NORMAL) 533 return -EINVAL; 534 fw_suit->data = mfw; 535 fw_suit->size = mfw_len; 536 return 0; 537 } 538 539 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 540 if (ret) 541 return ret; 542 543 for (i = 0; i < mfw_hdr->fw_nr; i++) { 544 tmp = &mfw_hdr->info[i]; 545 if (tmp->type != type) 546 continue; 547 548 if (type == RTW89_FW_LOGFMT) { 549 mfw_info = tmp; 550 goto found; 551 } 552 553 /* WiFi firmware versions in the firmware file are not sorted, so 554 * scan all entries to find the closest version that is equal to or less than the chip CV. 
555 */ 556 if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { 557 if (!mfw_info || mfw_info->cv < tmp->cv) 558 mfw_info = tmp; 559 } 560 } 561 562 if (mfw_info) 563 goto found; 564 565 if (!nowarn) 566 rtw89_err(rtwdev, "no suitable firmware found\n"); 567 return -ENOENT; 568 569 found: 570 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 571 fw_suit->size = le32_to_cpu(mfw_info->size); 572 573 if (fw_suit->data + fw_suit->size > mfw + mfw_len) { 574 rtw89_err(rtwdev, "fw_suit %d out of address\n", type); 575 return -EFAULT; 576 } 577 578 return 0; 579 } 580 581 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 582 { 583 struct rtw89_fw_info *fw_info = &rtwdev->fw; 584 const struct firmware *firmware = fw_info->req.firmware; 585 const struct rtw89_mfw_hdr *mfw_hdr = 586 (const struct rtw89_mfw_hdr *)firmware->data; 587 const struct rtw89_mfw_info *mfw_info; 588 u32 size; 589 int ret; 590 591 if (mfw_hdr->sig != RTW89_MFW_SIG) { 592 rtw89_warn(rtwdev, "not mfw format\n"); 593 return 0; 594 } 595 596 ret = rtw89_mfw_validate_hdr(rtwdev, firmware, mfw_hdr); 597 if (ret) 598 return ret; 599 600 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 601 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 602 603 return size; 604 } 605 606 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 607 struct rtw89_fw_suit *fw_suit, 608 const struct rtw89_fw_hdr *hdr) 609 { 610 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 611 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 612 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 613 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 614 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 615 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 616 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 617 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 618 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 619 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 620 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 621 } 622 623 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 624 struct rtw89_fw_suit *fw_suit, 625 const struct rtw89_fw_hdr_v1 *hdr) 626 { 627 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 628 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 629 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 630 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 631 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 632 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 633 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 634 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 635 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 636 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 637 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 638 } 639 640 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 641 enum rtw89_fw_type type, 642 struct rtw89_fw_suit *fw_suit) 643 { 644 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 645 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 646 647 if (type == RTW89_FW_LOGFMT) 648 return 0; 649 650 fw_suit->type = type; 651 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 652 
653 switch (fw_suit->hdr_ver) { 654 case 0: 655 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 656 break; 657 case 1: 658 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 659 break; 660 default: 661 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 662 fw_suit->hdr_ver); 663 return -ENOENT; 664 } 665 666 rtw89_info(rtwdev, 667 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 668 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 669 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 670 671 return 0; 672 } 673 674 static 675 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 676 bool nowarn) 677 { 678 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 679 int ret; 680 681 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 682 if (ret) 683 return ret; 684 685 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 686 } 687 688 static 689 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 690 const struct rtw89_fw_element_hdr *elm, 691 const union rtw89_fw_element_arg arg) 692 { 693 enum rtw89_fw_type type = arg.fw_type; 694 struct rtw89_hal *hal = &rtwdev->hal; 695 struct rtw89_fw_suit *fw_suit; 696 697 /* BB MCU firmware versions in the firmware file are in decreasing order, 698 * so take the first version that is equal to or less than the chip CV. 699 */ 700 if (hal->cv < elm->u.bbmcu.cv) 701 return 1; /* ignore this element */ 702 703 fw_suit = rtw89_fw_suit_get(rtwdev, type); 704 if (fw_suit->data) 705 return 1; /* ignore this element (a firmware is already taken) */ 706 707 fw_suit->data = elm->u.bbmcu.contents; 708 fw_suit->size = le32_to_cpu(elm->size); 709 710 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 711 } 712 713 #define __DEF_FW_FEAT_COND(__cond, __op) \ 714 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 715 { \ 716 return suit_ver_code __op comp_ver_code; \ 717 } 718 719 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 720 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 721 __DEF_FW_FEAT_COND(lt, <); /* less than */ 722 723 struct __fw_feat_cfg { 724 enum rtw89_core_chip_id chip_id; 725 enum rtw89_fw_feature feature; 726 u32 ver_code; 727 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 728 }; 729 730 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 731 { \ 732 .chip_id = _chip, \ 733 .feature = RTW89_FW_FEATURE_ ## _feat, \ 734 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 735 .cond = __fw_feat_cond_ ## _cond, \ 736 } 737 738 static const struct __fw_feat_cfg fw_feat_tbl[] = { 739 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 740 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 741 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 742 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 743 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 744 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 745 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 746 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 747 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 748 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 749 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 750 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 751 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 752 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 753 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 754 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 
74, 0, NO_LPS_PG), 755 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 756 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 757 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 758 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, BEACON_FILTER), 759 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 760 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 761 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 762 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 763 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 764 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 765 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 766 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 767 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 768 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 769 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 770 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 771 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 772 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 773 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 774 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 775 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 776 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 777 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 778 }; 779 780 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 781 const struct rtw89_chip_info *chip, 782 u32 ver_code) 783 { 784 int i; 785 786 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 787 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 788 789 if (chip->chip_id != ent->chip_id) 790 continue; 791 792 if (ent->cond(ver_code, ent->ver_code)) 793 RTW89_SET_FW_FEATURE(ent->feature, fw); 794 } 795 } 796 797 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 798 { 799 const struct rtw89_chip_info *chip = rtwdev->chip; 800 const struct rtw89_fw_suit *fw_suit; 801 u32 suit_ver_code; 802 803 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 804 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 805 806 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 807 } 808 809 const struct firmware * 810 rtw89_early_fw_feature_recognize(struct device *device, 811 const struct rtw89_chip_info *chip, 812 struct rtw89_fw_info *early_fw, 813 int *used_fw_format) 814 { 815 const struct firmware *firmware; 816 char fw_name[64]; 817 int fw_format; 818 u32 ver_code; 819 int ret; 820 821 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 822 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 823 chip->fw_basename, fw_format); 824 825 ret = request_firmware(&firmware, fw_name, device); 826 if (!ret) { 827 dev_info(device, "loaded firmware %s\n", fw_name); 828 *used_fw_format = fw_format; 829 break; 830 } 831 } 832 833 if (ret) { 834 dev_err(device, "failed to early request firmware: %d\n", ret); 835 return NULL; 836 } 837 838 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 839 840 if (!ver_code) 841 goto out; 842 843 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 844 845 out: 846 return firmware; 847 } 848 849 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 850 { 851 const struct rtw89_chip_variant *variant = rtwdev->variant; 852 const struct rtw89_fw_suit *fw_suit; 853 u32 suit_ver_code; 854 855 if (!variant) 856 return 0; 857 858 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 859 
suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 860 861 if (variant->fw_min_ver_code > suit_ver_code) { 862 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n", 863 variant->fw_min_ver_code); 864 return -ENOENT; 865 } 866 867 return 0; 868 } 869 870 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 871 { 872 const struct rtw89_chip_info *chip = rtwdev->chip; 873 int ret; 874 875 if (chip->try_ce_fw) { 876 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 877 if (!ret) 878 goto normal_done; 879 } 880 881 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 882 if (ret) 883 return ret; 884 885 normal_done: 886 ret = rtw89_fw_validate_ver_required(rtwdev); 887 if (ret) 888 return ret; 889 890 /* It still works even if the wowlan firmware doesn't exist. */ 891 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 892 893 /* It still works even if the log format file doesn't exist. */ 894 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 895 896 rtw89_fw_recognize_features(rtwdev); 897 898 rtw89_coex_recognize_ver(rtwdev); 899 900 return 0; 901 } 902 903 static 904 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 905 const struct rtw89_fw_element_hdr *elm, 906 const union rtw89_fw_element_arg arg) 907 { 908 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 909 struct rtw89_phy_table *tbl; 910 struct rtw89_reg2_def *regs; 911 enum rtw89_rf_path rf_path; 912 u32 n_regs, i; 913 u8 idx; 914 915 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 916 if (!tbl) 917 return -ENOMEM; 918 919 switch (le32_to_cpu(elm->id)) { 920 case RTW89_FW_ELEMENT_ID_BB_REG: 921 elm_info->bb_tbl = tbl; 922 break; 923 case RTW89_FW_ELEMENT_ID_BB_GAIN: 924 elm_info->bb_gain = tbl; 925 break; 926 case RTW89_FW_ELEMENT_ID_RADIO_A: 927 case RTW89_FW_ELEMENT_ID_RADIO_B: 928 case RTW89_FW_ELEMENT_ID_RADIO_C: 929 case RTW89_FW_ELEMENT_ID_RADIO_D: 930 rf_path = arg.rf_path; 931 idx = elm->u.reg2.idx; 932 933 elm_info->rf_radio[idx] = tbl; 934 tbl->rf_path = rf_path; 935 tbl->config = rtw89_phy_config_rf_reg_v1; 936 break; 937 case RTW89_FW_ELEMENT_ID_RF_NCTL: 938 elm_info->rf_nctl = tbl; 939 break; 940 default: 941 kfree(tbl); 942 return -ENOENT; 943 } 944 945 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 946 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); 947 if (!regs) 948 goto out; 949 950 for (i = 0; i < n_regs; i++) { 951 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 952 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 953 } 954 955 tbl->n_regs = n_regs; 956 tbl->regs = regs; 957 958 return 0; 959 960 out: 961 kfree(tbl); 962 return -ENOMEM; 963 } 964 965 static 966 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 967 const struct rtw89_fw_element_hdr *elm, 968 const union rtw89_fw_element_arg arg) 969 { 970 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 971 const unsigned long offset = arg.offset; 972 struct rtw89_efuse *efuse = &rtwdev->efuse; 973 struct rtw89_txpwr_conf *conf; 974 975 if (!rtwdev->rfe_data) { 976 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 977 if (!rtwdev->rfe_data) 978 return -ENOMEM; 979 } 980 981 conf = (void *)rtwdev->rfe_data + offset; 982 983 /* if multiple entries match, the last one takes effect */ 984 if (txpwr_elm->rfe_type == efuse->rfe_type) 985 goto setup; 986 987 /* if none matches, accept the default */ 988 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 989 (!rtw89_txpwr_conf_valid(conf) || 990 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 991 goto setup; 992 
993 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 994 elm->id, txpwr_elm->rfe_type); 995 return 0; 996 997 setup: 998 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 999 elm->id, txpwr_elm->rfe_type); 1000 1001 conf->rfe_type = txpwr_elm->rfe_type; 1002 conf->ent_sz = txpwr_elm->ent_sz; 1003 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 1004 conf->data = txpwr_elm->content; 1005 return 0; 1006 } 1007 1008 static 1009 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 1010 const struct rtw89_fw_element_hdr *elm, 1011 const union rtw89_fw_element_arg arg) 1012 { 1013 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1014 const struct rtw89_chip_info *chip = rtwdev->chip; 1015 u32 needed_bitmap = 0; 1016 u32 offset = 0; 1017 int subband; 1018 u32 bitmap; 1019 int type; 1020 1021 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 1022 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 1023 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 1024 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 1025 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 1026 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 1027 1028 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 1029 1030 if ((bitmap & needed_bitmap) != needed_bitmap) { 1031 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n", 1032 needed_bitmap, bitmap); 1033 return -ENOENT; 1034 } 1035 1036 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 1037 if (!elm_info->txpwr_trk) 1038 return -ENOMEM; 1039 1040 for (type = 0; bitmap; type++, bitmap >>= 1) { 1041 if (!(bitmap & BIT(0))) 1042 continue; 1043 1044 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 1045 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 1046 subband = 4; 1047 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 1048 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 1049 subband = 3; 1050 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 1051 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 1052 subband = 1; 1053 else 1054 break; 1055 1056 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 1057 1058 offset += subband; 1059 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 1060 goto err; 1061 } 1062 1063 return 0; 1064 1065 err: 1066 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 1067 offset, le32_to_cpu(elm->size)); 1068 kfree(elm_info->txpwr_trk); 1069 elm_info->txpwr_trk = NULL; 1070 1071 return -EFAULT; 1072 } 1073 1074 static 1075 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 1076 const struct rtw89_fw_element_hdr *elm, 1077 const union rtw89_fw_element_arg arg) 1078 { 1079 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1080 u8 rfk_id; 1081 1082 if (elm_info->rfk_log_fmt) 1083 goto allocated; 1084 1085 elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 1086 if (!elm_info->rfk_log_fmt) 1087 return 1; /* this is an optional element, so just ignore this */ 1088 1089 allocated: 1090 rfk_id = elm->u.rfk_log_fmt.rfk_id; 1091 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 1092 return 1; 1093 1094 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 1095 1096 return 0; 1097 } 1098 1099 static bool rtw89_regd_entcpy(struct rtw89_regd *regd, const void *cursor, 1100 u8 cursor_size) 1101 { 1102 /* fill default values if needed for backward compatibility */ 1103 struct rtw89_fw_regd_entry entry = { 1104 .rule_2ghz = RTW89_NA, 1105 .rule_5ghz = RTW89_NA, 1106 .rule_6ghz = 
RTW89_NA, 1107 .fmap = cpu_to_le32(0x0), 1108 }; 1109 u8 valid_size = min_t(u8, sizeof(entry), cursor_size); 1110 unsigned int i; 1111 u32 fmap; 1112 1113 memcpy(&entry, cursor, valid_size); 1114 memset(regd, 0, sizeof(*regd)); 1115 1116 regd->alpha2[0] = entry.alpha2_0; 1117 regd->alpha2[1] = entry.alpha2_1; 1118 regd->alpha2[2] = '\0'; 1119 1120 /* also need to consider forward compatibility */ 1121 regd->txpwr_regd[RTW89_BAND_2G] = entry.rule_2ghz < RTW89_REGD_NUM ? 1122 entry.rule_2ghz : RTW89_NA; 1123 regd->txpwr_regd[RTW89_BAND_5G] = entry.rule_5ghz < RTW89_REGD_NUM ? 1124 entry.rule_5ghz : RTW89_NA; 1125 regd->txpwr_regd[RTW89_BAND_6G] = entry.rule_6ghz < RTW89_REGD_NUM ? 1126 entry.rule_6ghz : RTW89_NA; 1127 1128 BUILD_BUG_ON(sizeof(fmap) != sizeof(entry.fmap)); 1129 BUILD_BUG_ON(sizeof(fmap) * 8 < NUM_OF_RTW89_REGD_FUNC); 1130 1131 fmap = le32_to_cpu(entry.fmap); 1132 for (i = 0; i < NUM_OF_RTW89_REGD_FUNC; i++) { 1133 if (fmap & BIT(i)) 1134 set_bit(i, regd->func_bitmap); 1135 } 1136 1137 return true; 1138 } 1139 1140 #define rtw89_for_each_in_regd_element(regd, element) \ 1141 for (const void *cursor = (element)->content, \ 1142 *end = (element)->content + \ 1143 le32_to_cpu((element)->num_ents) * (element)->ent_sz; \ 1144 cursor < end; cursor += (element)->ent_sz) \ 1145 if (rtw89_regd_entcpy(regd, cursor, (element)->ent_sz)) 1146 1147 static 1148 int rtw89_recognize_regd_from_elm(struct rtw89_dev *rtwdev, 1149 const struct rtw89_fw_element_hdr *elm, 1150 const union rtw89_fw_element_arg arg) 1151 { 1152 const struct __rtw89_fw_regd_element *regd_elm = &elm->u.regd; 1153 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1154 u32 num_ents = le32_to_cpu(regd_elm->num_ents); 1155 struct rtw89_regd_data *p; 1156 struct rtw89_regd regd; 1157 u32 i = 0; 1158 1159 if (num_ents > RTW89_REGD_MAX_COUNTRY_NUM) { 1160 rtw89_warn(rtwdev, 1161 "regd element ents (%d) are over max num (%d)\n", 1162 num_ents, RTW89_REGD_MAX_COUNTRY_NUM); 1163 rtw89_warn(rtwdev, 1164 "regd element ignore and take another/common\n"); 1165 return 1; 1166 } 1167 1168 if (elm_info->regd) { 1169 rtw89_debug(rtwdev, RTW89_DBG_REGD, 1170 "regd element take the latter\n"); 1171 devm_kfree(rtwdev->dev, elm_info->regd); 1172 elm_info->regd = NULL; 1173 } 1174 1175 p = devm_kzalloc(rtwdev->dev, struct_size(p, map, num_ents), GFP_KERNEL); 1176 if (!p) 1177 return -ENOMEM; 1178 1179 p->nr = num_ents; 1180 rtw89_for_each_in_regd_element(®d, regd_elm) 1181 p->map[i++] = regd; 1182 1183 if (i != num_ents) { 1184 rtw89_err(rtwdev, "regd element has %d invalid ents\n", 1185 num_ents - i); 1186 devm_kfree(rtwdev->dev, p); 1187 return -EINVAL; 1188 } 1189 1190 elm_info->regd = p; 1191 return 0; 1192 } 1193 1194 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 1195 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 1196 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, 1197 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, 1198 { .fw_type = RTW89_FW_BBMCU1 }, NULL}, 1199 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 1200 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 1201 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 1202 { .rf_path = RF_PATH_A }, "radio A"}, 1203 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1204 { .rf_path = RF_PATH_B }, NULL}, 1205 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1206 { .rf_path = RF_PATH_C }, NULL}, 1207 [RTW89_FW_ELEMENT_ID_RADIO_D] = 
{rtw89_build_phy_tbl_from_elm, 1208 { .rf_path = RF_PATH_D }, NULL}, 1209 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1210 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1211 rtw89_fw_recognize_txpwr_from_elm, 1212 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1213 }, 1214 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1215 rtw89_fw_recognize_txpwr_from_elm, 1216 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1217 }, 1218 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1219 rtw89_fw_recognize_txpwr_from_elm, 1220 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1221 }, 1222 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1223 rtw89_fw_recognize_txpwr_from_elm, 1224 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1225 }, 1226 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1227 rtw89_fw_recognize_txpwr_from_elm, 1228 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1229 }, 1230 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1231 rtw89_fw_recognize_txpwr_from_elm, 1232 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1233 }, 1234 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1235 rtw89_fw_recognize_txpwr_from_elm, 1236 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1237 }, 1238 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1239 rtw89_fw_recognize_txpwr_from_elm, 1240 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1241 }, 1242 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1243 rtw89_fw_recognize_txpwr_from_elm, 1244 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1245 }, 1246 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1247 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1248 }, 1249 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1250 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1251 }, 1252 [RTW89_FW_ELEMENT_ID_REGD] = { 1253 rtw89_recognize_regd_from_elm, {}, "REGD", 1254 }, 1255 }; 1256 1257 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1258 { 1259 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1260 const struct firmware *firmware = fw_info->req.firmware; 1261 const struct rtw89_chip_info *chip = rtwdev->chip; 1262 u32 unrecognized_elements = chip->needed_fw_elms; 1263 const struct rtw89_fw_element_handler *handler; 1264 const struct rtw89_fw_element_hdr *hdr; 1265 u32 elm_size; 1266 u32 elem_id; 1267 u32 offset; 1268 int ret; 1269 1270 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1271 1272 offset = rtw89_mfw_get_size(rtwdev); 1273 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1274 if (offset == 0) 1275 return -EINVAL; 1276 1277 while (offset + sizeof(*hdr) < firmware->size) { 1278 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1279 1280 elm_size = le32_to_cpu(hdr->size); 1281 if (offset + elm_size >= firmware->size) { 1282 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1283 break; 1284 } 1285 1286 elem_id = le32_to_cpu(hdr->id); 1287 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1288 goto next; 1289 1290 handler = &__fw_element_handlers[elem_id]; 1291 if (!handler->fn) 1292 goto next; 1293 1294 ret = handler->fn(rtwdev, hdr, handler->arg); 1295 if (ret == 1) /* ignore this element */ 1296 goto next; 1297 if (ret) 1298 return ret; 1299 1300 if (handler->name) 1301 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1302 handler->name, hdr->ver); 1303 1304 unrecognized_elements &= ~BIT(elem_id); 1305 next: 1306 offset += sizeof(*hdr) + 
elm_size; 1307 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1308 } 1309 1310 if (unrecognized_elements) { 1311 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1312 unrecognized_elements); 1313 return -ENOENT; 1314 } 1315 1316 return 0; 1317 } 1318 1319 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1320 u8 type, u8 cat, u8 class, u8 func, 1321 bool rack, bool dack, u32 len) 1322 { 1323 struct fwcmd_hdr *hdr; 1324 1325 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1326 1327 if (!(rtwdev->fw.h2c_seq % 4)) 1328 rack = true; 1329 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1330 FIELD_PREP(H2C_HDR_CAT, cat) | 1331 FIELD_PREP(H2C_HDR_CLASS, class) | 1332 FIELD_PREP(H2C_HDR_FUNC, func) | 1333 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1334 1335 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1336 len + H2C_HEADER_LEN) | 1337 (rack ? H2C_HDR_REC_ACK : 0) | 1338 (dack ? H2C_HDR_DONE_ACK : 0)); 1339 1340 rtwdev->fw.h2c_seq++; 1341 } 1342 1343 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1344 struct sk_buff *skb, 1345 u8 type, u8 cat, u8 class, u8 func, 1346 u32 len) 1347 { 1348 struct fwcmd_hdr *hdr; 1349 1350 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1351 1352 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1353 FIELD_PREP(H2C_HDR_CAT, cat) | 1354 FIELD_PREP(H2C_HDR_CLASS, class) | 1355 FIELD_PREP(H2C_HDR_FUNC, func) | 1356 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1357 1358 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1359 len + H2C_HEADER_LEN)); 1360 } 1361 1362 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1363 struct rtw89_fw_bin_info *info, 1364 struct rtw89_fw_hdr *fw_hdr) 1365 { 1366 struct rtw89_fw_hdr_section_info *section_info; 1367 struct rtw89_fw_hdr_section *section; 1368 int i; 1369 1370 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1371 FW_HDR_W7_PART_SIZE); 1372 1373 for (i = 0; i < info->section_num; i++) { 1374 section_info = &info->section_info[i]; 1375 1376 if (!section_info->len_override) 1377 continue; 1378 1379 section = &fw_hdr->sections[i]; 1380 le32p_replace_bits(§ion->w1, section_info->len_override, 1381 FWSECTION_HDR_W1_SEC_SIZE); 1382 } 1383 1384 return 0; 1385 } 1386 1387 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1388 struct rtw89_fw_bin_info *info, 1389 struct rtw89_fw_hdr_v1 *fw_hdr) 1390 { 1391 struct rtw89_fw_hdr_section_info *section_info; 1392 struct rtw89_fw_hdr_section_v1 *section; 1393 u8 dst_sec_idx = 0; 1394 u8 sec_idx; 1395 1396 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1397 FW_HDR_V1_W7_PART_SIZE); 1398 1399 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1400 section_info = &info->section_info[sec_idx]; 1401 section = &fw_hdr->sections[sec_idx]; 1402 1403 if (section_info->ignore) 1404 continue; 1405 1406 if (dst_sec_idx != sec_idx) 1407 fw_hdr->sections[dst_sec_idx] = *section; 1408 1409 dst_sec_idx++; 1410 } 1411 1412 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1413 1414 return (info->section_num - dst_sec_idx) * sizeof(*section); 1415 } 1416 1417 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1418 const struct rtw89_fw_suit *fw_suit, 1419 struct rtw89_fw_bin_info *info) 1420 { 1421 u32 len = info->hdr_len - info->dynamic_hdr_len; 1422 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1423 const u8 *fw = fw_suit->data; 1424 struct rtw89_fw_hdr *fw_hdr; 1425 struct sk_buff *skb; 1426 u32 truncated; 1427 u32 ret = 0; 
1428 1429 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1430 if (!skb) { 1431 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1432 return -ENOMEM; 1433 } 1434 1435 skb_put_data(skb, fw, len); 1436 1437 switch (fw_suit->hdr_ver) { 1438 case 0: 1439 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1440 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1441 break; 1442 case 1: 1443 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1444 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1445 break; 1446 default: 1447 ret = -EOPNOTSUPP; 1448 goto fail; 1449 } 1450 1451 if (truncated) { 1452 len -= truncated; 1453 skb_trim(skb, len); 1454 } 1455 1456 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1457 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1458 H2C_FUNC_MAC_FWHDR_DL, len); 1459 1460 ret = rtw89_h2c_tx(rtwdev, skb, false); 1461 if (ret) { 1462 rtw89_err(rtwdev, "failed to send h2c\n"); 1463 ret = -1; 1464 goto fail; 1465 } 1466 1467 return 0; 1468 fail: 1469 dev_kfree_skb_any(skb); 1470 1471 return ret; 1472 } 1473 1474 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1475 const struct rtw89_fw_suit *fw_suit, 1476 struct rtw89_fw_bin_info *info) 1477 { 1478 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1479 int ret; 1480 1481 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1482 if (ret) { 1483 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1484 return ret; 1485 } 1486 1487 ret = mac->fwdl_check_path_ready(rtwdev, false); 1488 if (ret) { 1489 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1490 return ret; 1491 } 1492 1493 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1494 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1495 1496 return 0; 1497 } 1498 1499 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1500 struct rtw89_fw_hdr_section_info *info) 1501 { 1502 struct sk_buff *skb; 1503 const u8 *section = info->addr; 1504 u32 residue_len = info->len; 1505 bool copy_key = false; 1506 u32 pkt_len; 1507 int ret; 1508 1509 if (info->ignore) 1510 return 0; 1511 1512 if (info->len_override) { 1513 if (info->len_override > info->len) 1514 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1515 info->len_override, info->len); 1516 else 1517 residue_len = info->len_override; 1518 } 1519 1520 if (info->key_addr && info->key_len) { 1521 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1522 rtw89_warn(rtwdev, 1523 "ignore to copy key data because of len %d, %d, %d, %d\n", 1524 info->len, FWDL_SECTION_PER_PKT_LEN, 1525 info->key_len, residue_len); 1526 else 1527 copy_key = true; 1528 } 1529 1530 while (residue_len) { 1531 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1532 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1533 else 1534 pkt_len = residue_len; 1535 1536 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1537 if (!skb) { 1538 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1539 return -ENOMEM; 1540 } 1541 skb_put_data(skb, section, pkt_len); 1542 1543 if (copy_key) 1544 memcpy(skb->data + pkt_len - info->key_len, 1545 info->key_addr, info->key_len); 1546 1547 ret = rtw89_h2c_tx(rtwdev, skb, true); 1548 if (ret) { 1549 rtw89_err(rtwdev, "failed to send h2c\n"); 1550 ret = -1; 1551 goto fail; 1552 } 1553 1554 section += pkt_len; 1555 residue_len -= pkt_len; 1556 } 1557 1558 return 0; 1559 fail: 1560 dev_kfree_skb_any(skb); 1561 1562 return ret; 1563 } 1564 1565 static enum rtw89_fwdl_check_type 1566 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1567 const struct 
rtw89_fw_suit *fw_suit) 1568 { 1569 switch (fw_suit->type) { 1570 case RTW89_FW_BBMCU0: 1571 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1572 case RTW89_FW_BBMCU1: 1573 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1574 default: 1575 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1576 } 1577 } 1578 1579 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1580 const struct rtw89_fw_suit *fw_suit, 1581 struct rtw89_fw_bin_info *info) 1582 { 1583 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1584 const struct rtw89_chip_info *chip = rtwdev->chip; 1585 enum rtw89_fwdl_check_type chk_type; 1586 u8 section_num = info->section_num; 1587 int ret; 1588 1589 while (section_num--) { 1590 ret = __rtw89_fw_download_main(rtwdev, section_info); 1591 if (ret) 1592 return ret; 1593 section_info++; 1594 } 1595 1596 if (chip->chip_gen == RTW89_CHIP_AX) 1597 return 0; 1598 1599 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1600 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1601 if (ret) { 1602 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1603 fw_suit->type); 1604 return ret; 1605 } 1606 1607 return 0; 1608 } 1609 1610 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1611 { 1612 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1613 u32 addr = R_AX_DBG_PORT_SEL; 1614 u32 val32; 1615 u16 index; 1616 1617 if (chip_gen == RTW89_CHIP_BE) { 1618 addr = R_BE_WLCPU_PORT_PC; 1619 goto dump; 1620 } 1621 1622 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1623 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1624 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1625 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1626 1627 dump: 1628 for (index = 0; index < 15; index++) { 1629 val32 = rtw89_read32(rtwdev, addr); 1630 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1631 fsleep(10); 1632 } 1633 } 1634 1635 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1636 { 1637 u32 val32; 1638 1639 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1640 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1641 1642 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1643 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1644 1645 rtw89_fw_prog_cnt_dump(rtwdev); 1646 } 1647 1648 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1649 struct rtw89_fw_suit *fw_suit) 1650 { 1651 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1652 struct rtw89_fw_bin_info info = {}; 1653 int ret; 1654 1655 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1656 if (ret) { 1657 rtw89_err(rtwdev, "parse fw header fail\n"); 1658 return ret; 1659 } 1660 1661 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1662 1663 if (rtwdev->chip->chip_id == RTL8922A && 1664 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1665 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1666 1667 ret = mac->fwdl_check_path_ready(rtwdev, true); 1668 if (ret) { 1669 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1670 return ret; 1671 } 1672 1673 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1674 if (ret) 1675 return ret; 1676 1677 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1678 if (ret) 1679 return ret; 1680 1681 return 0; 1682 } 1683 1684 static 1685 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1686 bool include_bb) 1687 { 1688 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1689 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1690 struct rtw89_fw_suit 
*fw_suit = rtw89_fw_suit_get(rtwdev, type); 1691 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1692 int ret; 1693 int i; 1694 1695 mac->disable_cpu(rtwdev); 1696 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1697 if (ret) 1698 return ret; 1699 1700 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1701 if (ret) 1702 goto fwdl_err; 1703 1704 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1705 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1706 1707 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1708 if (ret) 1709 goto fwdl_err; 1710 } 1711 1712 fw_info->h2c_seq = 0; 1713 fw_info->rec_seq = 0; 1714 fw_info->h2c_counter = 0; 1715 fw_info->c2h_counter = 0; 1716 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1717 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1718 1719 mdelay(5); 1720 1721 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1722 if (ret) { 1723 rtw89_warn(rtwdev, "download firmware fail\n"); 1724 goto fwdl_err; 1725 } 1726 1727 return ret; 1728 1729 fwdl_err: 1730 rtw89_fw_dl_fail_dump(rtwdev); 1731 return ret; 1732 } 1733 1734 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1735 bool include_bb) 1736 { 1737 int retry; 1738 int ret; 1739 1740 for (retry = 0; retry < 5; retry++) { 1741 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1742 if (!ret) 1743 return 0; 1744 } 1745 1746 return ret; 1747 } 1748 1749 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1750 { 1751 struct rtw89_fw_info *fw = &rtwdev->fw; 1752 1753 wait_for_completion(&fw->req.completion); 1754 if (!fw->req.firmware) 1755 return -EINVAL; 1756 1757 return 0; 1758 } 1759 1760 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1761 struct rtw89_fw_req_info *req, 1762 const char *fw_name, bool nowarn) 1763 { 1764 int ret; 1765 1766 if (req->firmware) { 1767 rtw89_debug(rtwdev, RTW89_DBG_FW, 1768 "full firmware has been early requested\n"); 1769 complete_all(&req->completion); 1770 return 0; 1771 } 1772 1773 if (nowarn) 1774 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1775 else 1776 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1777 1778 complete_all(&req->completion); 1779 1780 return ret; 1781 } 1782 1783 void rtw89_load_firmware_work(struct work_struct *work) 1784 { 1785 struct rtw89_dev *rtwdev = 1786 container_of(work, struct rtw89_dev, load_firmware_work); 1787 const struct rtw89_chip_info *chip = rtwdev->chip; 1788 char fw_name[64]; 1789 1790 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1791 chip->fw_basename, rtwdev->fw.fw_format); 1792 1793 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1794 } 1795 1796 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1797 { 1798 if (!tbl) 1799 return; 1800 1801 kfree(tbl->regs); 1802 kfree(tbl); 1803 } 1804 1805 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1806 { 1807 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1808 int i; 1809 1810 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1811 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1812 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1813 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1814 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1815 1816 kfree(elm_info->txpwr_trk); 1817 kfree(elm_info->rfk_log_fmt); 1818 } 1819 1820 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1821 { 1822 struct rtw89_fw_info *fw = &rtwdev->fw; 1823 1824 cancel_work_sync(&rtwdev->load_firmware_work); 1825 1826 if (fw->req.firmware) { 1827 
release_firmware(fw->req.firmware); 1828 1829 /* assign NULL back in case rtw89_free_ieee80211_hw() 1830 * try to release the same one again. 1831 */ 1832 fw->req.firmware = NULL; 1833 } 1834 1835 kfree(fw->log.fmts); 1836 rtw89_unload_firmware_elements(rtwdev); 1837 } 1838 1839 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1840 { 1841 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1842 u32 i; 1843 1844 if (fmt_id > fw_log->last_fmt_id) 1845 return 0; 1846 1847 for (i = 0; i < fw_log->fmt_count; i++) { 1848 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1849 return i; 1850 } 1851 return 0; 1852 } 1853 1854 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1855 { 1856 struct rtw89_fw_log *log = &rtwdev->fw.log; 1857 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1858 struct rtw89_fw_suit *suit = &log->suit; 1859 const void *fmts_ptr, *fmts_end_ptr; 1860 u32 fmt_count; 1861 int i; 1862 1863 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1864 fmt_count = le32_to_cpu(suit_hdr->count); 1865 log->fmt_ids = suit_hdr->ids; 1866 fmts_ptr = &suit_hdr->ids[fmt_count]; 1867 fmts_end_ptr = suit->data + suit->size; 1868 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1869 if (!log->fmts) 1870 return -ENOMEM; 1871 1872 for (i = 0; i < fmt_count; i++) { 1873 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1874 if (!fmts_ptr) 1875 break; 1876 1877 (*log->fmts)[i] = fmts_ptr; 1878 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1879 log->fmt_count++; 1880 fmts_ptr += strlen(fmts_ptr); 1881 } 1882 1883 return 0; 1884 } 1885 1886 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1887 { 1888 struct rtw89_fw_log *log = &rtwdev->fw.log; 1889 struct rtw89_fw_suit *suit = &log->suit; 1890 1891 if (!suit || !suit->data) { 1892 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1893 return -EINVAL; 1894 } 1895 if (log->fmts) 1896 return 0; 1897 1898 return rtw89_fw_log_create_fmts_dict(rtwdev); 1899 } 1900 1901 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1902 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1903 u32 fmt_idx, u8 para_int, bool raw_data) 1904 { 1905 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1906 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1907 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1908 int i; 1909 1910 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1911 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1912 log_fmt->argc); 1913 return; 1914 } 1915 1916 if (para_int) 1917 for (i = 0 ; i < log_fmt->argc; i++) 1918 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1919 1920 if (raw_data) { 1921 if (para_int) 1922 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1923 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1924 para_int, log_fmt->argc, (int)sizeof(args), args); 1925 else 1926 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1927 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1928 para_int, log_fmt->argc, log_fmt->u.raw); 1929 } else { 1930 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1931 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1932 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1933 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1934 args[0xf]); 1935 } 1936 1937 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1938 } 1939 1940 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1941 { 1942 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1943 u8 para_int; 1944 u32 fmt_idx; 1945 1946 if (len < 
RTW89_C2H_HEADER_LEN) { 1947 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1948 return; 1949 } 1950 1951 buf += RTW89_C2H_HEADER_LEN; 1952 len -= RTW89_C2H_HEADER_LEN; 1953 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1954 1955 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1956 goto plain_log; 1957 1958 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1959 goto plain_log; 1960 1961 if (!rtwdev->fw.log.fmts) 1962 return; 1963 1964 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1965 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1966 1967 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1968 rtw89_info(rtwdev, "C2H log: %s%s", 1969 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1970 else if (fmt_idx != 0 && para_int) 1971 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1972 else 1973 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1974 return; 1975 1976 plain_log: 1977 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1978 1979 } 1980 1981 #define H2C_CAM_LEN 60 1982 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 1983 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 1984 { 1985 struct sk_buff *skb; 1986 int ret; 1987 1988 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1989 if (!skb) { 1990 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1991 return -ENOMEM; 1992 } 1993 skb_put(skb, H2C_CAM_LEN); 1994 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 1995 skb->data); 1996 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 1997 1998 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1999 H2C_CAT_MAC, 2000 H2C_CL_MAC_ADDR_CAM_UPDATE, 2001 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 2002 H2C_CAM_LEN); 2003 2004 ret = rtw89_h2c_tx(rtwdev, skb, false); 2005 if (ret) { 2006 rtw89_err(rtwdev, "failed to send h2c\n"); 2007 goto fail; 2008 } 2009 2010 return 0; 2011 fail: 2012 dev_kfree_skb_any(skb); 2013 2014 return ret; 2015 } 2016 2017 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 2018 struct rtw89_vif_link *rtwvif_link, 2019 struct rtw89_sta_link *rtwsta_link) 2020 { 2021 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 2022 u32 len = sizeof(*h2c); 2023 struct sk_buff *skb; 2024 int ret; 2025 2026 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2027 if (!skb) { 2028 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2029 return -ENOMEM; 2030 } 2031 skb_put(skb, len); 2032 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 2033 2034 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 2035 2036 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2037 H2C_CAT_MAC, 2038 H2C_CL_MAC_FR_EXCHG, 2039 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 2040 len); 2041 2042 ret = rtw89_h2c_tx(rtwdev, skb, false); 2043 if (ret) { 2044 rtw89_err(rtwdev, "failed to send h2c\n"); 2045 goto fail; 2046 } 2047 2048 return 0; 2049 fail: 2050 dev_kfree_skb_any(skb); 2051 2052 return ret; 2053 } 2054 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 2055 2056 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 2057 struct rtw89_vif_link *rtwvif_link, 2058 struct rtw89_sta_link *rtwsta_link) 2059 { 2060 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2061 u32 len = sizeof(*h2c); 2062 struct sk_buff *skb; 2063 int ret; 2064 2065 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2066 if (!skb) { 2067 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 2068 return 
-ENOMEM; 2069 } 2070 skb_put(skb, len); 2071 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2072 2073 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 2074 2075 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2076 H2C_CAT_MAC, 2077 H2C_CL_MAC_FR_EXCHG, 2078 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2079 len); 2080 2081 ret = rtw89_h2c_tx(rtwdev, skb, false); 2082 if (ret) { 2083 rtw89_err(rtwdev, "failed to send h2c\n"); 2084 goto fail; 2085 } 2086 2087 return 0; 2088 fail: 2089 dev_kfree_skb_any(skb); 2090 2091 return ret; 2092 } 2093 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 2094 2095 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 2096 struct rtw89_vif_link *rtwvif_link, 2097 struct rtw89_sta_link *rtwsta_link) 2098 { 2099 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2100 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 2101 u32 len = sizeof(*h2c); 2102 struct sk_buff *skb; 2103 int ret; 2104 2105 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2106 if (!skb) { 2107 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 2108 return -ENOMEM; 2109 } 2110 skb_put(skb, len); 2111 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 2112 2113 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 2114 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 2115 2116 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 2117 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 2118 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 2119 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 2120 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 2121 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 2122 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 2123 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 2124 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 2125 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 2126 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 2127 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 2128 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 2129 2130 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2131 H2C_CAT_MAC, 2132 H2C_CL_MAC_FR_EXCHG, 2133 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 2134 len); 2135 2136 ret = rtw89_h2c_tx(rtwdev, skb, false); 2137 if (ret) { 2138 rtw89_err(rtwdev, "failed to send h2c\n"); 2139 goto fail; 2140 } 2141 2142 return 0; 2143 fail: 2144 dev_kfree_skb_any(skb); 2145 2146 return ret; 2147 } 2148 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2149 2150 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2151 struct rtw89_vif_link *rtwvif_link, 2152 struct rtw89_sta_link *rtwsta_link, 2153 bool valid, struct ieee80211_ampdu_params *params) 2154 { 2155 const struct rtw89_chip_info *chip = rtwdev->chip; 2156 struct rtw89_h2c_ba_cam *h2c; 2157 u8 macid = rtwsta_link->mac_id; 2158 u32 len = sizeof(*h2c); 2159 struct sk_buff *skb; 2160 u8 entry_idx; 2161 int ret; 2162 2163 ret = valid ? 2164 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2165 &entry_idx) : 2166 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2167 &entry_idx); 2168 if (ret) { 2169 /* it still works even if we don't have static BA CAM, because 2170 * hardware can create dynamic BA CAM automatically. 2171 */ 2172 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2173 "failed to %s entry tid=%d for h2c ba cam\n", 2174 valid ? 
"alloc" : "free", params->tid); 2175 return 0; 2176 } 2177 2178 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2179 if (!skb) { 2180 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2181 return -ENOMEM; 2182 } 2183 skb_put(skb, len); 2184 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2185 2186 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2187 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2188 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2189 else 2190 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2191 if (!valid) 2192 goto end; 2193 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2194 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2195 if (params->buf_size > 64) 2196 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2197 else 2198 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2199 /* If init req is set, hw will set the ssn */ 2200 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2201 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2202 2203 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2204 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2205 le32_encode_bits(rtwvif_link->mac_idx, 2206 RTW89_H2C_BA_CAM_W1_BAND); 2207 } 2208 2209 end: 2210 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2211 H2C_CAT_MAC, 2212 H2C_CL_BA_CAM, 2213 H2C_FUNC_MAC_BA_CAM, 0, 1, 2214 len); 2215 2216 ret = rtw89_h2c_tx(rtwdev, skb, false); 2217 if (ret) { 2218 rtw89_err(rtwdev, "failed to send h2c\n"); 2219 goto fail; 2220 } 2221 2222 return 0; 2223 fail: 2224 dev_kfree_skb_any(skb); 2225 2226 return ret; 2227 } 2228 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2229 2230 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2231 u8 entry_idx, u8 uid) 2232 { 2233 struct rtw89_h2c_ba_cam *h2c; 2234 u32 len = sizeof(*h2c); 2235 struct sk_buff *skb; 2236 int ret; 2237 2238 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2239 if (!skb) { 2240 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2241 return -ENOMEM; 2242 } 2243 skb_put(skb, len); 2244 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2245 2246 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2247 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2248 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2249 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2250 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2251 2252 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2253 H2C_CAT_MAC, 2254 H2C_CL_BA_CAM, 2255 H2C_FUNC_MAC_BA_CAM, 0, 1, 2256 len); 2257 2258 ret = rtw89_h2c_tx(rtwdev, skb, false); 2259 if (ret) { 2260 rtw89_err(rtwdev, "failed to send h2c\n"); 2261 goto fail; 2262 } 2263 2264 return 0; 2265 fail: 2266 dev_kfree_skb_any(skb); 2267 2268 return ret; 2269 } 2270 2271 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2272 { 2273 const struct rtw89_chip_info *chip = rtwdev->chip; 2274 u8 entry_idx = chip->bacam_num; 2275 u8 uid = 0; 2276 int i; 2277 2278 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2279 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2280 entry_idx++; 2281 uid++; 2282 } 2283 } 2284 2285 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2286 struct rtw89_vif_link *rtwvif_link, 2287 struct rtw89_sta_link *rtwsta_link, 2288 bool valid, struct ieee80211_ampdu_params *params) 2289 { 2290 const struct rtw89_chip_info *chip = rtwdev->chip; 2291 struct rtw89_h2c_ba_cam_v1 *h2c; 2292 u8 
macid = rtwsta_link->mac_id; 2293 u32 len = sizeof(*h2c); 2294 struct sk_buff *skb; 2295 u8 entry_idx; 2296 u8 bmap_size; 2297 int ret; 2298 2299 ret = valid ? 2300 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2301 &entry_idx) : 2302 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2303 &entry_idx); 2304 if (ret) { 2305 /* it still works even if we don't have static BA CAM, because 2306 * hardware can create dynamic BA CAM automatically. 2307 */ 2308 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2309 "failed to %s entry tid=%d for h2c ba cam\n", 2310 valid ? "alloc" : "free", params->tid); 2311 return 0; 2312 } 2313 2314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2315 if (!skb) { 2316 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2317 return -ENOMEM; 2318 } 2319 skb_put(skb, len); 2320 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2321 2322 if (params->buf_size > 512) 2323 bmap_size = 10; 2324 else if (params->buf_size > 256) 2325 bmap_size = 8; 2326 else if (params->buf_size > 64) 2327 bmap_size = 4; 2328 else 2329 bmap_size = 0; 2330 2331 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2332 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2333 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2334 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2335 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2336 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2337 2338 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2339 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2340 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2341 le32_encode_bits(!!rtwvif_link->mac_idx, 2342 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2343 2344 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2345 H2C_CAT_MAC, 2346 H2C_CL_BA_CAM, 2347 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2348 len); 2349 2350 ret = rtw89_h2c_tx(rtwdev, skb, false); 2351 if (ret) { 2352 rtw89_err(rtwdev, "failed to send h2c\n"); 2353 goto fail; 2354 } 2355 2356 return 0; 2357 fail: 2358 dev_kfree_skb_any(skb); 2359 2360 return ret; 2361 } 2362 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2363 2364 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2365 u8 offset, u8 mac_idx) 2366 { 2367 struct rtw89_h2c_ba_cam_init *h2c; 2368 u32 len = sizeof(*h2c); 2369 struct sk_buff *skb; 2370 int ret; 2371 2372 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2373 if (!skb) { 2374 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2375 return -ENOMEM; 2376 } 2377 skb_put(skb, len); 2378 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2379 2380 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2381 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2382 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2383 2384 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2385 H2C_CAT_MAC, 2386 H2C_CL_BA_CAM, 2387 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2388 len); 2389 2390 ret = rtw89_h2c_tx(rtwdev, skb, false); 2391 if (ret) { 2392 rtw89_err(rtwdev, "failed to send h2c\n"); 2393 goto fail; 2394 } 2395 2396 return 0; 2397 fail: 2398 dev_kfree_skb_any(skb); 2399 2400 return ret; 2401 } 2402 2403 #define H2C_LOG_CFG_LEN 12 2404 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2405 { 2406 struct sk_buff *skb; 2407 u32 comp = 0; 2408 int ret; 2409 2410 if (enable) 2411 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2412 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2413 BIT(RTW89_FW_LOG_COMP_SCAN); 2414 2415 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2416 if (!skb) { 2417 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2418 return -ENOMEM; 2419 } 2420 2421 skb_put(skb, H2C_LOG_CFG_LEN); 2422 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2423 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2424 SET_LOG_CFG_COMP(skb->data, comp); 2425 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2426 2427 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2428 H2C_CAT_MAC, 2429 H2C_CL_FW_INFO, 2430 H2C_FUNC_LOG_CFG, 0, 0, 2431 H2C_LOG_CFG_LEN); 2432 2433 ret = rtw89_h2c_tx(rtwdev, skb, false); 2434 if (ret) { 2435 rtw89_err(rtwdev, "failed to send h2c\n"); 2436 goto fail; 2437 } 2438 2439 return 0; 2440 fail: 2441 dev_kfree_skb_any(skb); 2442 2443 return ret; 2444 } 2445 2446 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2447 struct rtw89_vif_link *rtwvif_link) 2448 { 2449 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2450 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2451 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2452 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2453 struct rtw89_eapol_2_of_2 *eapol_pkt; 2454 struct ieee80211_bss_conf *bss_conf; 2455 struct ieee80211_hdr_3addr *hdr; 2456 struct sk_buff *skb; 2457 u8 key_des_ver; 2458 2459 if (rtw_wow->ptk_alg == 3) 2460 key_des_ver = 1; 2461 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2462 key_des_ver = 2; 2463 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2464 key_des_ver = 3; 2465 else 2466 key_des_ver = 0; 2467 2468 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2469 if (!skb) 2470 return NULL; 2471 2472 hdr = skb_put_zero(skb, sizeof(*hdr)); 2473 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2474 IEEE80211_FCTL_TODS | 2475 IEEE80211_FCTL_PROTECTED); 2476 2477 rcu_read_lock(); 2478 2479 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2480 2481 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2482 ether_addr_copy(hdr->addr2, bss_conf->addr); 2483 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2484 2485 rcu_read_unlock(); 2486 2487 skb_put_zero(skb, sec_hdr_len); 2488 2489 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2490 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2491 eapol_pkt->key_des_ver = key_des_ver; 2492 2493 return skb; 2494 } 2495 2496 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2497 struct rtw89_vif_link *rtwvif_link) 2498 { 2499 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2500 struct ieee80211_bss_conf *bss_conf; 2501 struct ieee80211_hdr_3addr *hdr; 2502 struct rtw89_sa_query *sa_query; 2503 struct sk_buff *skb; 2504 2505 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2506 if (!skb) 2507 return NULL; 2508 2509 hdr = skb_put_zero(skb, sizeof(*hdr)); 2510 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2511 IEEE80211_STYPE_ACTION | 2512 IEEE80211_FCTL_PROTECTED); 2513 2514 rcu_read_lock(); 2515 2516 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2517 2518 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2519 ether_addr_copy(hdr->addr2, bss_conf->addr); 2520 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2521 2522 rcu_read_unlock(); 2523 2524 skb_put_zero(skb, sec_hdr_len); 2525 2526 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2527 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2528 
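/* Template note: only the category/action octets are prefilled here; the 2-octet SA Query Transaction Identifier of a real SA Query frame is not part of this offload template and is presumably filled in by the firmware when it responds (assumption, not confirmed by the firmware interface). */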
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2529 2530 return skb; 2531 } 2532 2533 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2534 struct rtw89_vif_link *rtwvif_link) 2535 { 2536 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2537 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2538 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2539 struct ieee80211_hdr_3addr *hdr; 2540 struct rtw89_arp_rsp *arp_skb; 2541 struct arphdr *arp_hdr; 2542 struct sk_buff *skb; 2543 __le16 fc; 2544 2545 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2546 if (!skb) 2547 return NULL; 2548 2549 hdr = skb_put_zero(skb, sizeof(*hdr)); 2550 2551 if (rtw_wow->ptk_alg) 2552 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2553 IEEE80211_FCTL_PROTECTED); 2554 else 2555 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2556 2557 hdr->frame_control = fc; 2558 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2559 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2560 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2561 2562 skb_put_zero(skb, sec_hdr_len); 2563 2564 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2565 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2566 arp_skb->llc_type = htons(ETH_P_ARP); 2567 2568 arp_hdr = &arp_skb->arp_hdr; 2569 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2570 arp_hdr->ar_pro = htons(ETH_P_IP); 2571 arp_hdr->ar_hln = ETH_ALEN; 2572 arp_hdr->ar_pln = 4; 2573 arp_hdr->ar_op = htons(ARPOP_REPLY); 2574 2575 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2576 arp_skb->sender_ip = rtwvif->ip_addr; 2577 2578 return skb; 2579 } 2580 2581 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2582 struct rtw89_vif_link *rtwvif_link, 2583 enum rtw89_fw_pkt_ofld_type type, 2584 u8 *id) 2585 { 2586 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2587 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2588 struct rtw89_pktofld_info *info; 2589 struct sk_buff *skb; 2590 int ret; 2591 2592 info = kzalloc(sizeof(*info), GFP_KERNEL); 2593 if (!info) 2594 return -ENOMEM; 2595 2596 switch (type) { 2597 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2598 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2599 break; 2600 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2601 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2602 break; 2603 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2604 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2605 break; 2606 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2607 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2608 break; 2609 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2610 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2611 break; 2612 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2613 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2614 break; 2615 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2616 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2617 break; 2618 default: 2619 goto err; 2620 } 2621 2622 if (!skb) 2623 goto err; 2624 2625 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2626 kfree_skb(skb); 2627 2628 if (ret) 2629 goto err; 2630 2631 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2632 *id = info->id; 2633 return 0; 2634 2635 err: 2636 kfree(info); 2637 return -ENOMEM; 2638 } 2639 2640 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2641 struct rtw89_vif_link *rtwvif_link, 2642 bool notify_fw) 2643 { 2644 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2645 struct rtw89_pktofld_info *info, *tmp; 2646 2647 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2648 if (notify_fw) 2649 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2650 else 2651 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2652 list_del(&info->list); 2653 kfree(info); 2654 } 2655 } 2656 2657 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2658 { 2659 struct rtw89_vif_link *rtwvif_link; 2660 struct rtw89_vif *rtwvif; 2661 unsigned int link_id; 2662 2663 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2664 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2665 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2666 notify_fw); 2667 } 2668 2669 #define H2C_GENERAL_PKT_LEN 6 2670 #define H2C_GENERAL_PKT_ID_UND 0xff 2671 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2672 struct rtw89_vif_link *rtwvif_link, u8 macid) 2673 { 2674 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2675 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2676 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2677 struct sk_buff *skb; 2678 int ret; 2679 2680 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2681 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2682 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2683 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2684 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2685 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2686 2687 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2688 if (!skb) { 2689 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2690 return -ENOMEM; 2691 } 2692 skb_put(skb, H2C_GENERAL_PKT_LEN); 2693 SET_GENERAL_PKT_MACID(skb->data, macid); 2694 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2695 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2696 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2697 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2698 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2699 2700 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2701 H2C_CAT_MAC, 2702 H2C_CL_FW_INFO, 2703 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2704 H2C_GENERAL_PKT_LEN); 2705 2706 ret = rtw89_h2c_tx(rtwdev, skb, false); 2707 if (ret) { 2708 rtw89_err(rtwdev, "failed to send h2c\n"); 2709 goto fail; 2710 } 2711 2712 return 0; 2713 fail: 2714 dev_kfree_skb_any(skb); 2715 2716 return ret; 2717 } 2718 2719 #define H2C_LPS_PARM_LEN 8 2720 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2721 struct rtw89_lps_parm *lps_param) 2722 { 2723 struct sk_buff *skb; 2724 int ret; 2725 2726 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2727 if (!skb) { 2728 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2729 return -ENOMEM; 2730 } 2731 skb_put(skb, H2C_LPS_PARM_LEN); 2732 2733 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2734 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2735 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2736 SET_LPS_PARM_RLBM(skb->data, 1); 2737 SET_LPS_PARM_SMARTPS(skb->data, 1); 2738 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2739 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2740 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2741 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2742 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2743 2744 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2745 H2C_CAT_MAC, 2746 H2C_CL_MAC_PS, 2747 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2748 H2C_LPS_PARM_LEN); 2749 2750 ret = rtw89_h2c_tx(rtwdev, skb, false); 2751 if (ret) { 2752 rtw89_err(rtwdev, "failed to send h2c\n"); 2753 goto fail; 2754 } 2755 2756 return 0; 2757 fail: 2758 dev_kfree_skb_any(skb); 2759 2760 return ret; 2761 } 2762 2763 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2764 { 2765 const struct rtw89_chip_info *chip = rtwdev->chip; 2766 const struct rtw89_chan *chan; 2767 struct rtw89_vif_link *rtwvif_link; 2768 struct rtw89_h2c_lps_ch_info *h2c; 2769 u32 len = sizeof(*h2c); 2770 unsigned int link_id; 2771 struct sk_buff *skb; 2772 bool no_chan = true; 2773 u8 phy_idx; 2774 u32 done; 2775 int ret; 2776 2777 if (chip->chip_gen != RTW89_CHIP_BE) 2778 return 0; 2779 2780 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2781 if (!skb) { 2782 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2783 return -ENOMEM; 2784 } 2785 skb_put(skb, len); 2786 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2787 2788 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2789 phy_idx = rtwvif_link->phy_idx; 2790 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2791 continue; 2792 2793 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2794 no_chan = false; 2795 2796 h2c->info[phy_idx].central_ch = chan->channel; 2797 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2798 h2c->info[phy_idx].band = chan->band_type; 2799 h2c->info[phy_idx].bw = chan->band_width; 2800 } 2801 2802 if (no_chan) { 2803 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2804 ret = -ENOENT; 2805 goto fail; 2806 } 2807 2808 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2809 2810 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2811 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2812 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2813 2814 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2815 ret = rtw89_h2c_tx(rtwdev, skb, false); 2816 if (ret) { 2817 rtw89_err(rtwdev, "failed to send h2c\n"); 2818 goto fail; 2819 } 2820 2821 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2822 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2823 if (ret) 2824 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2825 2826 return 0; 2827 fail: 2828 dev_kfree_skb_any(skb); 2829 2830 return ret; 2831 } 2832 2833 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2834 struct rtw89_vif *rtwvif) 2835 { 2836 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2837 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2838 static const u8 bcn_bw_ofst[] = {0, 0, 0, 3, 6, 9, 0, 12}; 2839 const struct rtw89_chip_info *chip = rtwdev->chip; 2840 struct rtw89_efuse *efuse = &rtwdev->efuse; 2841 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2842 struct rtw89_vif_link *rtwvif_link; 2843 const struct rtw89_chan *chan; 2844 u8 bw_idx = RTW89_BB_BW_20_40; 2845 u32 len = sizeof(*h2c); 2846 unsigned int link_id; 2847 struct sk_buff *skb; 2848 u8 beacon_bw_ofst; 2849 u8 gain_band; 2850 u32 done; 2851 u8 path; 2852 int ret; 2853 int i; 2854 2855 if (chip->chip_gen != RTW89_CHIP_BE) 2856 return 0; 2857 2858 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2859 if (!skb) { 2860 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2861 return -ENOMEM; 2862 } 2863 skb_put(skb, len); 2864 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2865 2866 h2c->fmt_id = 0x3; 2867 2868 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2869 h2c->rfe_type = efuse->rfe_type; 2870 2871 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2872 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2873 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2874 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2875 2876 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2877 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2878 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2879 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2880 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2881 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2882 else 2883 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2884 2885 /* Fill BW20 RX gain table for beacon mode */ 2886 for (i = 0; i < TIA_GAIN_NUM; i++) { 2887 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2888 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2889 } 2890 2891 if (rtwvif_link->bcn_bw_idx < ARRAY_SIZE(bcn_bw_ofst)) { 2892 beacon_bw_ofst = bcn_bw_ofst[rtwvif_link->bcn_bw_idx]; 2893 h2c->dup_bcn_ofst[rtwvif_link->phy_idx] = beacon_bw_ofst; 2894 } 2895 2896 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2897 gain->lna_gain[gain_band][bw_idx][path], 2898 LNA_GAIN_NUM); 2899 memcpy(h2c->tia_lna_op1db[rtwvif_link->phy_idx], 2900 gain->tia_lna_op1db[gain_band][bw_idx][path], 2901 LNA_GAIN_NUM + 1); 2902 memcpy(h2c->lna_op1db[rtwvif_link->phy_idx], 2903 gain->lna_op1db[gain_band][bw_idx][path], 2904 LNA_GAIN_NUM); 2905 } 2906 2907 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2908 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2909 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2910 2911 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2912 ret = rtw89_h2c_tx(rtwdev, skb, false); 2913 if (ret) { 2914 rtw89_err(rtwdev, "failed to send h2c\n"); 2915 goto fail; 2916 } 2917 2918 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2919 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2920 if (ret) 2921 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2922 2923 return 0; 2924 fail: 2925 dev_kfree_skb_any(skb); 2926 2927 return ret; 2928 } 2929 2930 #define 
H2C_P2P_ACT_LEN 20 2931 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2932 struct rtw89_vif_link *rtwvif_link, 2933 struct ieee80211_bss_conf *bss_conf, 2934 struct ieee80211_p2p_noa_desc *desc, 2935 u8 act, u8 noa_id) 2936 { 2937 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2938 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2939 struct sk_buff *skb; 2940 u8 *cmd; 2941 int ret; 2942 2943 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2944 if (!skb) { 2945 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2946 return -ENOMEM; 2947 } 2948 skb_put(skb, H2C_P2P_ACT_LEN); 2949 cmd = skb->data; 2950 2951 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2952 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2953 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2954 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2955 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2956 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2957 if (desc) { 2958 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2959 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2960 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2961 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2962 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2963 } 2964 2965 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2966 H2C_CAT_MAC, H2C_CL_MAC_PS, 2967 H2C_FUNC_P2P_ACT, 0, 0, 2968 H2C_P2P_ACT_LEN); 2969 2970 ret = rtw89_h2c_tx(rtwdev, skb, false); 2971 if (ret) { 2972 rtw89_err(rtwdev, "failed to send h2c\n"); 2973 goto fail; 2974 } 2975 2976 return 0; 2977 fail: 2978 dev_kfree_skb_any(skb); 2979 2980 return ret; 2981 } 2982 2983 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2984 struct sk_buff *skb) 2985 { 2986 const struct rtw89_chip_info *chip = rtwdev->chip; 2987 struct rtw89_hal *hal = &rtwdev->hal; 2988 u8 ntx_path; 2989 u8 map_b; 2990 2991 if (chip->rf_path_num == 1) { 2992 ntx_path = RF_A; 2993 map_b = 0; 2994 } else { 2995 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2996 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2997 } 2998 2999 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 3000 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 3001 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 3002 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 3003 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 3004 } 3005 3006 #define H2C_CMC_TBL_LEN 68 3007 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 3008 struct rtw89_vif_link *rtwvif_link, 3009 struct rtw89_sta_link *rtwsta_link) 3010 { 3011 const struct rtw89_chip_info *chip = rtwdev->chip; 3012 u8 macid = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3013 struct sk_buff *skb; 3014 int ret; 3015 3016 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3017 if (!skb) { 3018 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3019 return -ENOMEM; 3020 } 3021 skb_put(skb, H2C_CMC_TBL_LEN); 3022 SET_CTRL_INFO_MACID(skb->data, macid); 3023 SET_CTRL_INFO_OPERATION(skb->data, 1); 3024 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3025 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 3026 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3027 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 3028 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 3029 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 3030 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 3031 } 3032 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 3033 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 3034 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3035 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3036 3037 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3038 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3039 chip->h2c_cctl_func_id, 0, 1, 3040 H2C_CMC_TBL_LEN); 3041 3042 ret = rtw89_h2c_tx(rtwdev, skb, false); 3043 if (ret) { 3044 rtw89_err(rtwdev, "failed to send h2c\n"); 3045 goto fail; 3046 } 3047 3048 return 0; 3049 fail: 3050 dev_kfree_skb_any(skb); 3051 3052 return ret; 3053 } 3054 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 3055 3056 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3057 struct rtw89_vif_link *rtwvif_link, 3058 struct rtw89_sta_link *rtwsta_link) 3059 { 3060 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3061 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3062 u32 len = sizeof(*h2c); 3063 struct sk_buff *skb; 3064 int ret; 3065 3066 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3067 if (!skb) { 3068 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3069 return -ENOMEM; 3070 } 3071 skb_put(skb, len); 3072 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3073 3074 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3075 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3076 3077 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 3078 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 3079 3080 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 3081 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 3082 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3083 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 3084 3085 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 3086 3087 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 3088 3089 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3090 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 3091 3092 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3093 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3094 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3095 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3096 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3097 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 3098 3099 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 3100 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 3101 3102 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 3103 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 3104 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 3105 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 3106 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 3107 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 3108 3109 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 3110 3111 h2c->w14 = le32_encode_bits(0, 
CCTLINFO_G7_W14_VO_CURR_RATE) | 3112 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 3113 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 3114 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 3115 3116 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 3117 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 3118 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 3119 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 3120 3121 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3122 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3123 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3124 len); 3125 3126 ret = rtw89_h2c_tx(rtwdev, skb, false); 3127 if (ret) { 3128 rtw89_err(rtwdev, "failed to send h2c\n"); 3129 goto fail; 3130 } 3131 3132 return 0; 3133 fail: 3134 dev_kfree_skb_any(skb); 3135 3136 return ret; 3137 } 3138 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 3139 3140 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 3141 struct ieee80211_link_sta *link_sta, 3142 u8 *pads) 3143 { 3144 bool ppe_th; 3145 u8 ppe16, ppe8; 3146 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3147 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 3148 u8 ru_bitmap; 3149 u8 n, idx, sh; 3150 u16 ppe; 3151 int i; 3152 3153 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3154 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3155 if (!ppe_th) { 3156 u8 pad; 3157 3158 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3159 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3160 3161 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3162 pads[i] = pad; 3163 3164 return; 3165 } 3166 3167 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3168 n = hweight8(ru_bitmap); 3169 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3170 3171 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3172 if (!(ru_bitmap & BIT(i))) { 3173 pads[i] = 1; 3174 continue; 3175 } 3176 3177 idx = n >> 3; 3178 sh = n & 7; 3179 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3180 3181 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3182 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3183 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3184 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3185 3186 if (ppe16 != 7 && ppe8 == 7) 3187 pads[i] = RTW89_PE_DURATION_16; 3188 else if (ppe8 != 7) 3189 pads[i] = RTW89_PE_DURATION_8; 3190 else 3191 pads[i] = RTW89_PE_DURATION_0; 3192 } 3193 } 3194 3195 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3196 struct rtw89_vif_link *rtwvif_link, 3197 struct rtw89_sta_link *rtwsta_link) 3198 { 3199 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3200 const struct rtw89_chip_info *chip = rtwdev->chip; 3201 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3202 rtwvif_link->chanctx_idx); 3203 struct ieee80211_link_sta *link_sta; 3204 struct sk_buff *skb; 3205 u8 pads[RTW89_PPE_BW_NUM]; 3206 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3207 u16 lowest_rate; 3208 int ret; 3209 3210 memset(pads, 0, sizeof(pads)); 3211 3212 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3213 if (!skb) { 3214 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3215 return -ENOMEM; 3216 } 3217 3218 rcu_read_lock(); 3219 3220 if (rtwsta_link) 3221 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3222 3223 if (rtwsta_link && link_sta->he_cap.has_he) 3224 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3225 3226 if (vif->p2p) 3227 lowest_rate = RTW89_HW_RATE_OFDM6; 3228 else if (chan->band_type == RTW89_BAND_2G) 3229 lowest_rate = RTW89_HW_RATE_CCK1; 3230 else 3231 lowest_rate = RTW89_HW_RATE_OFDM6; 3232 3233 skb_put(skb, H2C_CMC_TBL_LEN); 3234 SET_CTRL_INFO_MACID(skb->data, mac_id); 3235 SET_CTRL_INFO_OPERATION(skb->data, 1); 3236 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3237 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3238 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3239 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3240 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3241 if (vif->type == NL80211_IFTYPE_STATION) 3242 SET_CMC_TBL_ULDL(skb->data, 1); 3243 else 3244 SET_CMC_TBL_ULDL(skb->data, 0); 3245 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3246 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3247 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3248 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3249 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3250 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3251 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3252 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3253 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3254 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3255 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3256 } 3257 if (rtwsta_link) 3258 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3259 link_sta->he_cap.has_he); 3260 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3261 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3262 3263 rcu_read_unlock(); 3264 3265 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3266 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3267 chip->h2c_cctl_func_id, 0, 1, 3268 H2C_CMC_TBL_LEN); 3269 3270 ret = rtw89_h2c_tx(rtwdev, skb, false); 3271 if (ret) { 3272 rtw89_err(rtwdev, "failed to send h2c\n"); 3273 goto fail; 3274 } 3275 3276 return 0; 3277 fail: 3278 dev_kfree_skb_any(skb); 3279 3280 return ret; 3281 } 3282 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3283 3284 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3285 struct ieee80211_link_sta *link_sta, 3286 u8 *pads) 3287 { 3288 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3289 u16 ppe_thres_hdr; 3290 u8 ppe16, ppe8; 3291 u8 n, idx, sh; 3292 u8 ru_bitmap; 3293 bool ppe_th; 3294 u16 ppe; 3295 int i; 3296 3297 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3298 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3299 if (!ppe_th) { 3300 u8 pad; 3301 3302 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3303 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3304 3305 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3306 pads[i] = pad; 3307 3308 return; 3309 } 3310 3311 ppe_thres_hdr = 
get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3312 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3313 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3314 n = hweight8(ru_bitmap); 3315 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3316 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3317 3318 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3319 if (!(ru_bitmap & BIT(i))) { 3320 pads[i] = 1; 3321 continue; 3322 } 3323 3324 idx = n >> 3; 3325 sh = n & 7; 3326 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3327 3328 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3329 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3330 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3331 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3332 3333 if (ppe16 != 7 && ppe8 == 7) 3334 pads[i] = RTW89_PE_DURATION_16_20; 3335 else if (ppe8 != 7) 3336 pads[i] = RTW89_PE_DURATION_8; 3337 else 3338 pads[i] = RTW89_PE_DURATION_0; 3339 } 3340 } 3341 3342 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3343 struct rtw89_vif_link *rtwvif_link, 3344 struct rtw89_sta_link *rtwsta_link) 3345 { 3346 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3347 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3348 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3349 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3350 struct ieee80211_bss_conf *bss_conf; 3351 struct ieee80211_link_sta *link_sta; 3352 u8 pads[RTW89_PPE_BW_NUM]; 3353 u32 len = sizeof(*h2c); 3354 struct sk_buff *skb; 3355 u16 lowest_rate; 3356 int ret; 3357 3358 memset(pads, 0, sizeof(pads)); 3359 3360 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3361 if (!skb) { 3362 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3363 return -ENOMEM; 3364 } 3365 3366 rcu_read_lock(); 3367 3368 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3369 3370 if (rtwsta_link) { 3371 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3372 3373 if (link_sta->eht_cap.has_eht) 3374 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3375 else if (link_sta->he_cap.has_he) 3376 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3377 } 3378 3379 if (vif->p2p) 3380 lowest_rate = RTW89_HW_RATE_OFDM6; 3381 else if (chan->band_type == RTW89_BAND_2G) 3382 lowest_rate = RTW89_HW_RATE_CCK1; 3383 else 3384 lowest_rate = RTW89_HW_RATE_OFDM6; 3385 3386 skb_put(skb, len); 3387 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3388 3389 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3390 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3391 3392 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3393 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3394 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3395 CCTLINFO_G7_W0_DISDATAFB); 3396 3397 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3398 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3399 3400 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3401 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3402 3403 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3404 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3405 3406 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3407 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3408 3409 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3410 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3411 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3412 
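/* m4 marks DATA_DCM for update while w4 leaves it at 0, so in AP mode DCM is explicitly disabled for data frames rather than left untouched (reading of the m-word mask semantics follows their use elsewhere in this file). */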
} 3413 3414 if (bss_conf->eht_support) { 3415 u16 punct = bss_conf->chanreq.oper.punctured; 3416 3417 h2c->w4 |= le32_encode_bits(~punct, 3418 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3419 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3420 } 3421 3422 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3423 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3424 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3425 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3426 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3427 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3428 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3429 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3430 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3431 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3432 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3433 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3434 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3435 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3436 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3437 3438 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, 3439 CCTLINFO_G7_W6_ULDL); 3440 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3441 3442 if (rtwsta_link) { 3443 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3444 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3445 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3446 } 3447 3448 rcu_read_unlock(); 3449 3450 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3451 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3452 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3453 len); 3454 3455 ret = rtw89_h2c_tx(rtwdev, skb, false); 3456 if (ret) { 3457 rtw89_err(rtwdev, "failed to send h2c\n"); 3458 goto fail; 3459 } 3460 3461 return 0; 3462 fail: 3463 dev_kfree_skb_any(skb); 3464 3465 return ret; 3466 } 3467 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3468 3469 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3470 struct rtw89_vif_link *rtwvif_link, 3471 struct rtw89_sta_link *rtwsta_link) 3472 { 3473 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3474 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3475 u32 len = sizeof(*h2c); 3476 struct sk_buff *skb; 3477 u16 agg_num = 0; 3478 u8 ba_bmap = 0; 3479 int ret; 3480 u8 tid; 3481 3482 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3483 if (!skb) { 3484 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3485 return -ENOMEM; 3486 } 3487 skb_put(skb, len); 3488 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3489 3490 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3491 if (agg_num == 0) 3492 agg_num = rtwsta->ampdu_params[tid].agg_num; 3493 else 3494 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3495 } 3496 3497 if (agg_num <= 0x20) 3498 ba_bmap = 3; 3499 else if (agg_num > 0x20 && agg_num <= 0x40) 3500 ba_bmap = 0; 3501 else if (agg_num > 0x40 && agg_num <= 0x80) 3502 ba_bmap = 1; 3503 else if (agg_num > 0x80 && agg_num <= 0x100) 3504 ba_bmap = 2; 3505 else if (agg_num > 0x100 && agg_num <= 0x200) 3506 ba_bmap = 4; 3507 else if (agg_num > 0x200 && agg_num <= 0x400) 3508 ba_bmap = 5; 3509 3510 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3511 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3512 3513 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3514 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3515 3516 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3517 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3518 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3519 len); 3520 3521 ret = rtw89_h2c_tx(rtwdev, skb, false); 3522 if (ret) { 3523 rtw89_err(rtwdev, "failed to send 
h2c\n"); 3524 goto fail; 3525 } 3526 3527 return 0; 3528 fail: 3529 dev_kfree_skb_any(skb); 3530 3531 return ret; 3532 } 3533 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3534 3535 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3536 struct rtw89_sta_link *rtwsta_link) 3537 { 3538 const struct rtw89_chip_info *chip = rtwdev->chip; 3539 struct sk_buff *skb; 3540 int ret; 3541 3542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3543 if (!skb) { 3544 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3545 return -ENOMEM; 3546 } 3547 skb_put(skb, H2C_CMC_TBL_LEN); 3548 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3549 SET_CTRL_INFO_OPERATION(skb->data, 1); 3550 if (rtwsta_link->cctl_tx_time) { 3551 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3552 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3553 } 3554 if (rtwsta_link->cctl_tx_retry_limit) { 3555 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3556 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3557 } 3558 3559 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3560 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3561 chip->h2c_cctl_func_id, 0, 1, 3562 H2C_CMC_TBL_LEN); 3563 3564 ret = rtw89_h2c_tx(rtwdev, skb, false); 3565 if (ret) { 3566 rtw89_err(rtwdev, "failed to send h2c\n"); 3567 goto fail; 3568 } 3569 3570 return 0; 3571 fail: 3572 dev_kfree_skb_any(skb); 3573 3574 return ret; 3575 } 3576 3577 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3578 struct rtw89_sta_link *rtwsta_link) 3579 { 3580 const struct rtw89_chip_info *chip = rtwdev->chip; 3581 struct sk_buff *skb; 3582 int ret; 3583 3584 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3585 return 0; 3586 3587 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3588 if (!skb) { 3589 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3590 return -ENOMEM; 3591 } 3592 skb_put(skb, H2C_CMC_TBL_LEN); 3593 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3594 SET_CTRL_INFO_OPERATION(skb->data, 1); 3595 3596 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3597 3598 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3599 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3600 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3601 H2C_CMC_TBL_LEN); 3602 3603 ret = rtw89_h2c_tx(rtwdev, skb, false); 3604 if (ret) { 3605 rtw89_err(rtwdev, "failed to send h2c\n"); 3606 goto fail; 3607 } 3608 3609 return 0; 3610 fail: 3611 dev_kfree_skb_any(skb); 3612 3613 return ret; 3614 } 3615 3616 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3617 struct rtw89_vif_link *rtwvif_link) 3618 { 3619 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3620 rtwvif_link->chanctx_idx); 3621 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3622 struct rtw89_h2c_bcn_upd *h2c; 3623 struct sk_buff *skb_beacon; 3624 struct ieee80211_hdr *hdr; 3625 u32 len = sizeof(*h2c); 3626 struct sk_buff *skb; 3627 int bcn_total_len; 3628 u16 beacon_rate; 3629 u16 tim_offset; 3630 void *noa_data; 3631 u8 noa_len; 3632 int ret; 3633 3634 if (vif->p2p) 3635 beacon_rate = RTW89_HW_RATE_OFDM6; 3636 else if (chan->band_type == RTW89_BAND_2G) 3637 beacon_rate = RTW89_HW_RATE_CCK1; 3638 else 3639 beacon_rate = RTW89_HW_RATE_OFDM6; 3640 3641 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3642 NULL, 0); 3643 if (!skb_beacon) { 3644 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3645 return -ENOMEM; 3646 } 3647 3648 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3649 if (noa_len && 3650 (noa_len <= skb_tailroom(skb_beacon) || 
3651 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3652 skb_put_data(skb_beacon, noa_data, noa_len); 3653 } 3654 3655 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3656 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3657 3658 bcn_total_len = len + skb_beacon->len; 3659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3660 if (!skb) { 3661 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3662 dev_kfree_skb_any(skb_beacon); 3663 return -ENOMEM; 3664 } 3665 skb_put(skb, len); 3666 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3667 3668 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3669 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3670 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3671 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3672 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3673 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3674 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3675 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3676 3677 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3678 dev_kfree_skb_any(skb_beacon); 3679 3680 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3681 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3682 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3683 bcn_total_len); 3684 3685 ret = rtw89_h2c_tx(rtwdev, skb, false); 3686 if (ret) { 3687 rtw89_err(rtwdev, "failed to send h2c\n"); 3688 dev_kfree_skb_any(skb); 3689 return ret; 3690 } 3691 3692 return 0; 3693 } 3694 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3695 3696 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3697 struct rtw89_vif_link *rtwvif_link) 3698 { 3699 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3700 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3701 struct rtw89_h2c_bcn_upd_be *h2c; 3702 struct sk_buff *skb_beacon; 3703 struct ieee80211_hdr *hdr; 3704 u32 len = sizeof(*h2c); 3705 struct sk_buff *skb; 3706 int bcn_total_len; 3707 u16 beacon_rate; 3708 u16 tim_offset; 3709 void *noa_data; 3710 u8 noa_len; 3711 int ret; 3712 3713 if (vif->p2p) 3714 beacon_rate = RTW89_HW_RATE_OFDM6; 3715 else if (chan->band_type == RTW89_BAND_2G) 3716 beacon_rate = RTW89_HW_RATE_CCK1; 3717 else 3718 beacon_rate = RTW89_HW_RATE_OFDM6; 3719 3720 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3721 NULL, 0); 3722 if (!skb_beacon) { 3723 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3724 return -ENOMEM; 3725 } 3726 3727 noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data); 3728 if (noa_len && 3729 (noa_len <= skb_tailroom(skb_beacon) || 3730 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3731 skb_put_data(skb_beacon, noa_data, noa_len); 3732 } 3733 3734 hdr = (struct ieee80211_hdr *)skb_beacon->data; 3735 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3736 3737 bcn_total_len = len + skb_beacon->len; 3738 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3739 if (!skb) { 3740 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3741 dev_kfree_skb_any(skb_beacon); 3742 return -ENOMEM; 3743 } 3744 skb_put(skb, len); 3745 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3746 3747 h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3748 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3749 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3750 le32_encode_bits(tim_offset 
| BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3751 h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3752 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3753 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3754 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3755 3756 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3757 dev_kfree_skb_any(skb_beacon); 3758 3759 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3760 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3761 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3762 bcn_total_len); 3763 3764 ret = rtw89_h2c_tx(rtwdev, skb, false); 3765 if (ret) { 3766 rtw89_err(rtwdev, "failed to send h2c\n"); 3767 goto fail; 3768 } 3769 3770 return 0; 3771 3772 fail: 3773 dev_kfree_skb_any(skb); 3774 3775 return ret; 3776 } 3777 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3778 3779 #define H2C_ROLE_MAINTAIN_LEN 4 3780 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3781 struct rtw89_vif_link *rtwvif_link, 3782 struct rtw89_sta_link *rtwsta_link, 3783 enum rtw89_upd_mode upd_mode) 3784 { 3785 struct sk_buff *skb; 3786 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3787 u8 self_role; 3788 int ret; 3789 3790 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3791 if (rtwsta_link) 3792 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3793 else 3794 self_role = rtwvif_link->self_role; 3795 } else { 3796 self_role = rtwvif_link->self_role; 3797 } 3798 3799 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3800 if (!skb) { 3801 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3802 return -ENOMEM; 3803 } 3804 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3805 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3806 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3807 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3808 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3809 3810 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3811 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3812 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3813 H2C_ROLE_MAINTAIN_LEN); 3814 3815 ret = rtw89_h2c_tx(rtwdev, skb, false); 3816 if (ret) { 3817 rtw89_err(rtwdev, "failed to send h2c\n"); 3818 goto fail; 3819 } 3820 3821 return 0; 3822 fail: 3823 dev_kfree_skb_any(skb); 3824 3825 return ret; 3826 } 3827 3828 static enum rtw89_fw_sta_type 3829 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3830 struct rtw89_sta_link *rtwsta_link) 3831 { 3832 struct ieee80211_bss_conf *bss_conf; 3833 struct ieee80211_link_sta *link_sta; 3834 enum rtw89_fw_sta_type type; 3835 3836 rcu_read_lock(); 3837 3838 if (!rtwsta_link) 3839 goto by_vif; 3840 3841 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3842 3843 if (link_sta->eht_cap.has_eht) 3844 type = RTW89_FW_BE_STA; 3845 else if (link_sta->he_cap.has_he) 3846 type = RTW89_FW_AX_STA; 3847 else 3848 type = RTW89_FW_N_AC_STA; 3849 3850 goto out; 3851 3852 by_vif: 3853 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3854 3855 if (bss_conf->eht_support) 3856 type = RTW89_FW_BE_STA; 3857 else if (bss_conf->he_support) 3858 type = RTW89_FW_AX_STA; 3859 else 3860 type = RTW89_FW_N_AC_STA; 3861 3862 out: 3863 rcu_read_unlock(); 3864 3865 return type; 3866 } 3867 3868 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3869 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3870 { 3871 struct sk_buff *skb; 3872 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3873 u8 self_role = rtwvif_link->self_role; 3874 enum rtw89_fw_sta_type sta_type; 3875 u8 net_type = rtwvif_link->net_type; 3876 struct rtw89_h2c_join_v1 *h2c_v1; 3877 struct rtw89_h2c_join *h2c; 3878 u32 len = sizeof(*h2c); 3879 bool format_v1 = false; 3880 int ret; 3881 3882 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3883 len = sizeof(*h2c_v1); 3884 format_v1 = true; 3885 } 3886 3887 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3888 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3889 net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type; 3890 } 3891 3892 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3893 if (!skb) { 3894 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3895 return -ENOMEM; 3896 } 3897 skb_put(skb, len); 3898 h2c = (struct rtw89_h2c_join *)skb->data; 3899 3900 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3901 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3902 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3903 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3904 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3905 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3906 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3907 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3908 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3909 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3910 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3911 le32_encode_bits(rtwvif_link->wifi_role, 3912 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3913 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3914 3915 if (!format_v1) 3916 goto done; 3917 3918 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3919 3920 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3921 3922 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3923 h2c_v1->w2 = 0; 3924 3925 done: 3926 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3927 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3928 H2C_FUNC_MAC_JOININFO, 0, 1, 3929 len); 3930 3931 ret = rtw89_h2c_tx(rtwdev, skb, false); 3932 if (ret) { 3933 rtw89_err(rtwdev, "failed to send h2c\n"); 3934 goto fail; 3935 } 3936 3937 return 0; 3938 fail: 3939 dev_kfree_skb_any(skb); 3940 3941 return ret; 3942 } 3943 3944 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3945 { 3946 struct rtw89_h2c_notify_dbcc *h2c; 3947 u32 len = sizeof(*h2c); 3948 struct sk_buff *skb; 3949 int ret; 3950 3951 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3952 if (!skb) { 3953 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3954 return -ENOMEM; 3955 } 3956 skb_put(skb, len); 3957 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3958 3959 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3960 3961 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3962 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3963 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3964 len); 3965 3966 ret = rtw89_h2c_tx(rtwdev, skb, false); 3967 if (ret) { 3968 rtw89_err(rtwdev, "failed to send h2c\n"); 3969 goto fail; 3970 } 3971 3972 return 0; 3973 fail: 3974 dev_kfree_skb_any(skb); 3975 3976 return ret; 3977 } 3978 3979 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3980 bool pause) 3981 { 3982 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3983 struct rtw89_fw_macid_pause_grp *h2c; 3984 __le32 set = cpu_to_le32(BIT(sh)); 3985 u8 h2c_macid_pause_id; 3986 struct 
sk_buff *skb; 3987 u32 len; 3988 int ret; 3989 3990 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3991 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3992 len = sizeof(*h2c_new); 3993 } else { 3994 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3995 len = sizeof(*h2c); 3996 } 3997 3998 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3999 if (!skb) { 4000 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 4001 return -ENOMEM; 4002 } 4003 skb_put(skb, len); 4004 4005 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 4006 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 4007 4008 h2c_new->n[0].pause_mask_grp[grp] = set; 4009 h2c_new->n[0].sleep_mask_grp[grp] = set; 4010 if (pause) { 4011 h2c_new->n[0].pause_grp[grp] = set; 4012 h2c_new->n[0].sleep_grp[grp] = set; 4013 } 4014 } else { 4015 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 4016 4017 h2c->mask_grp[grp] = set; 4018 if (pause) 4019 h2c->pause_grp[grp] = set; 4020 } 4021 4022 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4023 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4024 h2c_macid_pause_id, 1, 0, 4025 len); 4026 4027 ret = rtw89_h2c_tx(rtwdev, skb, false); 4028 if (ret) { 4029 rtw89_err(rtwdev, "failed to send h2c\n"); 4030 goto fail; 4031 } 4032 4033 return 0; 4034 fail: 4035 dev_kfree_skb_any(skb); 4036 4037 return ret; 4038 } 4039 4040 #define H2C_EDCA_LEN 12 4041 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 4042 u8 ac, u32 val) 4043 { 4044 struct sk_buff *skb; 4045 int ret; 4046 4047 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 4048 if (!skb) { 4049 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 4050 return -ENOMEM; 4051 } 4052 skb_put(skb, H2C_EDCA_LEN); 4053 RTW89_SET_EDCA_SEL(skb->data, 0); 4054 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 4055 RTW89_SET_EDCA_WMM(skb->data, 0); 4056 RTW89_SET_EDCA_AC(skb->data, ac); 4057 RTW89_SET_EDCA_PARAM(skb->data, val); 4058 4059 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4060 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4061 H2C_FUNC_USR_EDCA, 0, 1, 4062 H2C_EDCA_LEN); 4063 4064 ret = rtw89_h2c_tx(rtwdev, skb, false); 4065 if (ret) { 4066 rtw89_err(rtwdev, "failed to send h2c\n"); 4067 goto fail; 4068 } 4069 4070 return 0; 4071 fail: 4072 dev_kfree_skb_any(skb); 4073 4074 return ret; 4075 } 4076 4077 #define H2C_TSF32_TOGL_LEN 4 4078 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 4079 struct rtw89_vif_link *rtwvif_link, 4080 bool en) 4081 { 4082 struct sk_buff *skb; 4083 u16 early_us = en ? 
2000 : 0; 4084 u8 *cmd; 4085 int ret; 4086 4087 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 4088 if (!skb) { 4089 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 4090 return -ENOMEM; 4091 } 4092 skb_put(skb, H2C_TSF32_TOGL_LEN); 4093 cmd = skb->data; 4094 4095 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 4096 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 4097 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 4098 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 4099 4100 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4101 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4102 H2C_FUNC_TSF32_TOGL, 0, 0, 4103 H2C_TSF32_TOGL_LEN); 4104 4105 ret = rtw89_h2c_tx(rtwdev, skb, false); 4106 if (ret) { 4107 rtw89_err(rtwdev, "failed to send h2c\n"); 4108 goto fail; 4109 } 4110 4111 return 0; 4112 fail: 4113 dev_kfree_skb_any(skb); 4114 4115 return ret; 4116 } 4117 4118 #define H2C_OFLD_CFG_LEN 8 4119 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 4120 { 4121 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 4122 struct sk_buff *skb; 4123 int ret; 4124 4125 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 4126 if (!skb) { 4127 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 4128 return -ENOMEM; 4129 } 4130 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 4131 4132 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4133 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4134 H2C_FUNC_OFLD_CFG, 0, 1, 4135 H2C_OFLD_CFG_LEN); 4136 4137 ret = rtw89_h2c_tx(rtwdev, skb, false); 4138 if (ret) { 4139 rtw89_err(rtwdev, "failed to send h2c\n"); 4140 goto fail; 4141 } 4142 4143 return 0; 4144 fail: 4145 dev_kfree_skb_any(skb); 4146 4147 return ret; 4148 } 4149 4150 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 4151 { 4152 struct rtw89_h2c_tx_duty *h2c; 4153 u32 len = sizeof(*h2c); 4154 struct sk_buff *skb; 4155 u16 pause, active; 4156 int ret; 4157 4158 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4159 if (!skb) { 4160 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4161 return -ENOMEM; 4162 } 4163 4164 skb_put(skb, len); 4165 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4166 4167 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4168 4169 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4170 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4171 } else { 4172 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4173 pause = 100 - active; 4174 4175 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4176 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4177 } 4178 4179 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4180 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4181 H2C_FUNC_TX_DUTY, 0, 0, len); 4182 4183 ret = rtw89_h2c_tx(rtwdev, skb, false); 4184 if (ret) { 4185 rtw89_err(rtwdev, "failed to send h2c\n"); 4186 goto fail; 4187 } 4188 4189 return 0; 4190 fail: 4191 dev_kfree_skb_any(skb); 4192 4193 return ret; 4194 } 4195 4196 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4197 struct rtw89_vif_link *rtwvif_link, 4198 bool connect) 4199 { 4200 struct ieee80211_bss_conf *bss_conf; 4201 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4202 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4203 struct rtw89_h2c_bcnfltr *h2c; 4204 u32 len = sizeof(*h2c); 4205 struct sk_buff *skb; 4206 int ret; 4207 4208 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4209 return -EINVAL; 4210 4211 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4212 return -EINVAL; 
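/* Read the CQM hysteresis/threshold carried by the link's bss_conf under RCU;
 * keep the driver defaults set above when userspace left them at zero.
 */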
4213 4214 rcu_read_lock(); 4215 4216 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4217 4218 if (bss_conf->cqm_rssi_hyst) 4219 hyst = bss_conf->cqm_rssi_hyst; 4220 if (bss_conf->cqm_rssi_thold) 4221 thold = bss_conf->cqm_rssi_thold; 4222 4223 rcu_read_unlock(); 4224 4225 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4226 if (!skb) { 4227 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4228 return -ENOMEM; 4229 } 4230 4231 skb_put(skb, len); 4232 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4233 4234 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4235 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4236 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4237 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4238 RTW89_H2C_BCNFLTR_W0_MODE) | 4239 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 4240 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4241 le32_encode_bits(thold + MAX_RSSI, 4242 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4243 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4244 4245 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4246 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4247 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4248 4249 ret = rtw89_h2c_tx(rtwdev, skb, false); 4250 if (ret) { 4251 rtw89_err(rtwdev, "failed to send h2c\n"); 4252 goto fail; 4253 } 4254 4255 return 0; 4256 fail: 4257 dev_kfree_skb_any(skb); 4258 4259 return ret; 4260 } 4261 4262 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4263 struct rtw89_rx_phy_ppdu *phy_ppdu) 4264 { 4265 struct rtw89_h2c_ofld_rssi *h2c; 4266 u32 len = sizeof(*h2c); 4267 struct sk_buff *skb; 4268 s8 rssi; 4269 int ret; 4270 4271 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4272 return -EINVAL; 4273 4274 if (!phy_ppdu) 4275 return -EINVAL; 4276 4277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4278 if (!skb) { 4279 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4280 return -ENOMEM; 4281 } 4282 4283 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4284 skb_put(skb, len); 4285 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4286 4287 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4288 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4289 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4290 4291 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4292 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4293 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4294 4295 ret = rtw89_h2c_tx(rtwdev, skb, false); 4296 if (ret) { 4297 rtw89_err(rtwdev, "failed to send h2c\n"); 4298 goto fail; 4299 } 4300 4301 return 0; 4302 fail: 4303 dev_kfree_skb_any(skb); 4304 4305 return ret; 4306 } 4307 4308 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4309 { 4310 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4311 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4312 struct rtw89_h2c_ofld *h2c; 4313 u32 len = sizeof(*h2c); 4314 struct sk_buff *skb; 4315 int ret; 4316 4317 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4318 return -EINVAL; 4319 4320 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4321 if (!skb) { 4322 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4323 return -ENOMEM; 4324 } 4325 4326 skb_put(skb, len); 4327 h2c = (struct rtw89_h2c_ofld *)skb->data; 4328 4329 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4330 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4331 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4332 4333 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4334 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4335 H2C_FUNC_OFLD_TP, 0, 1, len); 4336 4337 ret = rtw89_h2c_tx(rtwdev, skb, false); 4338 if (ret) { 4339 rtw89_err(rtwdev, "failed to send h2c\n"); 4340 goto fail; 4341 } 4342 4343 return 0; 4344 fail: 4345 dev_kfree_skb_any(skb); 4346 4347 return ret; 4348 } 4349 4350 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4351 { 4352 const struct rtw89_chip_info *chip = rtwdev->chip; 4353 struct rtw89_h2c_ra_v1 *h2c_v1; 4354 struct rtw89_h2c_ra *h2c; 4355 u32 len = sizeof(*h2c); 4356 bool format_v1 = false; 4357 struct sk_buff *skb; 4358 int ret; 4359 4360 if (chip->chip_gen == RTW89_CHIP_BE) { 4361 len = sizeof(*h2c_v1); 4362 format_v1 = true; 4363 } 4364 4365 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4366 if (!skb) { 4367 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4368 return -ENOMEM; 4369 } 4370 skb_put(skb, len); 4371 h2c = (struct rtw89_h2c_ra *)skb->data; 4372 rtw89_debug(rtwdev, RTW89_DBG_RA, 4373 "ra cmd msk: %llx ", ra->ra_mask); 4374 4375 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4376 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4377 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4378 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4379 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4380 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4381 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4382 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4383 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4384 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4385 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4386 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4387 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4388 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4389 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4390 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4391 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4392 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4393 4394 if (!format_v1) 4395 goto csi; 4396 4397 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4398 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4399 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4400 4401 csi: 4402 if (!csi) 4403 goto done; 4404 4405 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4406 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4407 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4408 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4409 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4410 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4411 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4412 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4413 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4414 4415 done: 4416 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4417 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4418 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4419 len); 4420 4421 ret = rtw89_h2c_tx(rtwdev, skb, false); 4422 if (ret) { 4423 rtw89_err(rtwdev, "failed 
to send h2c\n"); 4424 goto fail; 4425 } 4426 4427 return 0; 4428 fail: 4429 dev_kfree_skb_any(skb); 4430 4431 return ret; 4432 } 4433 4434 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4435 { 4436 struct rtw89_btc *btc = &rtwdev->btc; 4437 struct rtw89_btc_dm *dm = &btc->dm; 4438 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4439 struct rtw89_btc_module *module = &init_info->module; 4440 struct rtw89_btc_ant_info *ant = &module->ant; 4441 struct rtw89_h2c_cxinit *h2c; 4442 u32 len = sizeof(*h2c); 4443 struct sk_buff *skb; 4444 int ret; 4445 4446 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4447 if (!skb) { 4448 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4449 return -ENOMEM; 4450 } 4451 skb_put(skb, len); 4452 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4453 4454 h2c->hdr.type = type; 4455 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4456 4457 h2c->ant_type = ant->type; 4458 h2c->ant_num = ant->num; 4459 h2c->ant_iso = ant->isolation; 4460 h2c->ant_info = 4461 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4462 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4463 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4464 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4465 4466 h2c->mod_rfe = module->rfe_type; 4467 h2c->mod_cv = module->cv; 4468 h2c->mod_info = 4469 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4470 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4471 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4472 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4473 h2c->mod_adie_kt = module->kt_ver_adie; 4474 h2c->wl_gch = init_info->wl_guard_ch; 4475 4476 h2c->info = 4477 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4478 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4479 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4480 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4481 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4482 4483 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4484 H2C_CAT_OUTSRC, BTFC_SET, 4485 SET_DRV_INFO, 0, 0, 4486 len); 4487 4488 ret = rtw89_h2c_tx(rtwdev, skb, false); 4489 if (ret) { 4490 rtw89_err(rtwdev, "failed to send h2c\n"); 4491 goto fail; 4492 } 4493 4494 return 0; 4495 fail: 4496 dev_kfree_skb_any(skb); 4497 4498 return ret; 4499 } 4500 4501 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4502 { 4503 struct rtw89_btc *btc = &rtwdev->btc; 4504 struct rtw89_btc_dm *dm = &btc->dm; 4505 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4506 struct rtw89_h2c_cxinit_v7 *h2c; 4507 u32 len = sizeof(*h2c); 4508 struct sk_buff *skb; 4509 int ret; 4510 4511 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4512 if (!skb) { 4513 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4514 return -ENOMEM; 4515 } 4516 skb_put(skb, len); 4517 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4518 4519 h2c->hdr.type = type; 4520 h2c->hdr.ver = btc->ver->fcxinit; 4521 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4522 h2c->init = *init_info; 4523 4524 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4525 H2C_CAT_OUTSRC, BTFC_SET, 4526 SET_DRV_INFO, 0, 0, 4527 len); 4528 4529 ret = rtw89_h2c_tx(rtwdev, skb, false); 4530 if (ret) { 4531 rtw89_err(rtwdev, "failed to send h2c\n"); 4532 goto fail; 4533 } 
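/* On success the skb was handed off by rtw89_h2c_tx(); the fail label below
 * only frees it when the send itself could not be issued.
 */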
4534 4535 return 0; 4536 fail: 4537 dev_kfree_skb_any(skb); 4538 4539 return ret; 4540 } 4541 4542 #define PORT_DATA_OFFSET 4 4543 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4544 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4545 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4546 4547 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4548 { 4549 struct rtw89_btc *btc = &rtwdev->btc; 4550 const struct rtw89_btc_ver *ver = btc->ver; 4551 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4552 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4553 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4554 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4555 struct sk_buff *skb; 4556 u32 len; 4557 u8 offset = 0; 4558 u8 *cmd; 4559 int ret; 4560 int i; 4561 4562 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4563 4564 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4565 if (!skb) { 4566 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4567 return -ENOMEM; 4568 } 4569 skb_put(skb, len); 4570 cmd = skb->data; 4571 4572 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4573 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4574 4575 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4576 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4577 4578 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4579 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4580 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4581 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4582 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4583 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4584 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4585 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4586 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4587 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4588 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4589 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4590 4591 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4592 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4593 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4594 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4595 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4596 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4597 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4598 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4599 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4600 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4601 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4602 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4603 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4604 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4605 } 4606 4607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4608 H2C_CAT_OUTSRC, BTFC_SET, 4609 SET_DRV_INFO, 0, 0, 4610 len); 4611 4612 ret = rtw89_h2c_tx(rtwdev, skb, false); 4613 if (ret) { 4614 rtw89_err(rtwdev, "failed to send h2c\n"); 4615 goto fail; 4616 } 4617 4618 return 0; 4619 fail: 4620 dev_kfree_skb_any(skb); 4621 4622 return ret; 4623 } 4624 4625 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4626 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4627 4628 int 
rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4629 { 4630 struct rtw89_btc *btc = &rtwdev->btc; 4631 const struct rtw89_btc_ver *ver = btc->ver; 4632 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4633 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4634 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4635 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4636 struct sk_buff *skb; 4637 u32 len; 4638 u8 *cmd, offset; 4639 int ret; 4640 int i; 4641 4642 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4643 4644 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4645 if (!skb) { 4646 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4647 return -ENOMEM; 4648 } 4649 skb_put(skb, len); 4650 cmd = skb->data; 4651 4652 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4653 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4654 4655 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4656 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4657 4658 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4659 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4660 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4661 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4662 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4663 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4664 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4665 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4666 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4667 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4668 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4669 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4670 4671 offset = PORT_DATA_OFFSET; 4672 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4673 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4674 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4675 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4676 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4677 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4678 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4679 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4680 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4681 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4682 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4683 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4684 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4685 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4686 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4687 } 4688 4689 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4690 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4691 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4692 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4693 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4694 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4695 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4696 4697 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4698 H2C_CAT_OUTSRC, BTFC_SET, 4699 SET_DRV_INFO, 0, 0, 4700 len); 4701 4702 ret = rtw89_h2c_tx(rtwdev, skb, false); 
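/* The trailing "false" indicates this is a regular H2C command rather than a
 * firmware-download (FWDL) frame.
 */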
4703 if (ret) { 4704 rtw89_err(rtwdev, "failed to send h2c\n"); 4705 goto fail; 4706 } 4707 4708 return 0; 4709 fail: 4710 dev_kfree_skb_any(skb); 4711 4712 return ret; 4713 } 4714 4715 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4716 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4717 4718 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4719 { 4720 struct rtw89_btc *btc = &rtwdev->btc; 4721 const struct rtw89_btc_ver *ver = btc->ver; 4722 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4723 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4724 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4725 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4726 struct sk_buff *skb; 4727 u32 len; 4728 u8 *cmd, offset; 4729 int ret; 4730 int i; 4731 4732 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4733 4734 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4735 if (!skb) { 4736 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4737 return -ENOMEM; 4738 } 4739 skb_put(skb, len); 4740 cmd = skb->data; 4741 4742 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4743 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4744 4745 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4746 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4747 4748 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4749 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4750 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4751 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4752 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4753 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4754 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4755 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4756 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4757 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4758 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4759 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4760 4761 offset = PORT_DATA_OFFSET; 4762 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4763 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4764 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4765 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4766 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4767 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4768 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4769 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4770 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4771 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4772 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4773 } 4774 4775 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4776 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4777 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4778 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4779 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4780 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4781 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4782 4783 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4784 H2C_CAT_OUTSRC, BTFC_SET, 4785 
SET_DRV_INFO, 0, 0, 4786 len); 4787 4788 ret = rtw89_h2c_tx(rtwdev, skb, false); 4789 if (ret) { 4790 rtw89_err(rtwdev, "failed to send h2c\n"); 4791 goto fail; 4792 } 4793 4794 return 0; 4795 fail: 4796 dev_kfree_skb_any(skb); 4797 4798 return ret; 4799 } 4800 4801 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4802 { 4803 struct rtw89_btc *btc = &rtwdev->btc; 4804 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4805 struct rtw89_h2c_cxrole_v7 *h2c; 4806 u32 len = sizeof(*h2c); 4807 struct sk_buff *skb; 4808 int ret; 4809 4810 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4811 if (!skb) { 4812 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4813 return -ENOMEM; 4814 } 4815 skb_put(skb, len); 4816 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4817 4818 h2c->hdr.type = type; 4819 h2c->hdr.ver = btc->ver->fwlrole; 4820 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4821 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4822 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4823 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4824 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4825 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4826 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4827 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4828 4829 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4830 H2C_CAT_OUTSRC, BTFC_SET, 4831 SET_DRV_INFO, 0, 0, 4832 len); 4833 4834 ret = rtw89_h2c_tx(rtwdev, skb, false); 4835 if (ret) { 4836 rtw89_err(rtwdev, "failed to send h2c\n"); 4837 goto fail; 4838 } 4839 4840 return 0; 4841 fail: 4842 dev_kfree_skb_any(skb); 4843 4844 return ret; 4845 } 4846 4847 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4848 { 4849 struct rtw89_btc *btc = &rtwdev->btc; 4850 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4851 struct rtw89_h2c_cxrole_v8 *h2c; 4852 u32 len = sizeof(*h2c); 4853 struct sk_buff *skb; 4854 int ret; 4855 4856 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4857 if (!skb) { 4858 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4859 return -ENOMEM; 4860 } 4861 skb_put(skb, len); 4862 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4863 4864 h2c->hdr.type = type; 4865 h2c->hdr.ver = btc->ver->fwlrole; 4866 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4867 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4868 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4869 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4870 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4871 4872 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4873 H2C_CAT_OUTSRC, BTFC_SET, 4874 SET_DRV_INFO, 0, 0, 4875 len); 4876 4877 ret = rtw89_h2c_tx(rtwdev, skb, false); 4878 if (ret) { 4879 rtw89_err(rtwdev, "failed to send h2c\n"); 4880 goto fail; 4881 } 4882 4883 return 0; 4884 fail: 4885 dev_kfree_skb_any(skb); 4886 4887 return ret; 4888 } 4889 4890 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4891 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4892 { 4893 struct rtw89_btc *btc = &rtwdev->btc; 4894 const struct rtw89_btc_ver *ver = btc->ver; 4895 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4896 struct sk_buff *skb; 4897 u8 *cmd; 4898 int ret; 4899 4900 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4901 if (!skb) { 4902 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4903 return -ENOMEM; 4904 } 4905 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4906 cmd = skb->data; 
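/* Fill the cxdrv header (type and payload length), then the BTC control
 * knobs; trace_step is only filled for fcxctrl version 0 firmware.
 */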
4907 4908 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4909 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4910 4911 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4912 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4913 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4914 if (ver->fcxctrl == 0) 4915 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4916 4917 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4918 H2C_CAT_OUTSRC, BTFC_SET, 4919 SET_DRV_INFO, 0, 0, 4920 H2C_LEN_CXDRVINFO_CTRL); 4921 4922 ret = rtw89_h2c_tx(rtwdev, skb, false); 4923 if (ret) { 4924 rtw89_err(rtwdev, "failed to send h2c\n"); 4925 goto fail; 4926 } 4927 4928 return 0; 4929 fail: 4930 dev_kfree_skb_any(skb); 4931 4932 return ret; 4933 } 4934 4935 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4936 { 4937 struct rtw89_btc *btc = &rtwdev->btc; 4938 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4939 struct rtw89_h2c_cxctrl_v7 *h2c; 4940 u32 len = sizeof(*h2c); 4941 struct sk_buff *skb; 4942 int ret; 4943 4944 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4945 if (!skb) { 4946 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4947 return -ENOMEM; 4948 } 4949 skb_put(skb, len); 4950 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4951 4952 h2c->hdr.type = type; 4953 h2c->hdr.ver = btc->ver->fcxctrl; 4954 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4955 h2c->ctrl = *ctrl; 4956 4957 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4958 H2C_CAT_OUTSRC, BTFC_SET, 4959 SET_DRV_INFO, 0, 0, len); 4960 4961 ret = rtw89_h2c_tx(rtwdev, skb, false); 4962 if (ret) { 4963 rtw89_err(rtwdev, "failed to send h2c\n"); 4964 goto fail; 4965 } 4966 4967 return 0; 4968 fail: 4969 dev_kfree_skb_any(skb); 4970 4971 return ret; 4972 } 4973 4974 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4975 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4976 { 4977 struct rtw89_btc *btc = &rtwdev->btc; 4978 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4979 struct sk_buff *skb; 4980 u8 *cmd; 4981 int ret; 4982 4983 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4984 if (!skb) { 4985 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4986 return -ENOMEM; 4987 } 4988 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4989 cmd = skb->data; 4990 4991 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4992 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4993 4994 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4995 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4996 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4997 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4998 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4999 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 5000 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 5001 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 5002 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 5003 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 5004 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 5005 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 5006 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 5007 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 5008 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 5009 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 5010 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 5011 5012 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5013 H2C_CAT_OUTSRC, BTFC_SET, 5014 SET_DRV_INFO, 0, 0, 5015 
H2C_LEN_CXDRVINFO_TRX); 5016 5017 ret = rtw89_h2c_tx(rtwdev, skb, false); 5018 if (ret) { 5019 rtw89_err(rtwdev, "failed to send h2c\n"); 5020 goto fail; 5021 } 5022 5023 return 0; 5024 fail: 5025 dev_kfree_skb_any(skb); 5026 5027 return ret; 5028 } 5029 5030 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 5031 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 5032 { 5033 struct rtw89_btc *btc = &rtwdev->btc; 5034 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 5035 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 5036 struct sk_buff *skb; 5037 u8 *cmd; 5038 int ret; 5039 5040 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 5041 if (!skb) { 5042 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5043 return -ENOMEM; 5044 } 5045 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 5046 cmd = skb->data; 5047 5048 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 5049 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 5050 5051 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 5052 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 5053 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 5054 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 5055 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 5056 5057 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5058 H2C_CAT_OUTSRC, BTFC_SET, 5059 SET_DRV_INFO, 0, 0, 5060 H2C_LEN_CXDRVINFO_RFK); 5061 5062 ret = rtw89_h2c_tx(rtwdev, skb, false); 5063 if (ret) { 5064 rtw89_err(rtwdev, "failed to send h2c\n"); 5065 goto fail; 5066 } 5067 5068 return 0; 5069 fail: 5070 dev_kfree_skb_any(skb); 5071 5072 return ret; 5073 } 5074 5075 #define H2C_LEN_PKT_OFLD 4 5076 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 5077 { 5078 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5079 struct sk_buff *skb; 5080 unsigned int cond; 5081 u8 *cmd; 5082 int ret; 5083 5084 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 5085 if (!skb) { 5086 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5087 return -ENOMEM; 5088 } 5089 skb_put(skb, H2C_LEN_PKT_OFLD); 5090 cmd = skb->data; 5091 5092 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 5093 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 5094 5095 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5096 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5097 H2C_FUNC_PACKET_OFLD, 1, 1, 5098 H2C_LEN_PKT_OFLD); 5099 5100 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 5101 5102 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5103 if (ret < 0) { 5104 rtw89_debug(rtwdev, RTW89_DBG_FW, 5105 "failed to del pkt ofld: id %d, ret %d\n", 5106 id, ret); 5107 return ret; 5108 } 5109 5110 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 5111 return 0; 5112 } 5113 5114 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 5115 struct sk_buff *skb_ofld) 5116 { 5117 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5118 struct sk_buff *skb; 5119 unsigned int cond; 5120 u8 *cmd; 5121 u8 alloc_id; 5122 int ret; 5123 5124 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 5125 RTW89_MAX_PKT_OFLD_NUM); 5126 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 5127 return -ENOSPC; 5128 5129 *id = alloc_id; 5130 5131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 5132 if (!skb) { 5133 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 5134 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5135 return -ENOMEM; 5136 } 5137 skb_put(skb, 
H2C_LEN_PKT_OFLD); 5138 cmd = skb->data; 5139 5140 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 5141 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 5142 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 5143 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 5144 5145 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5146 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5147 H2C_FUNC_PACKET_OFLD, 1, 1, 5148 H2C_LEN_PKT_OFLD + skb_ofld->len); 5149 5150 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 5151 5152 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5153 if (ret < 0) { 5154 rtw89_debug(rtwdev, RTW89_DBG_FW, 5155 "failed to add pkt ofld: id %d, ret %d\n", 5156 alloc_id, ret); 5157 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5158 return ret; 5159 } 5160 5161 return 0; 5162 } 5163 5164 static 5165 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 5166 struct list_head *chan_list) 5167 { 5168 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5169 struct rtw89_h2c_chinfo_elem *elem; 5170 struct rtw89_mac_chinfo *ch_info; 5171 struct rtw89_h2c_chinfo *h2c; 5172 struct sk_buff *skb; 5173 unsigned int cond; 5174 int skb_len; 5175 int ret; 5176 5177 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5178 5179 skb_len = struct_size(h2c, elem, ch_num); 5180 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5181 if (!skb) { 5182 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5183 return -ENOMEM; 5184 } 5185 skb_put(skb, sizeof(*h2c)); 5186 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5187 5188 h2c->ch_num = ch_num; 5189 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5190 5191 list_for_each_entry(ch_info, chan_list, list) { 5192 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5193 5194 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5195 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5196 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5197 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5198 5199 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5200 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5201 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5202 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5203 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5204 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5205 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5206 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5207 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5208 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5209 5210 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5211 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5212 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5213 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5214 5215 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5216 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5217 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5218 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5219 } 5220 5221 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5222 H2C_CAT_MAC, 
H2C_CL_MAC_FW_OFLD, 5223 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5224 5225 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5226 5227 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5228 if (ret) { 5229 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5230 return ret; 5231 } 5232 5233 return 0; 5234 } 5235 5236 static 5237 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5238 struct list_head *chan_list, 5239 struct rtw89_vif_link *rtwvif_link) 5240 { 5241 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5242 struct rtw89_h2c_chinfo_elem_be *elem; 5243 struct rtw89_mac_chinfo_be *ch_info; 5244 struct rtw89_h2c_chinfo_be *h2c; 5245 struct sk_buff *skb; 5246 unsigned int cond; 5247 u8 ver = U8_MAX; 5248 int skb_len; 5249 int ret; 5250 5251 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5252 5253 skb_len = struct_size(h2c, elem, ch_num); 5254 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5255 if (!skb) { 5256 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5257 return -ENOMEM; 5258 } 5259 5260 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5261 ver = 0; 5262 5263 skb_put(skb, sizeof(*h2c)); 5264 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5265 5266 h2c->ch_num = ch_num; 5267 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5268 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5269 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5270 5271 list_for_each_entry(ch_info, chan_list, list) { 5272 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5273 5274 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5275 le32_encode_bits(ch_info->central_ch, 5276 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5277 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5278 5279 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5280 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5281 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5282 le32_encode_bits(ch_info->pause_data, 5283 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5284 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5285 le32_encode_bits(ch_info->rand_seq_num, 5286 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5287 le32_encode_bits(ch_info->notify_action, 5288 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5289 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5290 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5291 le32_encode_bits(ch_info->leave_crit, 5292 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5293 le32_encode_bits(ch_info->chkpt_timer, 5294 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5295 5296 elem->w2 = le32_encode_bits(ch_info->leave_time, 5297 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5298 le32_encode_bits(ch_info->leave_th, 5299 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5300 le32_encode_bits(ch_info->tx_pkt_ctrl, 5301 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5302 5303 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5304 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5305 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5306 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5307 5308 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5309 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5310 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5311 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5312 5313 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5314 le32_encode_bits(ch_info->fw_probe0_ssids, 5315 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5316 5317 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5318 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5319 le32_encode_bits(ch_info->fw_probe0_bssids, 5320 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5321 if (ver == 0) 5322 elem->w0 |= 5323 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5324 else 5325 elem->w7 = le32_encode_bits(ch_info->period, 5326 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5327 } 5328 5329 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5330 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5331 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5332 5333 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5334 5335 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5336 if (ret) { 5337 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5338 return ret; 5339 } 5340 5341 return 0; 5342 } 5343 5344 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5345 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5346 struct rtw89_scan_option *option, 5347 struct rtw89_vif_link *rtwvif_link, 5348 bool wowlan) 5349 { 5350 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5351 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5352 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5353 struct rtw89_h2c_scanofld *h2c; 5354 u32 len = sizeof(*h2c); 5355 struct sk_buff *skb; 5356 unsigned int cond; 5357 u64 tsf = 0; 5358 int ret; 5359 5360 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5361 if (!skb) { 5362 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5363 return -ENOMEM; 5364 } 5365 skb_put(skb, len); 5366 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5367 5368 if (option->delay) { 5369 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5370 if (ret) { 5371 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5372 scan_mode = RTW89_SCAN_IMMEDIATE; 5373 } else { 5374 scan_mode = RTW89_SCAN_DELAY; 5375 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5376 } 5377 } 5378 5379 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5380 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5381 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5382 le32_encode_bits(option->enable, 
RTW89_H2C_SCANOFLD_W0_OPERATION); 5383 5384 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5385 le32_encode_bits(option->target_ch_mode, 5386 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5387 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5388 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5389 5390 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5391 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5392 5393 if (option->target_ch_mode) { 5394 h2c->w1 |= le32_encode_bits(op->band_width, 5395 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5396 le32_encode_bits(op->primary_channel, 5397 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5398 le32_encode_bits(op->channel, 5399 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5400 h2c->w0 |= le32_encode_bits(op->band_type, 5401 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5402 } 5403 5404 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5405 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5406 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5407 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5408 5409 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5410 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5411 H2C_FUNC_SCANOFLD, 1, 1, 5412 len); 5413 5414 if (option->enable) 5415 cond = RTW89_SCANOFLD_WAIT_COND_START; 5416 else 5417 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5418 5419 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5420 if (ret) { 5421 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5422 return ret; 5423 } 5424 5425 return 0; 5426 } 5427 5428 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5429 struct rtw89_scan_option *option) 5430 { 5431 struct ieee80211_supported_band *sband; 5432 struct ieee80211_channel *chan; 5433 u8 i, idx; 5434 5435 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5436 if (!sband) { 5437 option->prohib_chan = U64_MAX; 5438 return; 5439 } 5440 5441 for (i = 0; i < sband->n_channels; i++) { 5442 chan = &sband->channels[i]; 5443 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5444 idx = (chan->hw_value - 1) / 4; 5445 option->prohib_chan |= BIT(idx); 5446 } 5447 } 5448 } 5449 5450 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5451 struct rtw89_scan_option *option, 5452 struct rtw89_vif_link *rtwvif_link, 5453 bool wowlan) 5454 { 5455 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5456 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5457 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5458 struct cfg80211_scan_request *req = rtwvif->scan_req; 5459 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5460 struct rtw89_chan *op = &scan_info->op_chan; 5461 struct rtw89_h2c_scanofld_be_opch *opch; 5462 struct rtw89_pktofld_info *pkt_info; 5463 struct rtw89_h2c_scanofld_be *h2c; 5464 struct sk_buff *skb; 5465 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5466 u8 opch_size = sizeof(*opch) * option->num_opch; 5467 u8 probe_id[NUM_NL80211_BANDS]; 5468 u8 scan_offload_ver = U8_MAX; 5469 u8 cfg_len = sizeof(*h2c); 5470 unsigned int cond; 5471 u8 ver = U8_MAX; 5472 void *ptr; 5473 int ret; 5474 u32 len; 5475 u8 i; 5476 5477 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5478 5479 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5480 cfg_len = offsetofend(typeof(*h2c), w8); 5481 scan_offload_ver = 0; 5482 } 5483 5484 len = cfg_len + macc_role_size + opch_size; 5485 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5486 if (!skb) { 5487 rtw89_err(rtwdev, "failed to alloc skb for h2c scan 
offload\n"); 5488 return -ENOMEM; 5489 } 5490 5491 skb_put(skb, len); 5492 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5493 ptr = skb->data; 5494 5495 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5496 5497 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5498 ver = 0; 5499 5500 if (!wowlan) { 5501 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5502 if (pkt_info->wildcard_6ghz) { 5503 /* Provide wildcard as template */ 5504 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5505 break; 5506 } 5507 } 5508 } 5509 5510 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5511 le32_encode_bits(option->scan_mode, 5512 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5513 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5514 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5515 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5516 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5517 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5518 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5519 5520 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5521 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5522 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5523 5524 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5525 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5526 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5527 5528 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5529 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5530 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5531 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5532 5533 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5534 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5535 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5536 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5537 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5538 5539 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5540 5541 h2c->w6 = le32_encode_bits(option->prohib_chan, 5542 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5543 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5544 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5545 if (!wowlan && req->no_cck) { 5546 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5547 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5548 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5549 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5550 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5551 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5552 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5553 } 5554 5555 if (scan_offload_ver == 0) 5556 goto flex_member; 5557 5558 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5559 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5560 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5561 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5562 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5563 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5564 5565 flex_member: 5566 ptr += cfg_len; 5567 5568 for (i = 0; i < option->num_macc_role; i++) { 5569 macc_role = ptr; 5570 macc_role->w0 = 5571 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5572 le32_encode_bits(0, 
RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5573 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5574 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5575 ptr += sizeof(*macc_role); 5576 } 5577 5578 for (i = 0; i < option->num_opch; i++) { 5579 opch = ptr; 5580 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5581 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5582 le32_encode_bits(option->band, 5583 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5584 le32_encode_bits(rtwvif_link->port, 5585 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5586 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5587 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5588 le32_encode_bits(true, 5589 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5590 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5591 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5592 5593 opch->w1 = le32_encode_bits(op->band_type, 5594 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5595 le32_encode_bits(op->band_width, 5596 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5597 le32_encode_bits(0x3, 5598 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5599 le32_encode_bits(op->primary_channel, 5600 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5601 le32_encode_bits(op->channel, 5602 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5603 5604 opch->w2 = le32_encode_bits(0, 5605 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5606 le32_encode_bits(0, 5607 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5608 le32_encode_bits(2, 5609 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5610 5611 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5612 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5613 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5614 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5615 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5616 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5617 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5618 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5619 5620 if (ver == 0) 5621 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5622 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5623 else 5624 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5625 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5626 ptr += sizeof(*opch); 5627 } 5628 5629 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5630 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5631 H2C_FUNC_SCANOFLD_BE, 1, 1, 5632 len); 5633 5634 if (option->enable) 5635 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5636 else 5637 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5638 5639 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5640 if (ret) { 5641 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5642 return ret; 5643 } 5644 5645 return 0; 5646 } 5647 5648 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5649 struct rtw89_fw_h2c_rf_reg_info *info, 5650 u16 len, u8 page) 5651 { 5652 struct sk_buff *skb; 5653 u8 class = info->rf_path == RF_PATH_A ? 
5654 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5655 int ret; 5656 5657 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5658 if (!skb) { 5659 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5660 return -ENOMEM; 5661 } 5662 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5663 5664 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5665 H2C_CAT_OUTSRC, class, page, 0, 0, 5666 len); 5667 5668 ret = rtw89_h2c_tx(rtwdev, skb, false); 5669 if (ret) { 5670 rtw89_err(rtwdev, "failed to send h2c\n"); 5671 goto fail; 5672 } 5673 5674 return 0; 5675 fail: 5676 dev_kfree_skb_any(skb); 5677 5678 return ret; 5679 } 5680 5681 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5682 { 5683 struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data; 5684 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5685 struct sk_buff *skb; 5686 int ret; 5687 u8 idx; 5688 5689 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5690 if (!skb) { 5691 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5692 return -ENOMEM; 5693 } 5694 skb_put(skb, sizeof(*mccch)); 5695 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5696 5697 idx = rfk_mcc->table_idx; 5698 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5699 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5700 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5701 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5702 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5703 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5704 5705 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5706 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5707 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5708 sizeof(*mccch)); 5709 5710 ret = rtw89_h2c_tx(rtwdev, skb, false); 5711 if (ret) { 5712 rtw89_err(rtwdev, "failed to send h2c\n"); 5713 goto fail; 5714 } 5715 5716 return 0; 5717 fail: 5718 dev_kfree_skb_any(skb); 5719 5720 return ret; 5721 } 5722 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5723 5724 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5725 enum rtw89_phy_idx phy_idx) 5726 { 5727 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5728 struct rtw89_fw_h2c_rfk_pre_info_common *common; 5729 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5730 struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1; 5731 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5732 u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH]; 5733 u32 len = sizeof(*h2c); 5734 struct sk_buff *skb; 5735 u8 ver = U8_MAX; 5736 u8 tbl, path; 5737 u32 val32; 5738 int ret; 5739 5740 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) { 5741 len = sizeof(*h2c_v1); 5742 ver = 1; 5743 } else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5744 len = sizeof(*h2c_v0); 5745 ver = 0; 5746 } 5747 5748 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5749 if (!skb) { 5750 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5751 return -ENOMEM; 5752 } 5753 skb_put(skb, len); 5754 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5755 common = &h2c->base_v1.common; 5756 5757 common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5758 5759 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5760 BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH); 5761 5762 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5763 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5764 common->dbcc.ch[path][tbl] = 5765 cpu_to_le32(rfk_mcc->data[path].ch[tbl]); 5766 common->dbcc.band[path][tbl] = 5767 cpu_to_le32(rfk_mcc->data[path].band[tbl]); 5768 } 5769 } 5770 5771 
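	/* The loop below picks, for each RF path, the entry currently selected
	 * by table_idx and reports its channel/band to firmware. Only the
	 * full-size layout (neither RFK_PRE_NOTIFY_V0 nor V1 negotiated above)
	 * carries the per-path cur_bandwidth field, hence the ver check.
	 */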
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5772 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5773 5774 common->tbl.cur_ch[path] = 5775 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5776 common->tbl.cur_band[path] = 5777 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5778 5779 if (ver <= 1) 5780 continue; 5781 5782 h2c->cur_bandwidth[path] = 5783 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5784 } 5785 5786 common->phy_idx = cpu_to_le32(phy_idx); 5787 5788 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5789 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5790 5791 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5792 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5793 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5794 5795 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5796 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5797 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5798 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5799 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5800 h2c_v0->rfmod0 = cpu_to_le32(val32); 5801 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5802 h2c_v0->rfmod1 = cpu_to_le32(val32); 5803 5804 if (rtw89_is_mlo_1_1(rtwdev)) 5805 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5806 5807 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5808 5809 goto done; 5810 } 5811 5812 if (rtw89_is_mlo_1_1(rtwdev)) { 5813 h2c_v1 = &h2c->base_v1; 5814 h2c_v1->mlo_1_1 = cpu_to_le32(1); 5815 } 5816 done: 5817 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5818 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5819 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5820 len); 5821 5822 ret = rtw89_h2c_tx(rtwdev, skb, false); 5823 if (ret) { 5824 rtw89_err(rtwdev, "failed to send h2c\n"); 5825 goto fail; 5826 } 5827 5828 return 0; 5829 fail: 5830 dev_kfree_skb_any(skb); 5831 5832 return ret; 5833 } 5834 5835 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5836 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5837 { 5838 struct rtw89_hal *hal = &rtwdev->hal; 5839 struct rtw89_h2c_rf_tssi *h2c; 5840 u32 len = sizeof(*h2c); 5841 struct sk_buff *skb; 5842 int ret; 5843 5844 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5845 if (!skb) { 5846 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5847 return -ENOMEM; 5848 } 5849 skb_put(skb, len); 5850 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5851 5852 h2c->len = cpu_to_le16(len); 5853 h2c->phy = phy_idx; 5854 h2c->ch = chan->channel; 5855 h2c->bw = chan->band_width; 5856 h2c->band = chan->band_type; 5857 h2c->hwtx_en = true; 5858 h2c->cv = hal->cv; 5859 h2c->tssi_mode = tssi_mode; 5860 5861 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5862 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5863 5864 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5865 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5866 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5867 5868 ret = rtw89_h2c_tx(rtwdev, skb, false); 5869 if (ret) { 5870 rtw89_err(rtwdev, "failed to send h2c\n"); 5871 goto fail; 5872 } 5873 5874 return 0; 5875 fail: 5876 dev_kfree_skb_any(skb); 5877 5878 return ret; 5879 } 5880 5881 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5882 const struct rtw89_chan *chan) 5883 { 5884 struct rtw89_h2c_rf_iqk *h2c; 5885 u32 len = sizeof(*h2c); 5886 struct sk_buff *skb; 5887 int ret; 5888 5889 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5890 if (!skb) { 5891 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5892 return -ENOMEM; 5893 } 5894 skb_put(skb, len); 5895 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5896 5897 h2c->phy_idx = cpu_to_le32(phy_idx); 5898 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5899 5900 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5901 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5902 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5903 5904 ret = rtw89_h2c_tx(rtwdev, skb, false); 5905 if (ret) { 5906 rtw89_err(rtwdev, "failed to send h2c\n"); 5907 goto fail; 5908 } 5909 5910 return 0; 5911 fail: 5912 dev_kfree_skb_any(skb); 5913 5914 return ret; 5915 } 5916 5917 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5918 const struct rtw89_chan *chan) 5919 { 5920 struct rtw89_h2c_rf_dpk *h2c; 5921 u32 len = sizeof(*h2c); 5922 struct sk_buff *skb; 5923 int ret; 5924 5925 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5926 if (!skb) { 5927 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5928 return -ENOMEM; 5929 } 5930 skb_put(skb, len); 5931 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5932 5933 h2c->len = len; 5934 h2c->phy = phy_idx; 5935 h2c->dpk_enable = true; 5936 h2c->kpath = RF_AB; 5937 h2c->cur_band = chan->band_type; 5938 h2c->cur_bw = chan->band_width; 5939 h2c->cur_ch = chan->channel; 5940 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5941 5942 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5943 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5944 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5945 5946 ret = rtw89_h2c_tx(rtwdev, skb, false); 5947 if (ret) { 5948 rtw89_err(rtwdev, "failed to send h2c\n"); 5949 goto fail; 5950 } 5951 5952 return 0; 5953 fail: 5954 dev_kfree_skb_any(skb); 5955 5956 return ret; 5957 } 5958 5959 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5960 const struct rtw89_chan *chan) 5961 { 5962 struct rtw89_hal *hal = &rtwdev->hal; 5963 struct rtw89_h2c_rf_txgapk *h2c; 5964 u32 len = sizeof(*h2c); 5965 struct sk_buff *skb; 5966 int ret; 5967 5968 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5969 if (!skb) { 5970 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5971 return -ENOMEM; 5972 } 5973 skb_put(skb, len); 5974 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5975 5976 h2c->len = len; 5977 h2c->ktype = 2; 5978 h2c->phy = phy_idx; 5979 h2c->kpath = RF_AB; 5980 h2c->band = chan->band_type; 5981 h2c->bw = chan->band_width; 5982 h2c->ch = chan->channel; 5983 h2c->cv = hal->cv; 5984 5985 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5986 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5987 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5988 5989 ret = rtw89_h2c_tx(rtwdev, skb, false); 5990 if (ret) { 5991 rtw89_err(rtwdev, "failed to send h2c\n"); 5992 goto fail; 5993 } 5994 5995 return 0; 5996 fail: 5997 dev_kfree_skb_any(skb); 5998 5999 return ret; 6000 } 6001 6002 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6003 const struct rtw89_chan *chan) 6004 { 6005 struct rtw89_h2c_rf_dack *h2c; 6006 u32 len = sizeof(*h2c); 6007 struct sk_buff *skb; 6008 int ret; 6009 6010 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6011 if (!skb) { 6012 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 6013 return -ENOMEM; 6014 } 6015 skb_put(skb, len); 6016 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 6017 6018 h2c->len = cpu_to_le32(len); 6019 h2c->phy = cpu_to_le32(phy_idx); 6020 h2c->type = 
cpu_to_le32(0); 6021 6022 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6023 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6024 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 6025 6026 ret = rtw89_h2c_tx(rtwdev, skb, false); 6027 if (ret) { 6028 rtw89_err(rtwdev, "failed to send h2c\n"); 6029 goto fail; 6030 } 6031 6032 return 0; 6033 fail: 6034 dev_kfree_skb_any(skb); 6035 6036 return ret; 6037 } 6038 6039 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 6040 const struct rtw89_chan *chan, bool is_chl_k) 6041 { 6042 struct rtw89_h2c_rf_rxdck_v0 *v0; 6043 struct rtw89_h2c_rf_rxdck *h2c; 6044 u32 len = sizeof(*h2c); 6045 struct sk_buff *skb; 6046 int ver = -1; 6047 int ret; 6048 6049 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 6050 len = sizeof(*v0); 6051 ver = 0; 6052 } 6053 6054 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6055 if (!skb) { 6056 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 6057 return -ENOMEM; 6058 } 6059 skb_put(skb, len); 6060 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 6061 6062 v0->len = len; 6063 v0->phy = phy_idx; 6064 v0->is_afe = false; 6065 v0->kpath = RF_AB; 6066 v0->cur_band = chan->band_type; 6067 v0->cur_bw = chan->band_width; 6068 v0->cur_ch = chan->channel; 6069 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 6070 6071 if (ver == 0) 6072 goto hdr; 6073 6074 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 6075 h2c->is_chl_k = is_chl_k; 6076 6077 hdr: 6078 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6079 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 6080 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 6081 6082 ret = rtw89_h2c_tx(rtwdev, skb, false); 6083 if (ret) { 6084 rtw89_err(rtwdev, "failed to send h2c\n"); 6085 goto fail; 6086 } 6087 6088 return 0; 6089 fail: 6090 dev_kfree_skb_any(skb); 6091 6092 return ret; 6093 } 6094 6095 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 6096 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 6097 bool rack, bool dack) 6098 { 6099 struct sk_buff *skb; 6100 int ret; 6101 6102 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6103 if (!skb) { 6104 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 6105 return -ENOMEM; 6106 } 6107 skb_put_data(skb, buf, len); 6108 6109 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6110 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 6111 len); 6112 6113 ret = rtw89_h2c_tx(rtwdev, skb, false); 6114 if (ret) { 6115 rtw89_err(rtwdev, "failed to send h2c\n"); 6116 goto fail; 6117 } 6118 6119 return 0; 6120 fail: 6121 dev_kfree_skb_any(skb); 6122 6123 return ret; 6124 } 6125 6126 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 6127 { 6128 struct sk_buff *skb; 6129 int ret; 6130 6131 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 6132 if (!skb) { 6133 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 6134 return -ENOMEM; 6135 } 6136 skb_put_data(skb, buf, len); 6137 6138 ret = rtw89_h2c_tx(rtwdev, skb, false); 6139 if (ret) { 6140 rtw89_err(rtwdev, "failed to send h2c\n"); 6141 goto fail; 6142 } 6143 6144 return 0; 6145 fail: 6146 dev_kfree_skb_any(skb); 6147 6148 return ret; 6149 } 6150 6151 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 6152 { 6153 struct rtw89_early_h2c *early_h2c; 6154 6155 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6156 6157 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6158 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 6159 } 6160 } 6161 6162 void __rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6163 
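/* Lock-free variant: the caller is presumably expected to serialize access to
 * early_h2c_list itself; the rtw89_fw_free_all_early_h2c() wrapper below
 * asserts the wiphy lock before calling in here.
 */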
{ 6164 struct rtw89_early_h2c *early_h2c, *tmp; 6165 6166 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6167 list_del(&early_h2c->list); 6168 kfree(early_h2c->h2c); 6169 kfree(early_h2c); 6170 } 6171 } 6172 6173 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6174 { 6175 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6176 6177 __rtw89_fw_free_all_early_h2c(rtwdev); 6178 } 6179 6180 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6181 { 6182 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6183 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6184 6185 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6186 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6187 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6188 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6189 } 6190 6191 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6192 struct sk_buff *c2h) 6193 { 6194 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6195 u8 category = attr->category; 6196 u8 class = attr->class; 6197 u8 func = attr->func; 6198 6199 switch (category) { 6200 default: 6201 return false; 6202 case RTW89_C2H_CAT_MAC: 6203 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6204 case RTW89_C2H_CAT_OUTSRC: 6205 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6206 } 6207 } 6208 6209 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6210 { 6211 rtw89_fw_c2h_parse_attr(c2h); 6212 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6213 goto enqueue; 6214 6215 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6216 dev_kfree_skb_any(c2h); 6217 return; 6218 6219 enqueue: 6220 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6221 wiphy_work_queue(rtwdev->hw->wiphy, &rtwdev->c2h_work); 6222 } 6223 6224 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6225 struct sk_buff *skb) 6226 { 6227 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6228 u8 category = attr->category; 6229 u8 class = attr->class; 6230 u8 func = attr->func; 6231 u16 len = attr->len; 6232 bool dump = true; 6233 6234 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6235 return; 6236 6237 switch (category) { 6238 case RTW89_C2H_CAT_TEST: 6239 break; 6240 case RTW89_C2H_CAT_MAC: 6241 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6242 if (class == RTW89_MAC_C2H_CLASS_INFO && 6243 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6244 dump = false; 6245 break; 6246 case RTW89_C2H_CAT_OUTSRC: 6247 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6248 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6249 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6250 else 6251 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6252 break; 6253 } 6254 6255 if (dump) 6256 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6257 } 6258 6259 void rtw89_fw_c2h_work(struct wiphy *wiphy, struct wiphy_work *work) 6260 { 6261 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6262 c2h_work); 6263 struct sk_buff *skb, *tmp; 6264 6265 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6266 6267 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6268 skb_unlink(skb, &rtwdev->c2h_queue); 6269 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6270 dev_kfree_skb_any(skb); 6271 } 6272 } 6273 6274 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6275 struct rtw89_mac_h2c_info *info) 6276 { 6277 const struct rtw89_chip_info *chip = rtwdev->chip; 6278 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6279 const u32 *h2c_reg = 
chip->h2c_regs; 6280 u8 i, val, len; 6281 int ret; 6282 6283 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 6284 rtwdev, chip->h2c_ctrl_reg); 6285 if (ret) { 6286 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6287 return ret; 6288 } 6289 6290 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6291 sizeof(info->u.h2creg[0])); 6292 6293 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6294 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6295 6296 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6297 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6298 6299 fw_info->h2c_counter++; 6300 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6301 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6302 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6303 6304 return 0; 6305 } 6306 6307 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6308 struct rtw89_mac_c2h_info *info) 6309 { 6310 const struct rtw89_chip_info *chip = rtwdev->chip; 6311 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6312 const u32 *c2h_reg = chip->c2h_regs; 6313 u32 ret; 6314 u8 i, val; 6315 6316 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6317 6318 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6319 RTW89_C2H_TIMEOUT, false, rtwdev, 6320 chip->c2h_ctrl_reg); 6321 if (ret) { 6322 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6323 return ret; 6324 } 6325 6326 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6327 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6328 6329 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6330 6331 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6332 info->content_len = 6333 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6334 RTW89_C2HREG_HDR_LEN; 6335 6336 fw_info->c2h_counter++; 6337 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6338 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6339 6340 return 0; 6341 } 6342 6343 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6344 struct rtw89_mac_h2c_info *h2c_info, 6345 struct rtw89_mac_c2h_info *c2h_info) 6346 { 6347 u32 ret; 6348 6349 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6350 lockdep_assert_wiphy(rtwdev->hw->wiphy); 6351 6352 if (!h2c_info && !c2h_info) 6353 return -EINVAL; 6354 6355 if (!h2c_info) 6356 goto recv_c2h; 6357 6358 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6359 if (ret) 6360 return ret; 6361 6362 recv_c2h: 6363 if (!c2h_info) 6364 return 0; 6365 6366 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6367 if (ret) 6368 return ret; 6369 6370 return 0; 6371 } 6372 6373 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6374 { 6375 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6376 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6377 return; 6378 } 6379 6380 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6381 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6382 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6383 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6384 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6385 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6386 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6387 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6388 6389 rtw89_fw_prog_cnt_dump(rtwdev); 6390 } 6391 6392 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6393 { 6394 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6395 struct 
rtw89_pktofld_info *info, *tmp; 6396 u8 idx; 6397 6398 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6399 if (!(rtwdev->chip->support_bands & BIT(idx))) 6400 continue; 6401 6402 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6403 if (test_bit(info->id, rtwdev->pkt_offload)) 6404 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6405 list_del(&info->list); 6406 kfree(info); 6407 } 6408 } 6409 } 6410 6411 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6412 struct cfg80211_scan_request *req, 6413 struct rtw89_pktofld_info *info, 6414 enum nl80211_band band, u8 ssid_idx) 6415 { 6416 if (band != NL80211_BAND_6GHZ) 6417 return false; 6418 6419 if (req->ssids[ssid_idx].ssid_len) { 6420 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6421 req->ssids[ssid_idx].ssid_len); 6422 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6423 return false; 6424 } else { 6425 info->wildcard_6ghz = true; 6426 return true; 6427 } 6428 } 6429 6430 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6431 struct rtw89_vif_link *rtwvif_link, 6432 struct sk_buff *skb, u8 ssid_idx) 6433 { 6434 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6435 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6436 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6437 struct cfg80211_scan_request *req = rtwvif->scan_req; 6438 struct rtw89_pktofld_info *info; 6439 struct sk_buff *new; 6440 int ret = 0; 6441 u8 band; 6442 6443 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6444 if (!(rtwdev->chip->support_bands & BIT(band))) 6445 continue; 6446 6447 new = skb_copy(skb, GFP_KERNEL); 6448 if (!new) { 6449 ret = -ENOMEM; 6450 goto out; 6451 } 6452 skb_put_data(new, ies->ies[band], ies->len[band]); 6453 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6454 6455 info = kzalloc(sizeof(*info), GFP_KERNEL); 6456 if (!info) { 6457 ret = -ENOMEM; 6458 kfree_skb(new); 6459 goto out; 6460 } 6461 6462 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6463 6464 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6465 if (ret) { 6466 kfree_skb(new); 6467 kfree(info); 6468 goto out; 6469 } 6470 6471 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6472 kfree_skb(new); 6473 } 6474 out: 6475 return ret; 6476 } 6477 6478 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6479 struct rtw89_vif_link *rtwvif_link) 6480 { 6481 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6482 struct cfg80211_scan_request *req = rtwvif->scan_req; 6483 struct sk_buff *skb; 6484 u8 num = req->n_ssids, i; 6485 int ret; 6486 6487 for (i = 0; i < num; i++) { 6488 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6489 req->ssids[i].ssid, 6490 req->ssids[i].ssid_len, 6491 req->ie_len); 6492 if (!skb) 6493 return -ENOMEM; 6494 6495 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6496 kfree_skb(skb); 6497 6498 if (ret) 6499 return ret; 6500 } 6501 6502 return 0; 6503 } 6504 6505 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6506 struct ieee80211_scan_ies *ies, 6507 struct cfg80211_scan_request *req, 6508 struct rtw89_mac_chinfo *ch_info) 6509 { 6510 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6511 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6512 struct cfg80211_scan_6ghz_params *params; 6513 struct rtw89_pktofld_info *info, *tmp; 6514 struct ieee80211_hdr *hdr; 6515 struct sk_buff *skb; 6516 bool found; 6517 int ret = 0; 6518 u8 i; 6519 6520 if 
(!req->n_6ghz_params) 6521 return 0; 6522 6523 for (i = 0; i < req->n_6ghz_params; i++) { 6524 params = &req->scan_6ghz_params[i]; 6525 6526 if (req->channels[params->channel_idx]->hw_value != 6527 ch_info->pri_ch) 6528 continue; 6529 6530 found = false; 6531 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6532 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6533 found = true; 6534 break; 6535 } 6536 } 6537 if (found) 6538 continue; 6539 6540 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6541 NULL, 0, req->ie_len); 6542 if (!skb) 6543 return -ENOMEM; 6544 6545 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6546 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6547 hdr = (struct ieee80211_hdr *)skb->data; 6548 ether_addr_copy(hdr->addr3, params->bssid); 6549 6550 info = kzalloc(sizeof(*info), GFP_KERNEL); 6551 if (!info) { 6552 ret = -ENOMEM; 6553 kfree_skb(skb); 6554 goto out; 6555 } 6556 6557 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6558 if (ret) { 6559 kfree_skb(skb); 6560 kfree(info); 6561 goto out; 6562 } 6563 6564 ether_addr_copy(info->bssid, params->bssid); 6565 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6566 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6567 6568 ch_info->tx_pkt = true; 6569 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6570 6571 kfree_skb(skb); 6572 } 6573 6574 out: 6575 return ret; 6576 } 6577 6578 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6579 int chan_type, int ssid_num, 6580 struct rtw89_mac_chinfo *ch_info) 6581 { 6582 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6583 struct rtw89_pktofld_info *info; 6584 u8 probe_count = 0; 6585 6586 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6587 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6588 ch_info->bw = RTW89_SCAN_WIDTH; 6589 ch_info->tx_pkt = true; 6590 ch_info->cfg_tx_pwr = false; 6591 ch_info->tx_pwr_idx = 0; 6592 ch_info->tx_null = false; 6593 ch_info->pause_data = false; 6594 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6595 6596 if (ssid_num) { 6597 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6598 if (info->channel_6ghz && 6599 ch_info->pri_ch != info->channel_6ghz) 6600 continue; 6601 else if (info->channel_6ghz && probe_count != 0) 6602 ch_info->period += RTW89_CHANNEL_TIME_6G; 6603 6604 if (info->wildcard_6ghz) 6605 continue; 6606 6607 ch_info->pkt_id[probe_count++] = info->id; 6608 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6609 break; 6610 } 6611 ch_info->num_pkt = probe_count; 6612 } 6613 6614 switch (chan_type) { 6615 case RTW89_CHAN_DFS: 6616 if (ch_info->ch_band != RTW89_BAND_6G) 6617 ch_info->period = max_t(u8, ch_info->period, 6618 RTW89_DFS_CHAN_TIME); 6619 ch_info->dwell_time = RTW89_DWELL_TIME; 6620 break; 6621 case RTW89_CHAN_ACTIVE: 6622 break; 6623 default: 6624 rtw89_err(rtwdev, "Channel type out of bound\n"); 6625 } 6626 } 6627 6628 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6629 int ssid_num, 6630 struct rtw89_mac_chinfo *ch_info) 6631 { 6632 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6633 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6634 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6635 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6636 struct cfg80211_scan_request *req = rtwvif->scan_req; 6637 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6638 struct rtw89_pktofld_info *info; 6639 u8 band, 
probe_count = 0; 6640 int ret; 6641 6642 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6643 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6644 ch_info->bw = RTW89_SCAN_WIDTH; 6645 ch_info->tx_pkt = true; 6646 ch_info->cfg_tx_pwr = false; 6647 ch_info->tx_pwr_idx = 0; 6648 ch_info->tx_null = false; 6649 ch_info->pause_data = false; 6650 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6651 6652 if (ch_info->ch_band == RTW89_BAND_6G) { 6653 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6654 !ch_info->is_psc) { 6655 ch_info->tx_pkt = false; 6656 if (!req->duration_mandatory) 6657 ch_info->period -= RTW89_DWELL_TIME_6G; 6658 } 6659 } 6660 6661 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6662 if (ret) 6663 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6664 6665 if (ssid_num) { 6666 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6667 6668 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6669 if (info->channel_6ghz && 6670 ch_info->pri_ch != info->channel_6ghz) 6671 continue; 6672 else if (info->channel_6ghz && probe_count != 0) 6673 ch_info->period += RTW89_CHANNEL_TIME_6G; 6674 6675 if (info->wildcard_6ghz) 6676 continue; 6677 6678 ch_info->pkt_id[probe_count++] = info->id; 6679 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6680 break; 6681 } 6682 ch_info->num_pkt = probe_count; 6683 } 6684 6685 switch (chan_type) { 6686 case RTW89_CHAN_OPERATE: 6687 ch_info->central_ch = op->channel; 6688 ch_info->pri_ch = op->primary_channel; 6689 ch_info->ch_band = op->band_type; 6690 ch_info->bw = op->band_width; 6691 ch_info->tx_null = true; 6692 ch_info->num_pkt = 0; 6693 break; 6694 case RTW89_CHAN_DFS: 6695 if (ch_info->ch_band != RTW89_BAND_6G) 6696 ch_info->period = max_t(u8, ch_info->period, 6697 RTW89_DFS_CHAN_TIME); 6698 ch_info->dwell_time = RTW89_DWELL_TIME; 6699 ch_info->pause_data = true; 6700 break; 6701 case RTW89_CHAN_ACTIVE: 6702 ch_info->pause_data = true; 6703 break; 6704 default: 6705 rtw89_err(rtwdev, "Channel type out of bound\n"); 6706 } 6707 } 6708 6709 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6710 int ssid_num, 6711 struct rtw89_mac_chinfo_be *ch_info) 6712 { 6713 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6714 struct rtw89_pktofld_info *info; 6715 u8 probe_count = 0, i; 6716 6717 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6718 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6719 ch_info->bw = RTW89_SCAN_WIDTH; 6720 ch_info->tx_null = false; 6721 ch_info->pause_data = false; 6722 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6723 6724 if (ssid_num) { 6725 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6726 ch_info->pkt_id[probe_count++] = info->id; 6727 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6728 break; 6729 } 6730 } 6731 6732 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6733 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6734 6735 switch (chan_type) { 6736 case RTW89_CHAN_DFS: 6737 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6738 ch_info->dwell_time = RTW89_DWELL_TIME; 6739 break; 6740 case RTW89_CHAN_ACTIVE: 6741 break; 6742 default: 6743 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6744 break; 6745 } 6746 } 6747 6748 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6749 int ssid_num, 6750 struct rtw89_mac_chinfo_be *ch_info) 6751 { 6752 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6753 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6754 struct rtw89_vif *rtwvif = 
rtwvif_link->rtwvif; 6755 struct cfg80211_scan_request *req = rtwvif->scan_req; 6756 struct rtw89_pktofld_info *info; 6757 u8 band, probe_count = 0, i; 6758 6759 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6760 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6761 ch_info->bw = RTW89_SCAN_WIDTH; 6762 ch_info->tx_null = false; 6763 ch_info->pause_data = false; 6764 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6765 6766 if (ssid_num) { 6767 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6768 6769 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6770 if (info->channel_6ghz && 6771 ch_info->pri_ch != info->channel_6ghz) 6772 continue; 6773 6774 if (info->wildcard_6ghz) 6775 continue; 6776 6777 ch_info->pkt_id[probe_count++] = info->id; 6778 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6779 break; 6780 } 6781 } 6782 6783 if (ch_info->ch_band == RTW89_BAND_6G) { 6784 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6785 !ch_info->is_psc) { 6786 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6787 if (!req->duration_mandatory) 6788 ch_info->period -= RTW89_DWELL_TIME_6G; 6789 } 6790 } 6791 6792 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6793 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6794 6795 switch (chan_type) { 6796 case RTW89_CHAN_DFS: 6797 if (ch_info->ch_band != RTW89_BAND_6G) 6798 ch_info->period = 6799 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6800 ch_info->dwell_time = RTW89_DWELL_TIME; 6801 ch_info->pause_data = true; 6802 break; 6803 case RTW89_CHAN_ACTIVE: 6804 ch_info->pause_data = true; 6805 break; 6806 default: 6807 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6808 break; 6809 } 6810 } 6811 6812 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6813 struct rtw89_vif_link *rtwvif_link) 6814 { 6815 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6816 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6817 struct rtw89_mac_chinfo *ch_info, *tmp; 6818 struct ieee80211_channel *channel; 6819 struct list_head chan_list; 6820 int list_len; 6821 enum rtw89_chan_type type; 6822 int ret = 0; 6823 u32 idx; 6824 6825 INIT_LIST_HEAD(&chan_list); 6826 for (idx = 0, list_len = 0; 6827 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6828 idx++, list_len++) { 6829 channel = nd_config->channels[idx]; 6830 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6831 if (!ch_info) { 6832 ret = -ENOMEM; 6833 goto out; 6834 } 6835 6836 ch_info->period = RTW89_CHANNEL_TIME; 6837 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6838 ch_info->central_ch = channel->hw_value; 6839 ch_info->pri_ch = channel->hw_value; 6840 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6841 6842 if (channel->flags & 6843 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6844 type = RTW89_CHAN_DFS; 6845 else 6846 type = RTW89_CHAN_ACTIVE; 6847 6848 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6849 list_add_tail(&ch_info->list, &chan_list); 6850 } 6851 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6852 6853 out: 6854 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6855 list_del(&ch_info->list); 6856 kfree(ch_info); 6857 } 6858 6859 return ret; 6860 } 6861 6862 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6863 struct rtw89_vif_link *rtwvif_link, bool connected) 6864 { 6865 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6866 struct cfg80211_scan_request *req = rtwvif->scan_req; 6867 struct rtw89_mac_chinfo *ch_info, *tmp; 6868 struct 
ieee80211_channel *channel; 6869 struct list_head chan_list; 6870 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6871 int list_len, off_chan_time = 0; 6872 enum rtw89_chan_type type; 6873 int ret = 0; 6874 u32 idx; 6875 6876 INIT_LIST_HEAD(&chan_list); 6877 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6878 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6879 idx++, list_len++) { 6880 channel = req->channels[idx]; 6881 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6882 if (!ch_info) { 6883 ret = -ENOMEM; 6884 goto out; 6885 } 6886 6887 if (req->duration) 6888 ch_info->period = req->duration; 6889 else if (channel->band == NL80211_BAND_6GHZ) 6890 ch_info->period = RTW89_CHANNEL_TIME_6G + 6891 RTW89_DWELL_TIME_6G; 6892 else 6893 ch_info->period = RTW89_CHANNEL_TIME; 6894 6895 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6896 ch_info->central_ch = channel->hw_value; 6897 ch_info->pri_ch = channel->hw_value; 6898 ch_info->rand_seq_num = random_seq; 6899 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6900 6901 if (channel->flags & 6902 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6903 type = RTW89_CHAN_DFS; 6904 else 6905 type = RTW89_CHAN_ACTIVE; 6906 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6907 6908 if (connected && 6909 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6910 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6911 if (!tmp) { 6912 ret = -ENOMEM; 6913 kfree(ch_info); 6914 goto out; 6915 } 6916 6917 type = RTW89_CHAN_OPERATE; 6918 tmp->period = req->duration_mandatory ? 6919 req->duration : RTW89_CHANNEL_TIME; 6920 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6921 list_add_tail(&tmp->list, &chan_list); 6922 off_chan_time = 0; 6923 list_len++; 6924 } 6925 list_add_tail(&ch_info->list, &chan_list); 6926 off_chan_time += ch_info->period; 6927 } 6928 rtwdev->scan_info.last_chan_idx = idx; 6929 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6930 6931 out: 6932 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6933 list_del(&ch_info->list); 6934 kfree(ch_info); 6935 } 6936 6937 return ret; 6938 } 6939 6940 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6941 struct rtw89_vif_link *rtwvif_link) 6942 { 6943 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6944 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6945 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6946 struct ieee80211_channel *channel; 6947 struct list_head chan_list; 6948 enum rtw89_chan_type type; 6949 int list_len, ret; 6950 u32 idx; 6951 6952 INIT_LIST_HEAD(&chan_list); 6953 6954 for (idx = 0, list_len = 0; 6955 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6956 idx++, list_len++) { 6957 channel = nd_config->channels[idx]; 6958 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6959 if (!ch_info) { 6960 ret = -ENOMEM; 6961 goto out; 6962 } 6963 6964 ch_info->period = RTW89_CHANNEL_TIME; 6965 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6966 ch_info->central_ch = channel->hw_value; 6967 ch_info->pri_ch = channel->hw_value; 6968 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6969 6970 if (channel->flags & 6971 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6972 type = RTW89_CHAN_DFS; 6973 else 6974 type = RTW89_CHAN_ACTIVE; 6975 6976 rtw89_pno_scan_add_chan_be(rtwdev, type, 6977 nd_config->n_match_sets, ch_info); 6978 list_add_tail(&ch_info->list, &chan_list); 6979 } 6980 6981 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, 
&chan_list, 6982 rtwvif_link); 6983 6984 out: 6985 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6986 list_del(&ch_info->list); 6987 kfree(ch_info); 6988 } 6989 6990 return ret; 6991 } 6992 6993 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6994 struct rtw89_vif_link *rtwvif_link, bool connected) 6995 { 6996 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6997 struct cfg80211_scan_request *req = rtwvif->scan_req; 6998 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6999 struct ieee80211_channel *channel; 7000 struct list_head chan_list; 7001 enum rtw89_chan_type type; 7002 int list_len, ret; 7003 bool random_seq; 7004 u32 idx; 7005 7006 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 7007 INIT_LIST_HEAD(&chan_list); 7008 7009 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 7010 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 7011 idx++, list_len++) { 7012 channel = req->channels[idx]; 7013 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 7014 if (!ch_info) { 7015 ret = -ENOMEM; 7016 goto out; 7017 } 7018 7019 if (req->duration) 7020 ch_info->period = req->duration; 7021 else if (channel->band == NL80211_BAND_6GHZ) 7022 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 7023 else 7024 ch_info->period = RTW89_CHANNEL_TIME; 7025 7026 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 7027 ch_info->central_ch = channel->hw_value; 7028 ch_info->pri_ch = channel->hw_value; 7029 ch_info->rand_seq_num = random_seq; 7030 ch_info->is_psc = cfg80211_channel_is_psc(channel); 7031 7032 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 7033 type = RTW89_CHAN_DFS; 7034 else 7035 type = RTW89_CHAN_ACTIVE; 7036 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 7037 7038 list_add_tail(&ch_info->list, &chan_list); 7039 } 7040 7041 rtwdev->scan_info.last_chan_idx = idx; 7042 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 7043 rtwvif_link); 7044 7045 out: 7046 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 7047 list_del(&ch_info->list); 7048 kfree(ch_info); 7049 } 7050 7051 return ret; 7052 } 7053 7054 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 7055 struct rtw89_vif_link *rtwvif_link, bool connected) 7056 { 7057 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7058 int ret; 7059 7060 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 7061 if (ret) { 7062 rtw89_err(rtwdev, "Update probe request failed\n"); 7063 goto out; 7064 } 7065 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 7066 out: 7067 return ret; 7068 } 7069 7070 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 7071 struct rtw89_vif_link *rtwvif_link, 7072 struct ieee80211_scan_request *scan_req) 7073 { 7074 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7075 struct cfg80211_scan_request *req = &scan_req->req; 7076 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 7077 rtwvif_link->chanctx_idx); 7078 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 7079 u32 rx_fltr = rtwdev->hal.rx_fltr; 7080 u8 mac_addr[ETH_ALEN]; 7081 u32 reg; 7082 7083 /* clone op and keep it during scan */ 7084 rtwdev->scan_info.op_chan = *chan; 7085 7086 rtwdev->scan_info.scanning_vif = rtwvif_link; 7087 rtwdev->scan_info.last_chan_idx = 0; 7088 rtwdev->scan_info.abort = false; 7089 rtwvif->scan_ies = &scan_req->ies; 7090 rtwvif->scan_req = req; 7091 ieee80211_stop_queues(rtwdev->hw); 7092 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 7093 7094 if (req->flags 
& NL80211_SCAN_FLAG_RANDOM_ADDR)
		get_random_mask_addr(mac_addr, req->mac_addr,
				     req->mac_addr_mask);
	else
		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);

	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);

	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
}

struct rtw89_hw_scan_complete_cb_data {
	struct rtw89_vif_link *rtwvif_link;
	bool aborted;
};

static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
	struct cfg80211_scan_info info = {
		.aborted = cb_data->aborted,
	};
	struct rtw89_vif *rtwvif;
	u32 reg;

	if (!rtwvif_link)
		return -EINVAL;

	rtwvif = rtwvif_link->rtwvif;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);

	rtw89_release_pkt_list(rtwdev);
	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	scan_info->last_chan_idx = 0;
	scan_info->scanning_vif = NULL;
	scan_info->abort = false;

	return 0;
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
			    struct rtw89_vif_link *rtwvif_link,
			    bool aborted)
{
	struct rtw89_hw_scan_complete_cb_data cb_data = {
		.rtwvif_link = rtwvif_link,
		.aborted = aborted,
	};
	const struct rtw89_chanctx_cb_parm cb_parm = {
		.cb = rtw89_hw_scan_complete_cb,
		.data = &cb_data,
		.caller = __func__,
	};

	/* These steps need to be done after setting the channel (for coex)
	 * and before proceeding with entity mode (for MCC). So, pass them as
	 * a callback to enforce the right sequence rather than doing them
	 * directly here.
	 */
	rtw89_chanctx_proceed(rtwdev, &cb_parm);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning. This is safe
	 * because the scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
	 * work properly.
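	 * Note that the completion path (rtw89_hw_scan_complete_cb() above)
	 * also restores the RX filter and wakes the mac80211 queues.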
7189 */ 7190 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 7191 } 7192 7193 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 7194 { 7195 struct rtw89_vif_link *rtwvif_link; 7196 struct rtw89_vif *rtwvif; 7197 unsigned int link_id; 7198 7199 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 7200 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 7201 /* This variable implies connected or during attempt to connect */ 7202 if (!is_zero_ether_addr(rtwvif_link->bssid)) 7203 return true; 7204 } 7205 } 7206 7207 return false; 7208 } 7209 7210 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 7211 struct rtw89_vif_link *rtwvif_link, 7212 bool enable) 7213 { 7214 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7215 struct rtw89_scan_option opt = {0}; 7216 bool connected; 7217 int ret = 0; 7218 7219 if (!rtwvif_link) 7220 return -EINVAL; 7221 7222 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 7223 opt.enable = enable; 7224 opt.target_ch_mode = connected; 7225 if (enable) { 7226 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 7227 if (ret) 7228 goto out; 7229 } 7230 7231 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 7232 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 7233 opt.scan_mode = RTW89_SCAN_MODE_SA; 7234 opt.band = rtwvif_link->mac_idx; 7235 opt.num_macc_role = 0; 7236 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 7237 opt.num_opch = connected ? 1 : 0; 7238 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 7239 } 7240 7241 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7242 out: 7243 return ret; 7244 } 7245 7246 #define H2C_FW_CPU_EXCEPTION_LEN 4 7247 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7248 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7249 { 7250 struct sk_buff *skb; 7251 int ret; 7252 7253 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7254 if (!skb) { 7255 rtw89_err(rtwdev, 7256 "failed to alloc skb for fw cpu exception\n"); 7257 return -ENOMEM; 7258 } 7259 7260 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7261 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7262 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7263 7264 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7265 H2C_CAT_TEST, 7266 H2C_CL_FW_STATUS_TEST, 7267 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7268 H2C_FW_CPU_EXCEPTION_LEN); 7269 7270 ret = rtw89_h2c_tx(rtwdev, skb, false); 7271 if (ret) { 7272 rtw89_err(rtwdev, "failed to send h2c\n"); 7273 goto fail; 7274 } 7275 7276 return 0; 7277 7278 fail: 7279 dev_kfree_skb_any(skb); 7280 return ret; 7281 } 7282 7283 #define H2C_PKT_DROP_LEN 24 7284 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7285 const struct rtw89_pkt_drop_params *params) 7286 { 7287 struct sk_buff *skb; 7288 int ret; 7289 7290 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7291 if (!skb) { 7292 rtw89_err(rtwdev, 7293 "failed to alloc skb for packet drop\n"); 7294 return -ENOMEM; 7295 } 7296 7297 switch (params->sel) { 7298 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7299 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7300 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7301 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7302 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7303 break; 7304 default: 7305 rtw89_debug(rtwdev, RTW89_DBG_FW, 7306 "H2C of pkt drop might not fully support sel: %d yet\n", 7307 params->sel); 7308 break; 7309 } 7310 7311 skb_put(skb, H2C_PKT_DROP_LEN); 7312 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7313 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7314 
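	/* The remaining descriptor words carry the band/port/MBSSID selectors
	 * and the four macid_band_sel words straight from the caller's
	 * parameters.
	 */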
RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7315 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7316 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7317 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7318 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7319 params->macid_band_sel[0]); 7320 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7321 params->macid_band_sel[1]); 7322 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7323 params->macid_band_sel[2]); 7324 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7325 params->macid_band_sel[3]); 7326 7327 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7328 H2C_CAT_MAC, 7329 H2C_CL_MAC_FW_OFLD, 7330 H2C_FUNC_PKT_DROP, 0, 0, 7331 H2C_PKT_DROP_LEN); 7332 7333 ret = rtw89_h2c_tx(rtwdev, skb, false); 7334 if (ret) { 7335 rtw89_err(rtwdev, "failed to send h2c\n"); 7336 goto fail; 7337 } 7338 7339 return 0; 7340 7341 fail: 7342 dev_kfree_skb_any(skb); 7343 return ret; 7344 } 7345 7346 #define H2C_KEEP_ALIVE_LEN 4 7347 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7348 bool enable) 7349 { 7350 struct sk_buff *skb; 7351 u8 pkt_id = 0; 7352 int ret; 7353 7354 if (enable) { 7355 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7356 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7357 &pkt_id); 7358 if (ret) 7359 return -EPERM; 7360 } 7361 7362 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7363 if (!skb) { 7364 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7365 return -ENOMEM; 7366 } 7367 7368 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7369 7370 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7371 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7372 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7373 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7374 7375 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7376 H2C_CAT_MAC, 7377 H2C_CL_MAC_WOW, 7378 H2C_FUNC_KEEP_ALIVE, 0, 1, 7379 H2C_KEEP_ALIVE_LEN); 7380 7381 ret = rtw89_h2c_tx(rtwdev, skb, false); 7382 if (ret) { 7383 rtw89_err(rtwdev, "failed to send h2c\n"); 7384 goto fail; 7385 } 7386 7387 return 0; 7388 7389 fail: 7390 dev_kfree_skb_any(skb); 7391 7392 return ret; 7393 } 7394 7395 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7396 bool enable) 7397 { 7398 struct rtw89_h2c_arp_offload *h2c; 7399 u32 len = sizeof(*h2c); 7400 struct sk_buff *skb; 7401 u8 pkt_id = 0; 7402 int ret; 7403 7404 if (enable) { 7405 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7406 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7407 &pkt_id); 7408 if (ret) 7409 return ret; 7410 } 7411 7412 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7413 if (!skb) { 7414 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7415 return -ENOMEM; 7416 } 7417 7418 skb_put(skb, len); 7419 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7420 7421 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7422 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7423 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7424 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7425 7426 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7427 H2C_CAT_MAC, 7428 H2C_CL_MAC_WOW, 7429 H2C_FUNC_ARP_OFLD, 0, 1, 7430 len); 7431 7432 ret = rtw89_h2c_tx(rtwdev, skb, false); 7433 if (ret) { 7434 rtw89_err(rtwdev, "failed to send h2c\n"); 7435 goto fail; 7436 } 7437 7438 return 0; 7439 7440 fail: 7441 
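	/* On failure the skb was not handed to the TX path, so free it here,
	 * matching the error handling of the other H2C helpers in this file.
	 */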
dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
				   struct rtw89_vif_link *rtwvif_link, bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif_link->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
			      H2C_DISCONNECT_DETECT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config;
	struct rtw89_h2c_cfg_nlo *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret, i;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for nlo\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_cfg_nlo *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) |
		  le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) |
		  le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID);

	if (enable) {
		h2c->nlo_cnt = nd_config->n_match_sets;
		for (i = 0; i < nd_config->n_match_sets; i++) {
			h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len;
			memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid,
			       nd_config->match_sets[i].ssid.ssid_len);
		}
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_NLO, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);
	return ret;
}

int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link,
			    bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_h2c_wow_global *h2c;
	u8 macid = rtwvif_link->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_wow_global *)skb->data;

	h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
		  le32_encode_bits(macid,
RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7563 le32_encode_bits(rtw_wow->ptk_alg, 7564 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7565 le32_encode_bits(rtw_wow->gtk_alg, 7566 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7567 h2c->key_info = rtw_wow->key_info; 7568 7569 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7570 H2C_CAT_MAC, 7571 H2C_CL_MAC_WOW, 7572 H2C_FUNC_WOW_GLOBAL, 0, 1, 7573 len); 7574 7575 ret = rtw89_h2c_tx(rtwdev, skb, false); 7576 if (ret) { 7577 rtw89_err(rtwdev, "failed to send h2c\n"); 7578 goto fail; 7579 } 7580 7581 return 0; 7582 7583 fail: 7584 dev_kfree_skb_any(skb); 7585 7586 return ret; 7587 } 7588 7589 #define H2C_WAKEUP_CTRL_LEN 4 7590 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7591 struct rtw89_vif_link *rtwvif_link, 7592 bool enable) 7593 { 7594 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7595 struct sk_buff *skb; 7596 u8 macid = rtwvif_link->mac_id; 7597 int ret; 7598 7599 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7600 if (!skb) { 7601 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7602 return -ENOMEM; 7603 } 7604 7605 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7606 7607 if (rtw_wow->pattern_cnt) 7608 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7609 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7610 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7611 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7612 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7613 7614 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7615 7616 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7617 H2C_CAT_MAC, 7618 H2C_CL_MAC_WOW, 7619 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7620 H2C_WAKEUP_CTRL_LEN); 7621 7622 ret = rtw89_h2c_tx(rtwdev, skb, false); 7623 if (ret) { 7624 rtw89_err(rtwdev, "failed to send h2c\n"); 7625 goto fail; 7626 } 7627 7628 return 0; 7629 7630 fail: 7631 dev_kfree_skb_any(skb); 7632 7633 return ret; 7634 } 7635 7636 #define H2C_WOW_CAM_UPD_LEN 24 7637 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7638 struct rtw89_wow_cam_info *cam_info) 7639 { 7640 struct sk_buff *skb; 7641 int ret; 7642 7643 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7644 if (!skb) { 7645 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7646 return -ENOMEM; 7647 } 7648 7649 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7650 7651 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7652 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7653 if (cam_info->valid) { 7654 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7655 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7656 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7657 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7658 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7659 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7660 cam_info->negative_pattern_match); 7661 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7662 cam_info->skip_mac_hdr); 7663 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7664 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7665 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7666 } 7667 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7668 7669 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7670 H2C_CAT_MAC, 7671 H2C_CL_MAC_WOW, 7672 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7673 H2C_WOW_CAM_UPD_LEN); 7674 7675 ret = rtw89_h2c_tx(rtwdev, skb, false); 7676 if (ret) { 7677 rtw89_err(rtwdev, "failed to send h2c\n"); 
7678 goto fail; 7679 } 7680 7681 return 0; 7682 fail: 7683 dev_kfree_skb_any(skb); 7684 7685 return ret; 7686 } 7687 7688 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7689 struct rtw89_vif_link *rtwvif_link, 7690 bool enable) 7691 { 7692 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7693 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7694 struct rtw89_h2c_wow_gtk_ofld *h2c; 7695 u8 macid = rtwvif_link->mac_id; 7696 u32 len = sizeof(*h2c); 7697 u8 pkt_id_sa_query = 0; 7698 struct sk_buff *skb; 7699 u8 pkt_id_eapol = 0; 7700 int ret; 7701 7702 if (!rtw_wow->gtk_alg) 7703 return 0; 7704 7705 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7706 if (!skb) { 7707 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7708 return -ENOMEM; 7709 } 7710 7711 skb_put(skb, len); 7712 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7713 7714 if (!enable) 7715 goto hdr; 7716 7717 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7718 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7719 &pkt_id_eapol); 7720 if (ret) 7721 goto fail; 7722 7723 if (gtk_info->igtk_keyid) { 7724 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7725 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7726 &pkt_id_sa_query); 7727 if (ret) 7728 goto fail; 7729 } 7730 7731 /* not support TKIP yet */ 7732 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7733 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7734 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 7735 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7736 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7737 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7738 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7739 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7740 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7741 h2c->gtk_info = rtw_wow->gtk_info; 7742 7743 hdr: 7744 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7745 H2C_CAT_MAC, 7746 H2C_CL_MAC_WOW, 7747 H2C_FUNC_GTK_OFLD, 0, 1, 7748 len); 7749 7750 ret = rtw89_h2c_tx(rtwdev, skb, false); 7751 if (ret) { 7752 rtw89_err(rtwdev, "failed to send h2c\n"); 7753 goto fail; 7754 } 7755 return 0; 7756 fail: 7757 dev_kfree_skb_any(skb); 7758 7759 return ret; 7760 } 7761 7762 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7763 bool enable) 7764 { 7765 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7766 struct rtw89_h2c_fwips *h2c; 7767 u32 len = sizeof(*h2c); 7768 struct sk_buff *skb; 7769 7770 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7771 if (!skb) { 7772 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7773 return -ENOMEM; 7774 } 7775 skb_put(skb, len); 7776 h2c = (struct rtw89_h2c_fwips *)skb->data; 7777 7778 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7779 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7780 7781 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7782 H2C_CAT_MAC, 7783 H2C_CL_MAC_PS, 7784 H2C_FUNC_IPS_CFG, 0, 1, 7785 len); 7786 7787 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7788 } 7789 7790 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7791 { 7792 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7793 struct rtw89_h2c_wow_aoac *h2c; 7794 u32 len = sizeof(*h2c); 7795 struct sk_buff *skb; 7796 7797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7798 if (!skb) { 7799 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7800 return -ENOMEM; 7801 } 7802 
	skb_put(skb, len);

	/* This H2C only notifies firmware to generate an AOAC report C2H;
	 * it carries no parameters.
	 */
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}

/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
 */
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond)
{
	int ret;

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags))
		return 1;

	return rtw89_wait_for_cond(wait, cond);
}

#define H2C_ADD_MCC_LEN 16
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mcc_add_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for add mcc\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_ADD_MCC_LEN);
	RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0);
	RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1);
	RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch);
	RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth);
	RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt);
	RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null);
	RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry);
	RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch);
	RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count);
	RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early);
	RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g);
	RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en);
	RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass);
	RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type);
	RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num);
	RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_ADD_MCC, 0, 0,
			      H2C_ADD_MCC_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_START_MCC_LEN 12
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mcc_start_req *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN);
	if (!skb)
{ 7897 rtw89_err(rtwdev, 7898 "failed to alloc skb for start mcc\n"); 7899 return -ENOMEM; 7900 } 7901 7902 skb_put(skb, H2C_START_MCC_LEN); 7903 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7904 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7905 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7906 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7907 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7908 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7909 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 7910 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7911 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7912 7913 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7914 H2C_CAT_MAC, 7915 H2C_CL_MCC, 7916 H2C_FUNC_START_MCC, 0, 0, 7917 H2C_START_MCC_LEN); 7918 7919 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7920 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7921 } 7922 7923 #define H2C_STOP_MCC_LEN 4 7924 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7925 bool prev_groups) 7926 { 7927 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7928 struct sk_buff *skb; 7929 unsigned int cond; 7930 7931 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7932 if (!skb) { 7933 rtw89_err(rtwdev, 7934 "failed to alloc skb for stop mcc\n"); 7935 return -ENOMEM; 7936 } 7937 7938 skb_put(skb, H2C_STOP_MCC_LEN); 7939 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7940 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7941 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7942 7943 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7944 H2C_CAT_MAC, 7945 H2C_CL_MCC, 7946 H2C_FUNC_STOP_MCC, 0, 0, 7947 H2C_STOP_MCC_LEN); 7948 7949 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7950 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7951 } 7952 7953 #define H2C_DEL_MCC_GROUP_LEN 4 7954 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7955 bool prev_groups) 7956 { 7957 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7958 struct sk_buff *skb; 7959 unsigned int cond; 7960 7961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7962 if (!skb) { 7963 rtw89_err(rtwdev, 7964 "failed to alloc skb for del mcc group\n"); 7965 return -ENOMEM; 7966 } 7967 7968 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7969 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7970 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7971 7972 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7973 H2C_CAT_MAC, 7974 H2C_CL_MCC, 7975 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7976 H2C_DEL_MCC_GROUP_LEN); 7977 7978 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7979 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7980 } 7981 7982 #define H2C_RESET_MCC_GROUP_LEN 4 7983 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7984 { 7985 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7986 struct sk_buff *skb; 7987 unsigned int cond; 7988 7989 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7990 if (!skb) { 7991 rtw89_err(rtwdev, 7992 "failed to alloc skb for reset mcc group\n"); 7993 return -ENOMEM; 7994 } 7995 7996 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7997 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7998 7999 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8000 H2C_CAT_MAC, 8001 H2C_CL_MCC, 8002 
H2C_FUNC_RESET_MCC_GROUP, 0, 0, 8003 H2C_RESET_MCC_GROUP_LEN); 8004 8005 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 8006 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8007 } 8008 8009 #define H2C_MCC_REQ_TSF_LEN 4 8010 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 8011 const struct rtw89_fw_mcc_tsf_req *req, 8012 struct rtw89_mac_mcc_tsf_rpt *rpt) 8013 { 8014 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8015 struct rtw89_mac_mcc_tsf_rpt *tmp; 8016 struct sk_buff *skb; 8017 unsigned int cond; 8018 int ret; 8019 8020 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 8021 if (!skb) { 8022 rtw89_err(rtwdev, 8023 "failed to alloc skb for mcc req tsf\n"); 8024 return -ENOMEM; 8025 } 8026 8027 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 8028 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 8029 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 8030 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 8031 8032 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8033 H2C_CAT_MAC, 8034 H2C_CL_MCC, 8035 H2C_FUNC_MCC_REQ_TSF, 0, 0, 8036 H2C_MCC_REQ_TSF_LEN); 8037 8038 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 8039 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8040 if (ret) 8041 return ret; 8042 8043 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 8044 *rpt = *tmp; 8045 8046 return 0; 8047 } 8048 8049 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 8050 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 8051 u8 *bitmap) 8052 { 8053 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8054 struct sk_buff *skb; 8055 unsigned int cond; 8056 u8 map_len; 8057 u8 h2c_len; 8058 8059 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 8060 map_len = RTW89_MAX_MAC_ID_NUM / 8; 8061 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 8062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 8063 if (!skb) { 8064 rtw89_err(rtwdev, 8065 "failed to alloc skb for mcc macid bitmap\n"); 8066 return -ENOMEM; 8067 } 8068 8069 skb_put(skb, h2c_len); 8070 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 8071 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 8072 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 8073 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 8074 8075 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8076 H2C_CAT_MAC, 8077 H2C_CL_MCC, 8078 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 8079 h2c_len); 8080 8081 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 8082 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8083 } 8084 8085 #define H2C_MCC_SYNC_LEN 4 8086 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 8087 u8 target, u8 offset) 8088 { 8089 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8090 struct sk_buff *skb; 8091 unsigned int cond; 8092 8093 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 8094 if (!skb) { 8095 rtw89_err(rtwdev, 8096 "failed to alloc skb for mcc sync\n"); 8097 return -ENOMEM; 8098 } 8099 8100 skb_put(skb, H2C_MCC_SYNC_LEN); 8101 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 8102 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 8103 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 8104 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 8105 8106 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8107 H2C_CAT_MAC, 8108 H2C_CL_MCC, 8109 H2C_FUNC_MCC_SYNC, 0, 0, 8110 H2C_MCC_SYNC_LEN); 8111 8112 cond = RTW89_MCC_WAIT_COND(group, 
H2C_FUNC_MCC_SYNC); 8113 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8114 } 8115 8116 #define H2C_MCC_SET_DURATION_LEN 20 8117 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 8118 const struct rtw89_fw_mcc_duration *p) 8119 { 8120 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8121 struct sk_buff *skb; 8122 unsigned int cond; 8123 8124 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 8125 if (!skb) { 8126 rtw89_err(rtwdev, 8127 "failed to alloc skb for mcc set duration\n"); 8128 return -ENOMEM; 8129 } 8130 8131 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 8132 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 8133 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 8134 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 8135 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 8136 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 8137 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 8138 p->start_tsf_low); 8139 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 8140 p->start_tsf_high); 8141 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 8142 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 8143 8144 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8145 H2C_CAT_MAC, 8146 H2C_CL_MCC, 8147 H2C_FUNC_MCC_SET_DURATION, 0, 0, 8148 H2C_MCC_SET_DURATION_LEN); 8149 8150 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 8151 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8152 } 8153 8154 static 8155 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 8156 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 8157 struct rtw89_h2c_mrc_add_slot *slot_h2c) 8158 { 8159 bool fill_h2c = !!slot_h2c; 8160 unsigned int i; 8161 8162 if (!fill_h2c) 8163 goto calc_len; 8164 8165 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 8166 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 8167 le32_encode_bits(slot_arg->courtesy_en, 8168 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 8169 le32_encode_bits(slot_arg->role_num, 8170 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 8171 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 8172 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 8173 le32_encode_bits(slot_arg->courtesy_target, 8174 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 8175 8176 for (i = 0; i < slot_arg->role_num; i++) { 8177 slot_h2c->roles[i].w0 = 8178 le32_encode_bits(slot_arg->roles[i].macid, 8179 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 8180 le32_encode_bits(slot_arg->roles[i].role_type, 8181 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 8182 le32_encode_bits(slot_arg->roles[i].is_master, 8183 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 8184 le32_encode_bits(slot_arg->roles[i].en_tx_null, 8185 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 8186 le32_encode_bits(false, 8187 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 8188 le32_encode_bits(false, 8189 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 8190 slot_h2c->roles[i].w1 = 8191 le32_encode_bits(slot_arg->roles[i].central_ch, 8192 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 8193 le32_encode_bits(slot_arg->roles[i].primary_ch, 8194 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 8195 le32_encode_bits(slot_arg->roles[i].bw, 8196 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 8197 le32_encode_bits(slot_arg->roles[i].band, 8198 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 8199 le32_encode_bits(slot_arg->roles[i].null_early, 8200 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 8201 le32_encode_bits(false, 8202 
RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 8203 le32_encode_bits(true, 8204 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 8205 slot_h2c->roles[i].macid_main_bitmap = 8206 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 8207 slot_h2c->roles[i].macid_paired_bitmap = 8208 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 8209 } 8210 8211 calc_len: 8212 return struct_size(slot_h2c, roles, slot_arg->role_num); 8213 } 8214 8215 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8216 const struct rtw89_fw_mrc_add_arg *arg) 8217 { 8218 struct rtw89_h2c_mrc_add *h2c_head; 8219 struct sk_buff *skb; 8220 unsigned int i; 8221 void *tmp; 8222 u32 len; 8223 int ret; 8224 8225 len = sizeof(*h2c_head); 8226 for (i = 0; i < arg->slot_num; i++) 8227 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8228 8229 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8230 if (!skb) { 8231 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8232 return -ENOMEM; 8233 } 8234 8235 skb_put(skb, len); 8236 tmp = skb->data; 8237 8238 h2c_head = tmp; 8239 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8240 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8241 le32_encode_bits(arg->sch_type, 8242 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8243 le32_encode_bits(arg->slot_num, 8244 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8245 le32_encode_bits(arg->btc_in_sch, 8246 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8247 8248 tmp += sizeof(*h2c_head); 8249 for (i = 0; i < arg->slot_num; i++) 8250 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8251 8252 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8253 H2C_CAT_MAC, 8254 H2C_CL_MRC, 8255 H2C_FUNC_ADD_MRC, 0, 0, 8256 len); 8257 8258 ret = rtw89_h2c_tx(rtwdev, skb, false); 8259 if (ret) { 8260 rtw89_err(rtwdev, "failed to send h2c\n"); 8261 dev_kfree_skb_any(skb); 8262 return -EBUSY; 8263 } 8264 8265 return 0; 8266 } 8267 8268 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8269 const struct rtw89_fw_mrc_start_arg *arg) 8270 { 8271 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8272 struct rtw89_h2c_mrc_start *h2c; 8273 u32 len = sizeof(*h2c); 8274 struct sk_buff *skb; 8275 unsigned int cond; 8276 8277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8278 if (!skb) { 8279 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8280 return -ENOMEM; 8281 } 8282 8283 skb_put(skb, len); 8284 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8285 8286 h2c->w0 = le32_encode_bits(arg->sch_idx, 8287 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8288 le32_encode_bits(arg->old_sch_idx, 8289 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8290 le32_encode_bits(arg->action, 8291 RTW89_H2C_MRC_START_W0_ACTION); 8292 8293 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8294 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8295 8296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8297 H2C_CAT_MAC, 8298 H2C_CL_MRC, 8299 H2C_FUNC_START_MRC, 0, 0, 8300 len); 8301 8302 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8303 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8304 } 8305 8306 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8307 { 8308 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8309 struct rtw89_h2c_mrc_del *h2c; 8310 u32 len = sizeof(*h2c); 8311 struct sk_buff *skb; 8312 unsigned int cond; 8313 8314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8315 if (!skb) { 8316 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8317 return -ENOMEM; 8318 } 8319 8320 skb_put(skb, len); 8321 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8322 8323 h2c->w0 = 
le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8324 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8325 8326 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8327 H2C_CAT_MAC, 8328 H2C_CL_MRC, 8329 H2C_FUNC_DEL_MRC, 0, 0, 8330 len); 8331 8332 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8333 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8334 } 8335 8336 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8337 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8338 struct rtw89_mac_mrc_tsf_rpt *rpt) 8339 { 8340 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8341 struct rtw89_h2c_mrc_req_tsf *h2c; 8342 struct rtw89_mac_mrc_tsf_rpt *tmp; 8343 struct sk_buff *skb; 8344 unsigned int i; 8345 u32 len; 8346 int ret; 8347 8348 len = struct_size(h2c, infos, arg->num); 8349 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8350 if (!skb) { 8351 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8352 return -ENOMEM; 8353 } 8354 8355 skb_put(skb, len); 8356 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8357 8358 h2c->req_tsf_num = arg->num; 8359 for (i = 0; i < arg->num; i++) 8360 h2c->infos[i] = 8361 u8_encode_bits(arg->infos[i].band, 8362 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8363 u8_encode_bits(arg->infos[i].port, 8364 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8365 8366 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8367 H2C_CAT_MAC, 8368 H2C_CL_MRC, 8369 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8370 len); 8371 8372 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8373 if (ret) 8374 return ret; 8375 8376 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8377 *rpt = *tmp; 8378 8379 return 0; 8380 } 8381 8382 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8383 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8384 { 8385 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8386 u32 len = sizeof(*h2c); 8387 struct sk_buff *skb; 8388 int ret; 8389 8390 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8391 if (!skb) { 8392 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8393 return -ENOMEM; 8394 } 8395 8396 skb_put(skb, len); 8397 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8398 8399 h2c->w0 = le32_encode_bits(arg->sch_idx, 8400 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8401 le32_encode_bits(arg->action, 8402 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8403 le32_encode_bits(arg->macid, 8404 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8405 h2c->w1 = le32_encode_bits(arg->client_macid, 8406 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8407 8408 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8409 H2C_CAT_MAC, 8410 H2C_CL_MRC, 8411 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8412 len); 8413 8414 ret = rtw89_h2c_tx(rtwdev, skb, false); 8415 if (ret) { 8416 rtw89_err(rtwdev, "failed to send h2c\n"); 8417 dev_kfree_skb_any(skb); 8418 return -EBUSY; 8419 } 8420 8421 return 0; 8422 } 8423 8424 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8425 const struct rtw89_fw_mrc_sync_arg *arg) 8426 { 8427 struct rtw89_h2c_mrc_sync *h2c; 8428 u32 len = sizeof(*h2c); 8429 struct sk_buff *skb; 8430 int ret; 8431 8432 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8433 if (!skb) { 8434 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 8435 return -ENOMEM; 8436 } 8437 8438 skb_put(skb, len); 8439 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8440 8441 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8442 le32_encode_bits(arg->src.port, 8443 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8444 le32_encode_bits(arg->src.band, 8445 
RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8446 le32_encode_bits(arg->dest.port, 8447 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8448 le32_encode_bits(arg->dest.band, 8449 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8450 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8451 8452 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8453 H2C_CAT_MAC, 8454 H2C_CL_MRC, 8455 H2C_FUNC_MRC_SYNC, 0, 0, 8456 len); 8457 8458 ret = rtw89_h2c_tx(rtwdev, skb, false); 8459 if (ret) { 8460 rtw89_err(rtwdev, "failed to send h2c\n"); 8461 dev_kfree_skb_any(skb); 8462 return -EBUSY; 8463 } 8464 8465 return 0; 8466 } 8467 8468 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8469 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8470 { 8471 struct rtw89_h2c_mrc_upd_duration *h2c; 8472 struct sk_buff *skb; 8473 unsigned int i; 8474 u32 len; 8475 int ret; 8476 8477 len = struct_size(h2c, slots, arg->slot_num); 8478 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8479 if (!skb) { 8480 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8481 return -ENOMEM; 8482 } 8483 8484 skb_put(skb, len); 8485 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8486 8487 h2c->w0 = le32_encode_bits(arg->sch_idx, 8488 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8489 le32_encode_bits(arg->slot_num, 8490 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8491 le32_encode_bits(false, 8492 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8493 8494 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8495 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8496 8497 for (i = 0; i < arg->slot_num; i++) { 8498 h2c->slots[i] = 8499 le32_encode_bits(arg->slots[i].slot_idx, 8500 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8501 le32_encode_bits(arg->slots[i].duration, 8502 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8503 } 8504 8505 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8506 H2C_CAT_MAC, 8507 H2C_CL_MRC, 8508 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8509 len); 8510 8511 ret = rtw89_h2c_tx(rtwdev, skb, false); 8512 if (ret) { 8513 rtw89_err(rtwdev, "failed to send h2c\n"); 8514 dev_kfree_skb_any(skb); 8515 return -EBUSY; 8516 } 8517 8518 return 0; 8519 } 8520 8521 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8522 { 8523 struct rtw89_h2c_ap_info *h2c; 8524 u32 len = sizeof(*h2c); 8525 struct sk_buff *skb; 8526 int ret; 8527 8528 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8529 if (!skb) { 8530 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8531 return -ENOMEM; 8532 } 8533 8534 skb_put(skb, len); 8535 h2c = (struct rtw89_h2c_ap_info *)skb->data; 8536 8537 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8538 8539 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8540 H2C_CAT_MAC, 8541 H2C_CL_AP, 8542 H2C_FUNC_AP_INFO, 0, 0, 8543 len); 8544 8545 ret = rtw89_h2c_tx(rtwdev, skb, false); 8546 if (ret) { 8547 rtw89_err(rtwdev, "failed to send h2c\n"); 8548 dev_kfree_skb_any(skb); 8549 return -EBUSY; 8550 } 8551 8552 return 0; 8553 } 8554 8555 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8556 { 8557 int ret; 8558 8559 if (en) { 8560 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8561 return 0; 8562 } else { 8563 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8564 return 0; 8565 } 8566 8567 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8568 if (ret) { 8569 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8570 return ret; 8571 8572 /* During recovery, neither driver nor stack has full error 8573 * handling, so show a warning, but return 0 with 
refcount 8574 * increased normally. It can avoid underflow when calling 8575 * with @en == false later. 8576 */ 8577 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8578 } 8579 8580 if (en) 8581 refcount_set(&rtwdev->refcount_ap_info, 1); 8582 8583 return 0; 8584 } 8585 8586 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8587 { 8588 static const u8 zeros[U8_MAX] = {}; 8589 8590 return memcmp(ext_ptr, zeros, ext_len) == 0; 8591 } 8592 8593 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8594 ({ \ 8595 u8 __var_sz = sizeof(*(e)); \ 8596 bool __accept; \ 8597 if (__var_sz >= (ent_sz)) \ 8598 __accept = true; \ 8599 else \ 8600 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8601 (ent_sz) - __var_sz);\ 8602 __accept; \ 8603 }) 8604 8605 static bool 8606 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8607 const void *cursor, 8608 const struct rtw89_txpwr_conf *conf) 8609 { 8610 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8611 return false; 8612 8613 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8614 return false; 8615 8616 switch (e->rs) { 8617 case RTW89_RS_CCK: 8618 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8619 return false; 8620 break; 8621 case RTW89_RS_OFDM: 8622 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8623 return false; 8624 break; 8625 case RTW89_RS_MCS: 8626 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8627 e->nss >= RTW89_NSS_NUM || 8628 e->ofdma >= RTW89_OFDMA_NUM) 8629 return false; 8630 break; 8631 case RTW89_RS_HEDCM: 8632 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8633 e->nss >= RTW89_NSS_HEDCM_NUM || 8634 e->ofdma >= RTW89_OFDMA_NUM) 8635 return false; 8636 break; 8637 case RTW89_RS_OFFSET: 8638 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8639 return false; 8640 break; 8641 default: 8642 return false; 8643 } 8644 8645 return true; 8646 } 8647 8648 static 8649 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8650 const struct rtw89_txpwr_table *tbl) 8651 { 8652 const struct rtw89_txpwr_conf *conf = tbl->data; 8653 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8654 struct rtw89_txpwr_byrate *byr_head; 8655 struct rtw89_rate_desc desc = {}; 8656 const void *cursor; 8657 u32 data; 8658 s8 *byr; 8659 int i; 8660 8661 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8662 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8663 continue; 8664 8665 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8666 data = le32_to_cpu(entry.data); 8667 desc.ofdma = entry.ofdma; 8668 desc.nss = entry.nss; 8669 desc.rs = entry.rs; 8670 8671 for (i = 0; i < entry.len; i++, data >>= 8) { 8672 desc.idx = entry.shf + i; 8673 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8674 *byr = data & 0xff; 8675 } 8676 } 8677 } 8678 8679 static bool 8680 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8681 const void *cursor, 8682 const struct rtw89_txpwr_conf *conf) 8683 { 8684 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8685 return false; 8686 8687 if (e->bw >= RTW89_2G_BW_NUM) 8688 return false; 8689 if (e->nt >= RTW89_NTX_NUM) 8690 return false; 8691 if (e->rs >= RTW89_RS_LMT_NUM) 8692 return false; 8693 if (e->bf >= RTW89_BF_NUM) 8694 return false; 8695 if (e->regd >= RTW89_REGD_NUM) 8696 return false; 8697 if (e->ch_idx >= RTW89_2G_CH_NUM) 8698 return false; 8699 8700 return true; 8701 } 8702 8703 static 8704 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8705 { 8706 const struct rtw89_txpwr_conf *conf = 
&data->conf; 8707 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8708 const void *cursor; 8709 8710 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8711 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8712 continue; 8713 8714 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8715 [entry.ch_idx] = entry.v; 8716 } 8717 } 8718 8719 static bool 8720 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8721 const void *cursor, 8722 const struct rtw89_txpwr_conf *conf) 8723 { 8724 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8725 return false; 8726 8727 if (e->bw >= RTW89_5G_BW_NUM) 8728 return false; 8729 if (e->nt >= RTW89_NTX_NUM) 8730 return false; 8731 if (e->rs >= RTW89_RS_LMT_NUM) 8732 return false; 8733 if (e->bf >= RTW89_BF_NUM) 8734 return false; 8735 if (e->regd >= RTW89_REGD_NUM) 8736 return false; 8737 if (e->ch_idx >= RTW89_5G_CH_NUM) 8738 return false; 8739 8740 return true; 8741 } 8742 8743 static 8744 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8745 { 8746 const struct rtw89_txpwr_conf *conf = &data->conf; 8747 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8748 const void *cursor; 8749 8750 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8751 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8752 continue; 8753 8754 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8755 [entry.ch_idx] = entry.v; 8756 } 8757 } 8758 8759 static bool 8760 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8761 const void *cursor, 8762 const struct rtw89_txpwr_conf *conf) 8763 { 8764 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8765 return false; 8766 8767 if (e->bw >= RTW89_6G_BW_NUM) 8768 return false; 8769 if (e->nt >= RTW89_NTX_NUM) 8770 return false; 8771 if (e->rs >= RTW89_RS_LMT_NUM) 8772 return false; 8773 if (e->bf >= RTW89_BF_NUM) 8774 return false; 8775 if (e->regd >= RTW89_REGD_NUM) 8776 return false; 8777 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8778 return false; 8779 if (e->ch_idx >= RTW89_6G_CH_NUM) 8780 return false; 8781 8782 return true; 8783 } 8784 8785 static 8786 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8787 { 8788 const struct rtw89_txpwr_conf *conf = &data->conf; 8789 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8790 const void *cursor; 8791 8792 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8793 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8794 continue; 8795 8796 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8797 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8798 } 8799 } 8800 8801 static bool 8802 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8803 const void *cursor, 8804 const struct rtw89_txpwr_conf *conf) 8805 { 8806 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8807 return false; 8808 8809 if (e->ru >= RTW89_RU_NUM) 8810 return false; 8811 if (e->nt >= RTW89_NTX_NUM) 8812 return false; 8813 if (e->regd >= RTW89_REGD_NUM) 8814 return false; 8815 if (e->ch_idx >= RTW89_2G_CH_NUM) 8816 return false; 8817 8818 return true; 8819 } 8820 8821 static 8822 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8823 { 8824 const struct rtw89_txpwr_conf *conf = &data->conf; 8825 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8826 const void *cursor; 8827 8828 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8829 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, 
cursor, conf)) 8830 continue; 8831 8832 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8833 } 8834 } 8835 8836 static bool 8837 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8838 const void *cursor, 8839 const struct rtw89_txpwr_conf *conf) 8840 { 8841 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8842 return false; 8843 8844 if (e->ru >= RTW89_RU_NUM) 8845 return false; 8846 if (e->nt >= RTW89_NTX_NUM) 8847 return false; 8848 if (e->regd >= RTW89_REGD_NUM) 8849 return false; 8850 if (e->ch_idx >= RTW89_5G_CH_NUM) 8851 return false; 8852 8853 return true; 8854 } 8855 8856 static 8857 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8858 { 8859 const struct rtw89_txpwr_conf *conf = &data->conf; 8860 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8861 const void *cursor; 8862 8863 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8864 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8865 continue; 8866 8867 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8868 } 8869 } 8870 8871 static bool 8872 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8873 const void *cursor, 8874 const struct rtw89_txpwr_conf *conf) 8875 { 8876 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8877 return false; 8878 8879 if (e->ru >= RTW89_RU_NUM) 8880 return false; 8881 if (e->nt >= RTW89_NTX_NUM) 8882 return false; 8883 if (e->regd >= RTW89_REGD_NUM) 8884 return false; 8885 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8886 return false; 8887 if (e->ch_idx >= RTW89_6G_CH_NUM) 8888 return false; 8889 8890 return true; 8891 } 8892 8893 static 8894 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8895 { 8896 const struct rtw89_txpwr_conf *conf = &data->conf; 8897 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8898 const void *cursor; 8899 8900 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8901 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8902 continue; 8903 8904 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8905 [entry.ch_idx] = entry.v; 8906 } 8907 } 8908 8909 static bool 8910 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 8911 const void *cursor, 8912 const struct rtw89_txpwr_conf *conf) 8913 { 8914 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8915 return false; 8916 8917 if (e->band >= RTW89_BAND_NUM) 8918 return false; 8919 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8920 return false; 8921 if (e->regd >= RTW89_REGD_NUM) 8922 return false; 8923 8924 return true; 8925 } 8926 8927 static 8928 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8929 { 8930 const struct rtw89_txpwr_conf *conf = &data->conf; 8931 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8932 const void *cursor; 8933 8934 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8935 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8936 continue; 8937 8938 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8939 } 8940 } 8941 8942 static bool 8943 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8944 const void *cursor, 8945 const struct rtw89_txpwr_conf *conf) 8946 { 8947 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8948 return false; 8949 8950 if (e->band >= RTW89_BAND_NUM) 8951 return false; 8952 if (e->regd >= RTW89_REGD_NUM) 8953 return false; 8954 8955 return true; 8956 } 
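/* Descriptive note (added): every fw_txpwr_*_entry_valid()/rtw89_fw_load_*()
 * pair in this block follows the same pattern. rtw89_for_each_in_txpwr_conf()
 * walks the firmware-provided conf blob entry by entry, the *_entry_valid()
 * helper bounds-checks each index the entry carries, and only then is the
 * value scattered into the in-driver lookup table, so a malformed entry is
 * skipped instead of writing out of range.
 */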
8957 8958 static 8959 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8960 { 8961 const struct rtw89_txpwr_conf *conf = &data->conf; 8962 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8963 const void *cursor; 8964 8965 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8966 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8967 continue; 8968 8969 data->v[entry.band][entry.regd] = entry.v; 8970 } 8971 } 8972 8973 const struct rtw89_rfe_parms * 8974 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8975 const struct rtw89_rfe_parms *init) 8976 { 8977 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8978 struct rtw89_rfe_parms *parms; 8979 8980 if (!rfe_data) 8981 return init; 8982 8983 parms = &rfe_data->rfe_parms; 8984 if (init) 8985 *parms = *init; 8986 8987 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8988 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8989 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8990 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8991 parms->byr_tbl = &rfe_data->byrate.tbl; 8992 } 8993 8994 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8995 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8996 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8997 } 8998 8999 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 9000 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 9001 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 9002 } 9003 9004 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 9005 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 9006 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 9007 } 9008 9009 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 9010 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 9011 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 9012 } 9013 9014 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 9015 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 9016 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 9017 } 9018 9019 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 9020 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 9021 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 9022 } 9023 9024 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 9025 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 9026 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 9027 } 9028 9029 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 9030 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 9031 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 9032 } 9033 9034 return parms; 9035 } 9036
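/* Illustrative usage (added, not from this file): a caller would typically
 * overlay the firmware-provided tables on top of the chip defaults roughly
 * like this, where @dflt_parms is assumed to be the chip's built-in rfe_parms:
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, dflt_parms);
 *
 * When no firmware RFE data was parsed, the init pointer is returned
 * unchanged, so the assignment is safe either way.
 */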