// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
#include "wow.h"

struct rtw89_eapol_2_of_2 {
	u8 gtkbody[14];
	u8 key_des_ver;
	u8 rsvd[92];
} __packed;

struct rtw89_sa_query {
	u8 category;
	u8 action;
} __packed;

struct rtw89_arp_rsp {
	u8 llc_hdr[sizeof(rfc1042_header)];
	__be16 llc_type;
	struct arphdr arp_hdr;
	u8 sender_hw[ETH_ALEN];
	__be32 sender_ip;
	u8 target_hw[ETH_ALEN];
	__be32 target_ip;
} __packed;

static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};

union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);
static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const void *content,
				    u32 *mssc_len);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_fw_hdr_section_info *section_info;
	struct
rtw89_fw_secure *sec = &rtwdev->fw.sec; 135 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 136 const struct rtw89_fw_hdr_section *section; 137 const u8 *fw_end = fw + len; 138 const u8 *bin; 139 u32 base_hdr_len; 140 u32 mssc_len; 141 int ret; 142 u32 i; 143 144 if (!info) 145 return -EINVAL; 146 147 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 148 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 149 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 150 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_W7_IDMEM_SHARE_MODE); 151 152 if (info->dynamic_hdr_en) { 153 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 154 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 155 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 156 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 157 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 158 return -EINVAL; 159 } 160 } else { 161 info->hdr_len = base_hdr_len; 162 info->dynamic_hdr_len = 0; 163 } 164 165 bin = fw + info->hdr_len; 166 167 /* jump to section header */ 168 section_info = info->section_info; 169 for (i = 0; i < info->section_num; i++) { 170 section = &fw_hdr->sections[i]; 171 section_info->type = 172 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 173 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 174 175 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 176 section_info->len += FWDL_SECTION_CHKSUM_LEN; 177 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 178 section_info->dladdr = 179 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 180 section_info->addr = bin; 181 182 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 183 section_info->mssc = 184 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 185 186 ret = __parse_security_section(rtwdev, info, section_info, 187 bin, &mssc_len); 188 if (ret) 189 return ret; 190 191 if (sec->secure_boot && chip->chip_id == RTL8852B) 192 section_info->len_override = 960; 193 } else { 194 section_info->mssc = 0; 195 mssc_len = 0; 196 } 197 198 rtw89_debug(rtwdev, RTW89_DBG_FW, 199 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 200 i, section_info->type, section_info->len, 201 section_info->mssc, mssc_len, bin - fw); 202 rtw89_debug(rtwdev, RTW89_DBG_FW, 203 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 204 section_info->ignore, section_info->key_addr, 205 section_info->key_addr ? 
206 section_info->key_addr - section_info->addr : 0, 207 section_info->key_len, section_info->key_idx); 208 209 bin += section_info->len + mssc_len; 210 section_info++; 211 } 212 213 if (fw_end != bin) { 214 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 215 return -EINVAL; 216 } 217 218 return 0; 219 } 220 221 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 222 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 223 u32 rmp_tbl_size, u32 *key_idx) 224 { 225 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 226 u32 sel_byte_idx; 227 u32 mss_sel_idx; 228 u8 sel_bit_idx; 229 int i; 230 231 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 232 if (!mss_hdr->defen) 233 return -ENOENT; 234 235 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 236 sec->mss_key_num; 237 } else { 238 if (mss_hdr->defen) 239 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 240 else 241 mss_sel_idx = 0; 242 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 243 le16_to_cpu(mss_hdr->msscust_max) + 244 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 245 sec->mss_key_num; 246 } 247 248 sel_byte_idx = mss_sel_idx >> 3; 249 sel_bit_idx = mss_sel_idx & 0x7; 250 251 if (sel_byte_idx >= rmp_tbl_size) 252 return -EFAULT; 253 254 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 255 return -ENOENT; 256 257 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 258 259 for (i = 0; i < sel_byte_idx; i++) 260 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 261 262 return 0; 263 } 264 265 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 266 struct rtw89_fw_bin_info *info, 267 struct rtw89_fw_hdr_section_info *section_info, 268 const void *content, 269 u32 *mssc_len) 270 { 271 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 272 const union rtw89_fw_section_mssc_content *section_content = content; 273 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 274 u32 rmp_tbl_size; 275 u32 key_sign_len; 276 u32 real_key_idx; 277 u32 sb_sel_ver; 278 int ret; 279 280 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 281 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 282 return -ENOENT; 283 } 284 285 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 286 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 287 le16_to_cpu(mss_hdr->msscust_max) * 288 mss_hdr->mssdev_max) >> 3; 289 if (mss_hdr->defen) 290 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 291 } else { 292 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 293 mss_hdr->rmpfmt); 294 return -EINVAL; 295 } 296 297 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 298 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 299 rmp_tbl_size, (int)sizeof(*mss_hdr), 300 le32_to_cpu(mss_hdr->key_raw_offset)); 301 return -EINVAL; 302 } 303 304 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 305 if (!key_sign_len) 306 key_sign_len = 512; 307 308 if (info->dsp_checksum) 309 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 310 311 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 312 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 313 314 if (!sec->secure_boot) 315 goto out; 316 317 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 318 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 319 goto ignore; 320 321 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 322 if (ret) 323 goto ignore; 324 325 section_info->key_addr = content + 
section_info->len + 326 le32_to_cpu(mss_hdr->key_raw_offset) + 327 key_sign_len * real_key_idx; 328 section_info->key_len = key_sign_len; 329 section_info->key_idx = real_key_idx; 330 331 out: 332 if (info->secure_section_exist) { 333 section_info->ignore = true; 334 return 0; 335 } 336 337 info->secure_section_exist = true; 338 339 return 0; 340 341 ignore: 342 section_info->ignore = true; 343 344 return 0; 345 } 346 347 static int __parse_security_section(struct rtw89_dev *rtwdev, 348 struct rtw89_fw_bin_info *info, 349 struct rtw89_fw_hdr_section_info *section_info, 350 const void *content, 351 u32 *mssc_len) 352 { 353 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 354 int ret; 355 356 if ((section_info->mssc & FORMATTED_MSSC_MASK) == FORMATTED_MSSC) { 357 ret = __parse_formatted_mssc(rtwdev, info, section_info, 358 content, mssc_len); 359 if (ret) 360 return -EINVAL; 361 } else { 362 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 363 if (info->dsp_checksum) 364 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 365 366 if (sec->secure_boot) { 367 if (sec->mss_idx >= section_info->mssc) 368 return -EFAULT; 369 section_info->key_addr = content + section_info->len + 370 sec->mss_idx * FWDL_SECURITY_SIGLEN; 371 section_info->key_len = FWDL_SECURITY_SIGLEN; 372 } 373 374 info->secure_section_exist = true; 375 } 376 377 return 0; 378 } 379 380 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 381 struct rtw89_fw_bin_info *info) 382 { 383 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 384 struct rtw89_fw_hdr_section_info *section_info; 385 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 386 const struct rtw89_fw_hdr_section_v1 *section; 387 const u8 *fw_end = fw + len; 388 const u8 *bin; 389 u32 base_hdr_len; 390 u32 mssc_len; 391 int ret; 392 u32 i; 393 394 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 395 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 396 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 397 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 398 info->idmem_share_mode = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_IDMEM_SHARE_MODE); 399 400 if (info->dynamic_hdr_en) { 401 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 402 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 403 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 404 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 405 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 406 return -EINVAL; 407 } 408 } else { 409 info->hdr_len = base_hdr_len; 410 info->dynamic_hdr_len = 0; 411 } 412 413 bin = fw + info->hdr_len; 414 415 /* jump to section header */ 416 section_info = info->section_info; 417 for (i = 0; i < info->section_num; i++) { 418 section = &fw_hdr->sections[i]; 419 420 section_info->type = 421 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 422 section_info->len = 423 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 424 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 425 section_info->len += FWDL_SECTION_CHKSUM_LEN; 426 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 427 section_info->dladdr = 428 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 429 section_info->addr = bin; 430 431 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 432 section_info->mssc = 433 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 434 435 
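			/* A security section carries MSS key material appended
			 * after the section body; parse it to size that blob
			 * and to locate the key that should be downloaded.
			 */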
ret = __parse_security_section(rtwdev, info, section_info,
						       bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* WiFi firmware entries in the firmware file are not sorted by
		 * version, so walk all of them and take the closest version
		 * that is equal to or lower than the chip CV.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
fw_suit->hdr_ver); 623 return -ENOENT; 624 } 625 626 rtw89_info(rtwdev, 627 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 628 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 629 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 630 631 return 0; 632 } 633 634 static 635 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 636 bool nowarn) 637 { 638 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 639 int ret; 640 641 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 642 if (ret) 643 return ret; 644 645 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 646 } 647 648 static 649 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 650 const struct rtw89_fw_element_hdr *elm, 651 const union rtw89_fw_element_arg arg) 652 { 653 enum rtw89_fw_type type = arg.fw_type; 654 struct rtw89_hal *hal = &rtwdev->hal; 655 struct rtw89_fw_suit *fw_suit; 656 657 /* Version of BB MCU is in decreasing order in firmware file, so take 658 * first equal or less version, which is equal or less but closest version. 659 */ 660 if (hal->cv < elm->u.bbmcu.cv) 661 return 1; /* ignore this element */ 662 663 fw_suit = rtw89_fw_suit_get(rtwdev, type); 664 if (fw_suit->data) 665 return 1; /* ignore this element (a firmware is taken already) */ 666 667 fw_suit->data = elm->u.bbmcu.contents; 668 fw_suit->size = le32_to_cpu(elm->size); 669 670 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 671 } 672 673 #define __DEF_FW_FEAT_COND(__cond, __op) \ 674 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 675 { \ 676 return suit_ver_code __op comp_ver_code; \ 677 } 678 679 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 680 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 681 __DEF_FW_FEAT_COND(lt, <); /* less than */ 682 683 struct __fw_feat_cfg { 684 enum rtw89_core_chip_id chip_id; 685 enum rtw89_fw_feature feature; 686 u32 ver_code; 687 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 688 }; 689 690 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 691 { \ 692 .chip_id = _chip, \ 693 .feature = RTW89_FW_FEATURE_ ## _feat, \ 694 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 695 .cond = __fw_feat_cond_ ## _cond, \ 696 } 697 698 static const struct __fw_feat_cfg fw_feat_tbl[] = { 699 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 700 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 701 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 702 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 703 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 704 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 705 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 706 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 37, 0, NO_WOW_CPU_IO_RX), 707 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 708 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 709 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 710 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 711 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 712 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 7, BEACON_FILTER), 713 __CFG_FW_FEAT(RTL8852B, lt, 0, 29, 30, 0, NO_WOW_CPU_IO_RX), 714 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 715 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 716 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 717 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 718 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 110, 0, 
BEACON_FILTER), 719 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 720 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 721 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 722 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 723 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 724 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), 725 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 726 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 727 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 728 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0), 729 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 730 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), 731 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0), 732 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, LPS_CH_INFO), 733 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 42, 0, RFK_RXDCK_V0), 734 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 46, 0, NOTIFY_AP_INFO), 735 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 47, 0, CH_INFO_BE_V0), 736 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 49, 0, RFK_PRE_NOTIFY_V1), 737 __CFG_FW_FEAT(RTL8922A, lt, 0, 35, 51, 0, NO_PHYCAP_P1), 738 }; 739 740 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 741 const struct rtw89_chip_info *chip, 742 u32 ver_code) 743 { 744 int i; 745 746 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 747 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 748 749 if (chip->chip_id != ent->chip_id) 750 continue; 751 752 if (ent->cond(ver_code, ent->ver_code)) 753 RTW89_SET_FW_FEATURE(ent->feature, fw); 754 } 755 } 756 757 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 758 { 759 const struct rtw89_chip_info *chip = rtwdev->chip; 760 const struct rtw89_fw_suit *fw_suit; 761 u32 suit_ver_code; 762 763 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 764 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 765 766 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 767 } 768 769 const struct firmware * 770 rtw89_early_fw_feature_recognize(struct device *device, 771 const struct rtw89_chip_info *chip, 772 struct rtw89_fw_info *early_fw, 773 int *used_fw_format) 774 { 775 const struct firmware *firmware; 776 char fw_name[64]; 777 int fw_format; 778 u32 ver_code; 779 int ret; 780 781 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 782 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 783 chip->fw_basename, fw_format); 784 785 ret = request_firmware(&firmware, fw_name, device); 786 if (!ret) { 787 dev_info(device, "loaded firmware %s\n", fw_name); 788 *used_fw_format = fw_format; 789 break; 790 } 791 } 792 793 if (ret) { 794 dev_err(device, "failed to early request firmware: %d\n", ret); 795 return NULL; 796 } 797 798 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 799 800 if (!ver_code) 801 goto out; 802 803 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 804 805 out: 806 return firmware; 807 } 808 809 static int rtw89_fw_validate_ver_required(struct rtw89_dev *rtwdev) 810 { 811 const struct rtw89_chip_variant *variant = rtwdev->variant; 812 const struct rtw89_fw_suit *fw_suit; 813 u32 suit_ver_code; 814 815 if (!variant) 816 return 0; 817 818 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 819 suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 820 821 if (variant->fw_min_ver_code > suit_ver_code) { 822 rtw89_err(rtwdev, "minimum required firmware version is 0x%x\n", 823 variant->fw_min_ver_code); 824 return -ENOENT; 825 } 826 827 return 0; 
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	ret = rtw89_fw_validate_ver_required(rtwdev);
	if (ret)
		return ret;

	/* It still works even if the wowlan firmware does not exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file does not exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple entries match, the last one wins */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matched, accept the default entry */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
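
	/* Record the chosen element: RFE type, entry size and count, and the
	 * table data embedded in the firmware file.
	 */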
conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore it */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
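	/* Radio B/C/D elements reuse the same builder, each with its own RF path. */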
[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 1069 { .rf_path = RF_PATH_B }, NULL}, 1070 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 1071 { .rf_path = RF_PATH_C }, NULL}, 1072 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 1073 { .rf_path = RF_PATH_D }, NULL}, 1074 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 1075 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 1076 rtw89_fw_recognize_txpwr_from_elm, 1077 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 1078 }, 1079 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 1080 rtw89_fw_recognize_txpwr_from_elm, 1081 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 1082 }, 1083 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1084 rtw89_fw_recognize_txpwr_from_elm, 1085 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1086 }, 1087 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1088 rtw89_fw_recognize_txpwr_from_elm, 1089 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1090 }, 1091 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1092 rtw89_fw_recognize_txpwr_from_elm, 1093 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1094 }, 1095 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1096 rtw89_fw_recognize_txpwr_from_elm, 1097 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1098 }, 1099 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1100 rtw89_fw_recognize_txpwr_from_elm, 1101 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1102 }, 1103 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1104 rtw89_fw_recognize_txpwr_from_elm, 1105 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1106 }, 1107 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1108 rtw89_fw_recognize_txpwr_from_elm, 1109 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1110 }, 1111 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1112 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1113 }, 1114 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1115 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1116 }, 1117 }; 1118 1119 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1120 { 1121 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1122 const struct firmware *firmware = fw_info->req.firmware; 1123 const struct rtw89_chip_info *chip = rtwdev->chip; 1124 u32 unrecognized_elements = chip->needed_fw_elms; 1125 const struct rtw89_fw_element_handler *handler; 1126 const struct rtw89_fw_element_hdr *hdr; 1127 u32 elm_size; 1128 u32 elem_id; 1129 u32 offset; 1130 int ret; 1131 1132 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1133 1134 offset = rtw89_mfw_get_size(rtwdev); 1135 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1136 if (offset == 0) 1137 return -EINVAL; 1138 1139 while (offset + sizeof(*hdr) < firmware->size) { 1140 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1141 1142 elm_size = le32_to_cpu(hdr->size); 1143 if (offset + elm_size >= firmware->size) { 1144 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1145 break; 1146 } 1147 1148 elem_id = le32_to_cpu(hdr->id); 1149 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1150 goto next; 1151 1152 handler = &__fw_element_handlers[elem_id]; 1153 if (!handler->fn) 1154 goto next; 1155 1156 ret = handler->fn(rtwdev, hdr, handler->arg); 1157 if (ret == 1) /* ignore this element */ 1158 goto next; 1159 if (ret) 1160 return ret; 1161 1162 if (handler->name) 1163 rtw89_info(rtwdev, "Firmware 
element %s version: %4ph\n", 1164 handler->name, hdr->ver); 1165 1166 unrecognized_elements &= ~BIT(elem_id); 1167 next: 1168 offset += sizeof(*hdr) + elm_size; 1169 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1170 } 1171 1172 if (unrecognized_elements) { 1173 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1174 unrecognized_elements); 1175 return -ENOENT; 1176 } 1177 1178 return 0; 1179 } 1180 1181 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1182 u8 type, u8 cat, u8 class, u8 func, 1183 bool rack, bool dack, u32 len) 1184 { 1185 struct fwcmd_hdr *hdr; 1186 1187 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1188 1189 if (!(rtwdev->fw.h2c_seq % 4)) 1190 rack = true; 1191 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1192 FIELD_PREP(H2C_HDR_CAT, cat) | 1193 FIELD_PREP(H2C_HDR_CLASS, class) | 1194 FIELD_PREP(H2C_HDR_FUNC, func) | 1195 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1196 1197 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1198 len + H2C_HEADER_LEN) | 1199 (rack ? H2C_HDR_REC_ACK : 0) | 1200 (dack ? H2C_HDR_DONE_ACK : 0)); 1201 1202 rtwdev->fw.h2c_seq++; 1203 } 1204 1205 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1206 struct sk_buff *skb, 1207 u8 type, u8 cat, u8 class, u8 func, 1208 u32 len) 1209 { 1210 struct fwcmd_hdr *hdr; 1211 1212 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1213 1214 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1215 FIELD_PREP(H2C_HDR_CAT, cat) | 1216 FIELD_PREP(H2C_HDR_CLASS, class) | 1217 FIELD_PREP(H2C_HDR_FUNC, func) | 1218 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1219 1220 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1221 len + H2C_HEADER_LEN)); 1222 } 1223 1224 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1225 struct rtw89_fw_bin_info *info, 1226 struct rtw89_fw_hdr *fw_hdr) 1227 { 1228 struct rtw89_fw_hdr_section_info *section_info; 1229 struct rtw89_fw_hdr_section *section; 1230 int i; 1231 1232 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1233 FW_HDR_W7_PART_SIZE); 1234 1235 for (i = 0; i < info->section_num; i++) { 1236 section_info = &info->section_info[i]; 1237 1238 if (!section_info->len_override) 1239 continue; 1240 1241 section = &fw_hdr->sections[i]; 1242 le32p_replace_bits(§ion->w1, section_info->len_override, 1243 FWSECTION_HDR_W1_SEC_SIZE); 1244 } 1245 1246 return 0; 1247 } 1248 1249 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1250 struct rtw89_fw_bin_info *info, 1251 struct rtw89_fw_hdr_v1 *fw_hdr) 1252 { 1253 struct rtw89_fw_hdr_section_info *section_info; 1254 struct rtw89_fw_hdr_section_v1 *section; 1255 u8 dst_sec_idx = 0; 1256 u8 sec_idx; 1257 1258 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1259 FW_HDR_V1_W7_PART_SIZE); 1260 1261 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1262 section_info = &info->section_info[sec_idx]; 1263 section = &fw_hdr->sections[sec_idx]; 1264 1265 if (section_info->ignore) 1266 continue; 1267 1268 if (dst_sec_idx != sec_idx) 1269 fw_hdr->sections[dst_sec_idx] = *section; 1270 1271 dst_sec_idx++; 1272 } 1273 1274 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1275 1276 return (info->section_num - dst_sec_idx) * sizeof(*section); 1277 } 1278 1279 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1280 const struct rtw89_fw_suit *fw_suit, 1281 struct rtw89_fw_bin_info *info) 1282 { 1283 u32 len = info->hdr_len - info->dynamic_hdr_len; 1284 struct 
rtw89_fw_hdr_v1 *fw_hdr_v1; 1285 const u8 *fw = fw_suit->data; 1286 struct rtw89_fw_hdr *fw_hdr; 1287 struct sk_buff *skb; 1288 u32 truncated; 1289 u32 ret = 0; 1290 1291 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1292 if (!skb) { 1293 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1294 return -ENOMEM; 1295 } 1296 1297 skb_put_data(skb, fw, len); 1298 1299 switch (fw_suit->hdr_ver) { 1300 case 0: 1301 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1302 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1303 break; 1304 case 1: 1305 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1306 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1307 break; 1308 default: 1309 ret = -EOPNOTSUPP; 1310 goto fail; 1311 } 1312 1313 if (truncated) { 1314 len -= truncated; 1315 skb_trim(skb, len); 1316 } 1317 1318 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1319 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1320 H2C_FUNC_MAC_FWHDR_DL, len); 1321 1322 ret = rtw89_h2c_tx(rtwdev, skb, false); 1323 if (ret) { 1324 rtw89_err(rtwdev, "failed to send h2c\n"); 1325 ret = -1; 1326 goto fail; 1327 } 1328 1329 return 0; 1330 fail: 1331 dev_kfree_skb_any(skb); 1332 1333 return ret; 1334 } 1335 1336 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1337 const struct rtw89_fw_suit *fw_suit, 1338 struct rtw89_fw_bin_info *info) 1339 { 1340 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1341 int ret; 1342 1343 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1344 if (ret) { 1345 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1346 return ret; 1347 } 1348 1349 ret = mac->fwdl_check_path_ready(rtwdev, false); 1350 if (ret) { 1351 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1352 return ret; 1353 } 1354 1355 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1356 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1357 1358 return 0; 1359 } 1360 1361 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1362 struct rtw89_fw_hdr_section_info *info) 1363 { 1364 struct sk_buff *skb; 1365 const u8 *section = info->addr; 1366 u32 residue_len = info->len; 1367 bool copy_key = false; 1368 u32 pkt_len; 1369 int ret; 1370 1371 if (info->ignore) 1372 return 0; 1373 1374 if (info->len_override) { 1375 if (info->len_override > info->len) 1376 rtw89_warn(rtwdev, "override length %u larger than original %u\n", 1377 info->len_override, info->len); 1378 else 1379 residue_len = info->len_override; 1380 } 1381 1382 if (info->key_addr && info->key_len) { 1383 if (residue_len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1384 rtw89_warn(rtwdev, 1385 "ignore to copy key data because of len %d, %d, %d, %d\n", 1386 info->len, FWDL_SECTION_PER_PKT_LEN, 1387 info->key_len, residue_len); 1388 else 1389 copy_key = true; 1390 } 1391 1392 while (residue_len) { 1393 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1394 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1395 else 1396 pkt_len = residue_len; 1397 1398 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1399 if (!skb) { 1400 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1401 return -ENOMEM; 1402 } 1403 skb_put_data(skb, section, pkt_len); 1404 1405 if (copy_key) 1406 memcpy(skb->data + pkt_len - info->key_len, 1407 info->key_addr, info->key_len); 1408 1409 ret = rtw89_h2c_tx(rtwdev, skb, true); 1410 if (ret) { 1411 rtw89_err(rtwdev, "failed to send h2c\n"); 1412 ret = -1; 1413 goto fail; 1414 } 1415 1416 section += pkt_len; 1417 residue_len -= pkt_len; 1418 } 1419 1420 return 0; 1421 fail: 1422 
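	/* On failure, the current skb was not handed to the TX path; free it before returning. */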
dev_kfree_skb_any(skb); 1423 1424 return ret; 1425 } 1426 1427 static enum rtw89_fwdl_check_type 1428 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1429 const struct rtw89_fw_suit *fw_suit) 1430 { 1431 switch (fw_suit->type) { 1432 case RTW89_FW_BBMCU0: 1433 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1434 case RTW89_FW_BBMCU1: 1435 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1436 default: 1437 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1438 } 1439 } 1440 1441 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1442 const struct rtw89_fw_suit *fw_suit, 1443 struct rtw89_fw_bin_info *info) 1444 { 1445 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1446 const struct rtw89_chip_info *chip = rtwdev->chip; 1447 enum rtw89_fwdl_check_type chk_type; 1448 u8 section_num = info->section_num; 1449 int ret; 1450 1451 while (section_num--) { 1452 ret = __rtw89_fw_download_main(rtwdev, section_info); 1453 if (ret) 1454 return ret; 1455 section_info++; 1456 } 1457 1458 if (chip->chip_gen == RTW89_CHIP_AX) 1459 return 0; 1460 1461 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1462 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1463 if (ret) { 1464 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1465 fw_suit->type); 1466 return ret; 1467 } 1468 1469 return 0; 1470 } 1471 1472 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1473 { 1474 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1475 u32 addr = R_AX_DBG_PORT_SEL; 1476 u32 val32; 1477 u16 index; 1478 1479 if (chip_gen == RTW89_CHIP_BE) { 1480 addr = R_BE_WLCPU_PORT_PC; 1481 goto dump; 1482 } 1483 1484 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1485 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1486 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1487 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1488 1489 dump: 1490 for (index = 0; index < 15; index++) { 1491 val32 = rtw89_read32(rtwdev, addr); 1492 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1493 fsleep(10); 1494 } 1495 } 1496 1497 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1498 { 1499 u32 val32; 1500 1501 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1502 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1503 1504 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1505 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1506 1507 rtw89_fw_prog_cnt_dump(rtwdev); 1508 } 1509 1510 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1511 struct rtw89_fw_suit *fw_suit) 1512 { 1513 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1514 struct rtw89_fw_bin_info info = {}; 1515 int ret; 1516 1517 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1518 if (ret) { 1519 rtw89_err(rtwdev, "parse fw header fail\n"); 1520 return ret; 1521 } 1522 1523 rtw89_fwdl_secure_idmem_share_mode(rtwdev, info.idmem_share_mode); 1524 1525 if (rtwdev->chip->chip_id == RTL8922A && 1526 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1527 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1528 1529 ret = mac->fwdl_check_path_ready(rtwdev, true); 1530 if (ret) { 1531 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1532 return ret; 1533 } 1534 1535 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1536 if (ret) 1537 return ret; 1538 1539 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1540 if (ret) 1541 return ret; 1542 1543 return 0; 1544 } 1545 1546 static 1547 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type 
type, 1548 bool include_bb) 1549 { 1550 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1551 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1552 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1553 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1554 int ret; 1555 int i; 1556 1557 mac->disable_cpu(rtwdev); 1558 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1559 if (ret) 1560 return ret; 1561 1562 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1563 if (ret) 1564 goto fwdl_err; 1565 1566 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1567 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1568 1569 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1570 if (ret) 1571 goto fwdl_err; 1572 } 1573 1574 fw_info->h2c_seq = 0; 1575 fw_info->rec_seq = 0; 1576 fw_info->h2c_counter = 0; 1577 fw_info->c2h_counter = 0; 1578 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1579 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1580 1581 mdelay(5); 1582 1583 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1584 if (ret) { 1585 rtw89_warn(rtwdev, "download firmware fail\n"); 1586 goto fwdl_err; 1587 } 1588 1589 return ret; 1590 1591 fwdl_err: 1592 rtw89_fw_dl_fail_dump(rtwdev); 1593 return ret; 1594 } 1595 1596 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1597 bool include_bb) 1598 { 1599 int retry; 1600 int ret; 1601 1602 for (retry = 0; retry < 5; retry++) { 1603 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1604 if (!ret) 1605 return 0; 1606 } 1607 1608 return ret; 1609 } 1610 1611 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1612 { 1613 struct rtw89_fw_info *fw = &rtwdev->fw; 1614 1615 wait_for_completion(&fw->req.completion); 1616 if (!fw->req.firmware) 1617 return -EINVAL; 1618 1619 return 0; 1620 } 1621 1622 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1623 struct rtw89_fw_req_info *req, 1624 const char *fw_name, bool nowarn) 1625 { 1626 int ret; 1627 1628 if (req->firmware) { 1629 rtw89_debug(rtwdev, RTW89_DBG_FW, 1630 "full firmware has been early requested\n"); 1631 complete_all(&req->completion); 1632 return 0; 1633 } 1634 1635 if (nowarn) 1636 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1637 else 1638 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1639 1640 complete_all(&req->completion); 1641 1642 return ret; 1643 } 1644 1645 void rtw89_load_firmware_work(struct work_struct *work) 1646 { 1647 struct rtw89_dev *rtwdev = 1648 container_of(work, struct rtw89_dev, load_firmware_work); 1649 const struct rtw89_chip_info *chip = rtwdev->chip; 1650 char fw_name[64]; 1651 1652 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1653 chip->fw_basename, rtwdev->fw.fw_format); 1654 1655 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1656 } 1657 1658 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1659 { 1660 if (!tbl) 1661 return; 1662 1663 kfree(tbl->regs); 1664 kfree(tbl); 1665 } 1666 1667 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1668 { 1669 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1670 int i; 1671 1672 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1673 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1674 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1675 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1676 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1677 1678 kfree(elm_info->txpwr_trk); 1679 kfree(elm_info->rfk_log_fmt); 1680 } 1681 1682 void 
rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1683 { 1684 struct rtw89_fw_info *fw = &rtwdev->fw; 1685 1686 cancel_work_sync(&rtwdev->load_firmware_work); 1687 1688 if (fw->req.firmware) { 1689 release_firmware(fw->req.firmware); 1690 1691 /* assign NULL back in case rtw89_free_ieee80211_hw() 1692 * try to release the same one again. 1693 */ 1694 fw->req.firmware = NULL; 1695 } 1696 1697 kfree(fw->log.fmts); 1698 rtw89_unload_firmware_elements(rtwdev); 1699 } 1700 1701 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1702 { 1703 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1704 u32 i; 1705 1706 if (fmt_id > fw_log->last_fmt_id) 1707 return 0; 1708 1709 for (i = 0; i < fw_log->fmt_count; i++) { 1710 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1711 return i; 1712 } 1713 return 0; 1714 } 1715 1716 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1717 { 1718 struct rtw89_fw_log *log = &rtwdev->fw.log; 1719 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1720 struct rtw89_fw_suit *suit = &log->suit; 1721 const void *fmts_ptr, *fmts_end_ptr; 1722 u32 fmt_count; 1723 int i; 1724 1725 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1726 fmt_count = le32_to_cpu(suit_hdr->count); 1727 log->fmt_ids = suit_hdr->ids; 1728 fmts_ptr = &suit_hdr->ids[fmt_count]; 1729 fmts_end_ptr = suit->data + suit->size; 1730 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1731 if (!log->fmts) 1732 return -ENOMEM; 1733 1734 for (i = 0; i < fmt_count; i++) { 1735 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1736 if (!fmts_ptr) 1737 break; 1738 1739 (*log->fmts)[i] = fmts_ptr; 1740 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1741 log->fmt_count++; 1742 fmts_ptr += strlen(fmts_ptr); 1743 } 1744 1745 return 0; 1746 } 1747 1748 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1749 { 1750 struct rtw89_fw_log *log = &rtwdev->fw.log; 1751 struct rtw89_fw_suit *suit = &log->suit; 1752 1753 if (!suit || !suit->data) { 1754 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1755 return -EINVAL; 1756 } 1757 if (log->fmts) 1758 return 0; 1759 1760 return rtw89_fw_log_create_fmts_dict(rtwdev); 1761 } 1762 1763 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1764 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1765 u32 fmt_idx, u8 para_int, bool raw_data) 1766 { 1767 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1768 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1769 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1770 int i; 1771 1772 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1773 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1774 log_fmt->argc); 1775 return; 1776 } 1777 1778 if (para_int) 1779 for (i = 0 ; i < log_fmt->argc; i++) 1780 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1781 1782 if (raw_data) { 1783 if (para_int) 1784 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1785 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1786 para_int, log_fmt->argc, (int)sizeof(args), args); 1787 else 1788 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1789 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1790 para_int, log_fmt->argc, log_fmt->u.raw); 1791 } else { 1792 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1793 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1794 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1795 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1796 args[0xf]); 1797 } 1798 1799 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1800 } 
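/* Dump a C2H firmware log. If the payload carries the formatted-log
 * signature and a format dictionary has been loaded, decode it via the
 * format ID; otherwise print it as a plain string.
 */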
1801 1802 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1803 { 1804 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1805 u8 para_int; 1806 u32 fmt_idx; 1807 1808 if (len < RTW89_C2H_HEADER_LEN) { 1809 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1810 return; 1811 } 1812 1813 buf += RTW89_C2H_HEADER_LEN; 1814 len -= RTW89_C2H_HEADER_LEN; 1815 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1816 1817 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1818 goto plain_log; 1819 1820 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1821 goto plain_log; 1822 1823 if (!rtwdev->fw.log.fmts) 1824 return; 1825 1826 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1827 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1828 1829 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1830 rtw89_info(rtwdev, "C2H log: %s%s", 1831 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1832 else if (fmt_idx != 0 && para_int) 1833 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1834 else 1835 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1836 return; 1837 1838 plain_log: 1839 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1840 1841 } 1842 1843 #define H2C_CAM_LEN 60 1844 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 1845 struct rtw89_sta_link *rtwsta_link, const u8 *scan_mac_addr) 1846 { 1847 struct sk_buff *skb; 1848 int ret; 1849 1850 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1851 if (!skb) { 1852 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1853 return -ENOMEM; 1854 } 1855 skb_put(skb, H2C_CAM_LEN); 1856 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif_link, rtwsta_link, scan_mac_addr, 1857 skb->data); 1858 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif_link, rtwsta_link, skb->data); 1859 1860 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1861 H2C_CAT_MAC, 1862 H2C_CL_MAC_ADDR_CAM_UPDATE, 1863 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1864 H2C_CAM_LEN); 1865 1866 ret = rtw89_h2c_tx(rtwdev, skb, false); 1867 if (ret) { 1868 rtw89_err(rtwdev, "failed to send h2c\n"); 1869 goto fail; 1870 } 1871 1872 return 0; 1873 fail: 1874 dev_kfree_skb_any(skb); 1875 1876 return ret; 1877 } 1878 1879 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1880 struct rtw89_vif_link *rtwvif_link, 1881 struct rtw89_sta_link *rtwsta_link) 1882 { 1883 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 1884 u32 len = sizeof(*h2c); 1885 struct sk_buff *skb; 1886 int ret; 1887 1888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1889 if (!skb) { 1890 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1891 return -ENOMEM; 1892 } 1893 skb_put(skb, len); 1894 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 1895 1896 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif_link, rtwsta_link, h2c); 1897 1898 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1899 H2C_CAT_MAC, 1900 H2C_CL_MAC_FR_EXCHG, 1901 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1902 len); 1903 1904 ret = rtw89_h2c_tx(rtwdev, skb, false); 1905 if (ret) { 1906 rtw89_err(rtwdev, "failed to send h2c\n"); 1907 goto fail; 1908 } 1909 1910 return 0; 1911 fail: 1912 dev_kfree_skb_any(skb); 1913 1914 return ret; 1915 } 1916 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1917 1918 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1919 struct rtw89_vif_link *rtwvif_link, 1920 struct rtw89_sta_link *rtwsta_link) 1921 { 1922 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1923 u32 len = sizeof(*h2c); 1924 
struct sk_buff *skb; 1925 int ret; 1926 1927 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1928 if (!skb) { 1929 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1930 return -ENOMEM; 1931 } 1932 skb_put(skb, len); 1933 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1934 1935 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif_link, rtwsta_link, h2c); 1936 1937 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1938 H2C_CAT_MAC, 1939 H2C_CL_MAC_FR_EXCHG, 1940 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1941 len); 1942 1943 ret = rtw89_h2c_tx(rtwdev, skb, false); 1944 if (ret) { 1945 rtw89_err(rtwdev, "failed to send h2c\n"); 1946 goto fail; 1947 } 1948 1949 return 0; 1950 fail: 1951 dev_kfree_skb_any(skb); 1952 1953 return ret; 1954 } 1955 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1956 1957 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1958 struct rtw89_vif_link *rtwvif_link, 1959 struct rtw89_sta_link *rtwsta_link) 1960 { 1961 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 1962 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1963 u32 len = sizeof(*h2c); 1964 struct sk_buff *skb; 1965 int ret; 1966 1967 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1968 if (!skb) { 1969 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 1970 return -ENOMEM; 1971 } 1972 skb_put(skb, len); 1973 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1974 1975 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 1976 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 1977 1978 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 1979 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 1980 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 1981 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 1982 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 1983 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 1984 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 1985 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 1986 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 1987 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 1988 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 1989 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 1990 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 1991 1992 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1993 H2C_CAT_MAC, 1994 H2C_CL_MAC_FR_EXCHG, 1995 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1996 len); 1997 1998 ret = rtw89_h2c_tx(rtwdev, skb, false); 1999 if (ret) { 2000 rtw89_err(rtwdev, "failed to send h2c\n"); 2001 goto fail; 2002 } 2003 2004 return 0; 2005 fail: 2006 dev_kfree_skb_any(skb); 2007 2008 return ret; 2009 } 2010 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 2011 2012 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, 2013 struct rtw89_vif_link *rtwvif_link, 2014 struct rtw89_sta_link *rtwsta_link, 2015 bool valid, struct ieee80211_ampdu_params *params) 2016 { 2017 const struct rtw89_chip_info *chip = rtwdev->chip; 2018 struct rtw89_h2c_ba_cam *h2c; 2019 u8 macid = rtwsta_link->mac_id; 2020 u32 len = sizeof(*h2c); 2021 struct sk_buff *skb; 2022 u8 entry_idx; 2023 int ret; 2024 2025 ret = valid ? 2026 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2027 &entry_idx) : 2028 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2029 &entry_idx); 2030 if (ret) { 2031 /* it still works even if we don't have static BA CAM, because 2032 * hardware can create dynamic BA CAM automatically. 2033 */ 2034 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2035 "failed to %s entry tid=%d for h2c ba cam\n", 2036 valid ? 
"alloc" : "free", params->tid); 2037 return 0; 2038 } 2039 2040 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2041 if (!skb) { 2042 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2043 return -ENOMEM; 2044 } 2045 skb_put(skb, len); 2046 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2047 2048 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 2049 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 2050 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 2051 else 2052 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 2053 if (!valid) 2054 goto end; 2055 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 2056 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 2057 if (params->buf_size > 64) 2058 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2059 else 2060 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 2061 /* If init req is set, hw will set the ssn */ 2062 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 2063 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 2064 2065 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 2066 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 2067 le32_encode_bits(rtwvif_link->mac_idx, 2068 RTW89_H2C_BA_CAM_W1_BAND); 2069 } 2070 2071 end: 2072 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2073 H2C_CAT_MAC, 2074 H2C_CL_BA_CAM, 2075 H2C_FUNC_MAC_BA_CAM, 0, 1, 2076 len); 2077 2078 ret = rtw89_h2c_tx(rtwdev, skb, false); 2079 if (ret) { 2080 rtw89_err(rtwdev, "failed to send h2c\n"); 2081 goto fail; 2082 } 2083 2084 return 0; 2085 fail: 2086 dev_kfree_skb_any(skb); 2087 2088 return ret; 2089 } 2090 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 2091 2092 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 2093 u8 entry_idx, u8 uid) 2094 { 2095 struct rtw89_h2c_ba_cam *h2c; 2096 u32 len = sizeof(*h2c); 2097 struct sk_buff *skb; 2098 int ret; 2099 2100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2101 if (!skb) { 2102 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 2103 return -ENOMEM; 2104 } 2105 skb_put(skb, len); 2106 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2107 2108 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2109 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2110 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2111 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2112 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2113 2114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2115 H2C_CAT_MAC, 2116 H2C_CL_BA_CAM, 2117 H2C_FUNC_MAC_BA_CAM, 0, 1, 2118 len); 2119 2120 ret = rtw89_h2c_tx(rtwdev, skb, false); 2121 if (ret) { 2122 rtw89_err(rtwdev, "failed to send h2c\n"); 2123 goto fail; 2124 } 2125 2126 return 0; 2127 fail: 2128 dev_kfree_skb_any(skb); 2129 2130 return ret; 2131 } 2132 2133 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2134 { 2135 const struct rtw89_chip_info *chip = rtwdev->chip; 2136 u8 entry_idx = chip->bacam_num; 2137 u8 uid = 0; 2138 int i; 2139 2140 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2141 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2142 entry_idx++; 2143 uid++; 2144 } 2145 } 2146 2147 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, 2148 struct rtw89_vif_link *rtwvif_link, 2149 struct rtw89_sta_link *rtwsta_link, 2150 bool valid, struct ieee80211_ampdu_params *params) 2151 { 2152 const struct rtw89_chip_info *chip = rtwdev->chip; 2153 struct rtw89_h2c_ba_cam_v1 *h2c; 2154 u8 
macid = rtwsta_link->mac_id; 2155 u32 len = sizeof(*h2c); 2156 struct sk_buff *skb; 2157 u8 entry_idx; 2158 u8 bmap_size; 2159 int ret; 2160 2161 ret = valid ? 2162 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2163 &entry_idx) : 2164 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta_link, params->tid, 2165 &entry_idx); 2166 if (ret) { 2167 /* it still works even if we don't have static BA CAM, because 2168 * hardware can create dynamic BA CAM automatically. 2169 */ 2170 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2171 "failed to %s entry tid=%d for h2c ba cam\n", 2172 valid ? "alloc" : "free", params->tid); 2173 return 0; 2174 } 2175 2176 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2177 if (!skb) { 2178 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2179 return -ENOMEM; 2180 } 2181 skb_put(skb, len); 2182 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2183 2184 if (params->buf_size > 512) 2185 bmap_size = 10; 2186 else if (params->buf_size > 256) 2187 bmap_size = 8; 2188 else if (params->buf_size > 64) 2189 bmap_size = 4; 2190 else 2191 bmap_size = 0; 2192 2193 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2194 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2195 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2196 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2197 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2198 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2199 2200 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2201 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2202 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2203 le32_encode_bits(!!rtwvif_link->mac_idx, 2204 RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2205 2206 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2207 H2C_CAT_MAC, 2208 H2C_CL_BA_CAM, 2209 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2210 len); 2211 2212 ret = rtw89_h2c_tx(rtwdev, skb, false); 2213 if (ret) { 2214 rtw89_err(rtwdev, "failed to send h2c\n"); 2215 goto fail; 2216 } 2217 2218 return 0; 2219 fail: 2220 dev_kfree_skb_any(skb); 2221 2222 return ret; 2223 } 2224 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2225 2226 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2227 u8 offset, u8 mac_idx) 2228 { 2229 struct rtw89_h2c_ba_cam_init *h2c; 2230 u32 len = sizeof(*h2c); 2231 struct sk_buff *skb; 2232 int ret; 2233 2234 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2235 if (!skb) { 2236 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2237 return -ENOMEM; 2238 } 2239 skb_put(skb, len); 2240 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2241 2242 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2243 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2244 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2245 2246 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2247 H2C_CAT_MAC, 2248 H2C_CL_BA_CAM, 2249 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2250 len); 2251 2252 ret = rtw89_h2c_tx(rtwdev, skb, false); 2253 if (ret) { 2254 rtw89_err(rtwdev, "failed to send h2c\n"); 2255 goto fail; 2256 } 2257 2258 return 0; 2259 fail: 2260 dev_kfree_skb_any(skb); 2261 2262 return ret; 2263 } 2264 2265 #define H2C_LOG_CFG_LEN 12 2266 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2267 { 2268 struct sk_buff *skb; 2269 u32 comp = 0; 2270 int ret; 2271 2272 if (enable) 2273 comp = BIT(RTW89_FW_LOG_COMP_INIT) | 
BIT(RTW89_FW_LOG_COMP_TASK) | 2274 BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2275 BIT(RTW89_FW_LOG_COMP_SCAN); 2276 2277 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2278 if (!skb) { 2279 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2280 return -ENOMEM; 2281 } 2282 2283 skb_put(skb, H2C_LOG_CFG_LEN); 2284 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2285 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2286 SET_LOG_CFG_COMP(skb->data, comp); 2287 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2288 2289 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2290 H2C_CAT_MAC, 2291 H2C_CL_FW_INFO, 2292 H2C_FUNC_LOG_CFG, 0, 0, 2293 H2C_LOG_CFG_LEN); 2294 2295 ret = rtw89_h2c_tx(rtwdev, skb, false); 2296 if (ret) { 2297 rtw89_err(rtwdev, "failed to send h2c\n"); 2298 goto fail; 2299 } 2300 2301 return 0; 2302 fail: 2303 dev_kfree_skb_any(skb); 2304 2305 return ret; 2306 } 2307 2308 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2309 struct rtw89_vif_link *rtwvif_link) 2310 { 2311 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2312 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2313 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2314 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2315 struct rtw89_eapol_2_of_2 *eapol_pkt; 2316 struct ieee80211_bss_conf *bss_conf; 2317 struct ieee80211_hdr_3addr *hdr; 2318 struct sk_buff *skb; 2319 u8 key_des_ver; 2320 2321 if (rtw_wow->ptk_alg == 3) 2322 key_des_ver = 1; 2323 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2324 key_des_ver = 2; 2325 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2326 key_des_ver = 3; 2327 else 2328 key_des_ver = 0; 2329 2330 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2331 if (!skb) 2332 return NULL; 2333 2334 hdr = skb_put_zero(skb, sizeof(*hdr)); 2335 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2336 IEEE80211_FCTL_TODS | 2337 IEEE80211_FCTL_PROTECTED); 2338 2339 rcu_read_lock(); 2340 2341 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2342 2343 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2344 ether_addr_copy(hdr->addr2, bss_conf->addr); 2345 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2346 2347 rcu_read_unlock(); 2348 2349 skb_put_zero(skb, sec_hdr_len); 2350 2351 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2352 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2353 eapol_pkt->key_des_ver = key_des_ver; 2354 2355 return skb; 2356 } 2357 2358 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2359 struct rtw89_vif_link *rtwvif_link) 2360 { 2361 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2362 struct ieee80211_bss_conf *bss_conf; 2363 struct ieee80211_hdr_3addr *hdr; 2364 struct rtw89_sa_query *sa_query; 2365 struct sk_buff *skb; 2366 2367 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2368 if (!skb) 2369 return NULL; 2370 2371 hdr = skb_put_zero(skb, sizeof(*hdr)); 2372 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2373 IEEE80211_STYPE_ACTION | 2374 IEEE80211_FCTL_PROTECTED); 2375 2376 rcu_read_lock(); 2377 2378 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 2379 2380 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2381 ether_addr_copy(hdr->addr2, bss_conf->addr); 2382 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2383 2384 rcu_read_unlock(); 2385 2386 skb_put_zero(skb, sec_hdr_len); 2387 2388 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2389 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2390 
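	/* The 2-byte SA Query Transaction Identifier that follows the
	 * category/action pair in a real SA Query Response is not part of
	 * this template; the firmware presumably fills it in when sending
	 * the offloaded frame.
	 */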
sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2391 2392 return skb; 2393 } 2394 2395 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2396 struct rtw89_vif_link *rtwvif_link) 2397 { 2398 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 2399 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2400 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2401 struct ieee80211_hdr_3addr *hdr; 2402 struct rtw89_arp_rsp *arp_skb; 2403 struct arphdr *arp_hdr; 2404 struct sk_buff *skb; 2405 __le16 fc; 2406 2407 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2408 if (!skb) 2409 return NULL; 2410 2411 hdr = skb_put_zero(skb, sizeof(*hdr)); 2412 2413 if (rtw_wow->ptk_alg) 2414 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2415 IEEE80211_FCTL_PROTECTED); 2416 else 2417 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2418 2419 hdr->frame_control = fc; 2420 ether_addr_copy(hdr->addr1, rtwvif_link->bssid); 2421 ether_addr_copy(hdr->addr2, rtwvif_link->mac_addr); 2422 ether_addr_copy(hdr->addr3, rtwvif_link->bssid); 2423 2424 skb_put_zero(skb, sec_hdr_len); 2425 2426 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2427 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2428 arp_skb->llc_type = htons(ETH_P_ARP); 2429 2430 arp_hdr = &arp_skb->arp_hdr; 2431 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2432 arp_hdr->ar_pro = htons(ETH_P_IP); 2433 arp_hdr->ar_hln = ETH_ALEN; 2434 arp_hdr->ar_pln = 4; 2435 arp_hdr->ar_op = htons(ARPOP_REPLY); 2436 2437 ether_addr_copy(arp_skb->sender_hw, rtwvif_link->mac_addr); 2438 arp_skb->sender_ip = rtwvif->ip_addr; 2439 2440 return skb; 2441 } 2442 2443 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2444 struct rtw89_vif_link *rtwvif_link, 2445 enum rtw89_fw_pkt_ofld_type type, 2446 u8 *id) 2447 { 2448 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 2449 int link_id = ieee80211_vif_is_mld(vif) ? 
rtwvif_link->link_id : -1; 2450 struct rtw89_pktofld_info *info; 2451 struct sk_buff *skb; 2452 int ret; 2453 2454 info = kzalloc(sizeof(*info), GFP_KERNEL); 2455 if (!info) 2456 return -ENOMEM; 2457 2458 switch (type) { 2459 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2460 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2461 break; 2462 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2463 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2464 break; 2465 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2466 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, false); 2467 break; 2468 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2469 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, link_id, true); 2470 break; 2471 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2472 skb = rtw89_eapol_get(rtwdev, rtwvif_link); 2473 break; 2474 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2475 skb = rtw89_sa_query_get(rtwdev, rtwvif_link); 2476 break; 2477 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2478 skb = rtw89_arp_response_get(rtwdev, rtwvif_link); 2479 break; 2480 default: 2481 goto err; 2482 } 2483 2484 if (!skb) 2485 goto err; 2486 2487 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2488 kfree_skb(skb); 2489 2490 if (ret) 2491 goto err; 2492 2493 list_add_tail(&info->list, &rtwvif_link->general_pkt_list); 2494 *id = info->id; 2495 return 0; 2496 2497 err: 2498 kfree(info); 2499 return -ENOMEM; 2500 } 2501 2502 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2503 struct rtw89_vif_link *rtwvif_link, 2504 bool notify_fw) 2505 { 2506 struct list_head *pkt_list = &rtwvif_link->general_pkt_list; 2507 struct rtw89_pktofld_info *info, *tmp; 2508 2509 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2510 if (notify_fw) 2511 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2512 else 2513 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2514 list_del(&info->list); 2515 kfree(info); 2516 } 2517 } 2518 2519 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2520 { 2521 struct rtw89_vif_link *rtwvif_link; 2522 struct rtw89_vif *rtwvif; 2523 unsigned int link_id; 2524 2525 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2526 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) 2527 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif_link, 2528 notify_fw); 2529 } 2530 2531 #define H2C_GENERAL_PKT_LEN 6 2532 #define H2C_GENERAL_PKT_ID_UND 0xff 2533 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2534 struct rtw89_vif_link *rtwvif_link, u8 macid) 2535 { 2536 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2537 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2538 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2539 struct sk_buff *skb; 2540 int ret; 2541 2542 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2543 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2544 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2545 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2546 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 2547 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2548 2549 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2550 if (!skb) { 2551 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2552 return -ENOMEM; 2553 } 2554 skb_put(skb, H2C_GENERAL_PKT_LEN); 2555 SET_GENERAL_PKT_MACID(skb->data, macid); 2556 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2557 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2558 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2559 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2560 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, 
H2C_GENERAL_PKT_ID_UND); 2561 2562 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2563 H2C_CAT_MAC, 2564 H2C_CL_FW_INFO, 2565 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2566 H2C_GENERAL_PKT_LEN); 2567 2568 ret = rtw89_h2c_tx(rtwdev, skb, false); 2569 if (ret) { 2570 rtw89_err(rtwdev, "failed to send h2c\n"); 2571 goto fail; 2572 } 2573 2574 return 0; 2575 fail: 2576 dev_kfree_skb_any(skb); 2577 2578 return ret; 2579 } 2580 2581 #define H2C_LPS_PARM_LEN 8 2582 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2583 struct rtw89_lps_parm *lps_param) 2584 { 2585 struct sk_buff *skb; 2586 int ret; 2587 2588 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2589 if (!skb) { 2590 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2591 return -ENOMEM; 2592 } 2593 skb_put(skb, H2C_LPS_PARM_LEN); 2594 2595 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2596 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2597 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2598 SET_LPS_PARM_RLBM(skb->data, 1); 2599 SET_LPS_PARM_SMARTPS(skb->data, 1); 2600 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2601 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2602 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2603 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2604 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2605 2606 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2607 H2C_CAT_MAC, 2608 H2C_CL_MAC_PS, 2609 H2C_FUNC_MAC_LPS_PARM, 0, !lps_param->psmode, 2610 H2C_LPS_PARM_LEN); 2611 2612 ret = rtw89_h2c_tx(rtwdev, skb, false); 2613 if (ret) { 2614 rtw89_err(rtwdev, "failed to send h2c\n"); 2615 goto fail; 2616 } 2617 2618 return 0; 2619 fail: 2620 dev_kfree_skb_any(skb); 2621 2622 return ret; 2623 } 2624 2625 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2626 { 2627 const struct rtw89_chip_info *chip = rtwdev->chip; 2628 const struct rtw89_chan *chan; 2629 struct rtw89_vif_link *rtwvif_link; 2630 struct rtw89_h2c_lps_ch_info *h2c; 2631 u32 len = sizeof(*h2c); 2632 unsigned int link_id; 2633 struct sk_buff *skb; 2634 bool no_chan = true; 2635 u8 phy_idx; 2636 u32 done; 2637 int ret; 2638 2639 if (chip->chip_gen != RTW89_CHIP_BE) 2640 return 0; 2641 2642 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2643 if (!skb) { 2644 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2645 return -ENOMEM; 2646 } 2647 skb_put(skb, len); 2648 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2649 2650 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2651 phy_idx = rtwvif_link->phy_idx; 2652 if (phy_idx >= ARRAY_SIZE(h2c->info)) 2653 continue; 2654 2655 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2656 no_chan = false; 2657 2658 h2c->info[phy_idx].central_ch = chan->channel; 2659 h2c->info[phy_idx].pri_ch = chan->primary_channel; 2660 h2c->info[phy_idx].band = chan->band_type; 2661 h2c->info[phy_idx].bw = chan->band_width; 2662 } 2663 2664 if (no_chan) { 2665 rtw89_err(rtwdev, "no chan for h2c lps_ch_info\n"); 2666 ret = -ENOENT; 2667 goto fail; 2668 } 2669 2670 h2c->mlo_dbcc_mode_lps = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2671 2672 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2673 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2674 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2675 2676 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2677 ret = rtw89_h2c_tx(rtwdev, skb, false); 2678 if (ret) { 2679 rtw89_err(rtwdev, "failed to send h2c\n"); 2680 goto fail; 2681 } 2682 2683 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2684 true, rtwdev, R_CHK_LPS_STAT, 
B_CHK_LPS_STAT); 2685 if (ret) 2686 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2687 2688 return 0; 2689 fail: 2690 dev_kfree_skb_any(skb); 2691 2692 return ret; 2693 } 2694 2695 int rtw89_fw_h2c_lps_ml_cmn_info(struct rtw89_dev *rtwdev, 2696 struct rtw89_vif *rtwvif) 2697 { 2698 const struct rtw89_phy_bb_gain_info_be *gain = &rtwdev->bb_gain.be; 2699 struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat; 2700 const struct rtw89_chip_info *chip = rtwdev->chip; 2701 struct rtw89_h2c_lps_ml_cmn_info *h2c; 2702 struct rtw89_vif_link *rtwvif_link; 2703 const struct rtw89_chan *chan; 2704 u8 bw_idx = RTW89_BB_BW_20_40; 2705 u32 len = sizeof(*h2c); 2706 unsigned int link_id; 2707 struct sk_buff *skb; 2708 u8 gain_band; 2709 u32 done; 2710 u8 path; 2711 int ret; 2712 int i; 2713 2714 if (chip->chip_gen != RTW89_CHIP_BE) 2715 return 0; 2716 2717 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2718 if (!skb) { 2719 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ml_cmn_info\n"); 2720 return -ENOMEM; 2721 } 2722 skb_put(skb, len); 2723 h2c = (struct rtw89_h2c_lps_ml_cmn_info *)skb->data; 2724 2725 h2c->fmt_id = 0x1; 2726 2727 h2c->mlo_dbcc_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 2728 2729 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 2730 path = rtwvif_link->phy_idx == RTW89_PHY_1 ? RF_PATH_B : RF_PATH_A; 2731 chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 2732 gain_band = rtw89_subband_to_gain_band_be(chan->subband_type); 2733 2734 h2c->central_ch[rtwvif_link->phy_idx] = chan->channel; 2735 h2c->pri_ch[rtwvif_link->phy_idx] = chan->primary_channel; 2736 h2c->band[rtwvif_link->phy_idx] = chan->band_type; 2737 h2c->bw[rtwvif_link->phy_idx] = chan->band_width; 2738 if (pkt_stat->beacon_rate < RTW89_HW_RATE_OFDM6) 2739 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x1; 2740 else 2741 h2c->bcn_rate_type[rtwvif_link->phy_idx] = 0x2; 2742 2743 /* Fill BW20 RX gain table for beacon mode */ 2744 for (i = 0; i < TIA_GAIN_NUM; i++) { 2745 h2c->tia_gain[rtwvif_link->phy_idx][i] = 2746 cpu_to_le16(gain->tia_gain[gain_band][bw_idx][path][i]); 2747 } 2748 memcpy(h2c->lna_gain[rtwvif_link->phy_idx], 2749 gain->lna_gain[gain_band][bw_idx][path], 2750 LNA_GAIN_NUM); 2751 } 2752 2753 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2754 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2755 H2C_FUNC_FW_LPS_ML_CMN_INFO, 0, 0, len); 2756 2757 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2758 ret = rtw89_h2c_tx(rtwdev, skb, false); 2759 if (ret) { 2760 rtw89_err(rtwdev, "failed to send h2c\n"); 2761 goto fail; 2762 } 2763 2764 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2765 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2766 if (ret) 2767 rtw89_warn(rtwdev, "h2c_lps_ml_cmn_info done polling timeout\n"); 2768 2769 return 0; 2770 fail: 2771 dev_kfree_skb_any(skb); 2772 2773 return ret; 2774 } 2775 2776 #define H2C_P2P_ACT_LEN 20 2777 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, 2778 struct rtw89_vif_link *rtwvif_link, 2779 struct ieee80211_bss_conf *bss_conf, 2780 struct ieee80211_p2p_noa_desc *desc, 2781 u8 act, u8 noa_id) 2782 { 2783 bool p2p_type_gc = rtwvif_link->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2784 u8 ctwindow_oppps = bss_conf->p2p_noa_attr.oppps_ctwindow; 2785 struct sk_buff *skb; 2786 u8 *cmd; 2787 int ret; 2788 2789 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2790 if (!skb) { 2791 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2792 return -ENOMEM; 2793 } 2794 skb_put(skb, 
H2C_P2P_ACT_LEN); 2795 cmd = skb->data; 2796 2797 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif_link->mac_id); 2798 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2799 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2800 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2801 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2802 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2803 if (desc) { 2804 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2805 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2806 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2807 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2808 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2809 } 2810 2811 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2812 H2C_CAT_MAC, H2C_CL_MAC_PS, 2813 H2C_FUNC_P2P_ACT, 0, 0, 2814 H2C_P2P_ACT_LEN); 2815 2816 ret = rtw89_h2c_tx(rtwdev, skb, false); 2817 if (ret) { 2818 rtw89_err(rtwdev, "failed to send h2c\n"); 2819 goto fail; 2820 } 2821 2822 return 0; 2823 fail: 2824 dev_kfree_skb_any(skb); 2825 2826 return ret; 2827 } 2828 2829 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2830 struct sk_buff *skb) 2831 { 2832 const struct rtw89_chip_info *chip = rtwdev->chip; 2833 struct rtw89_hal *hal = &rtwdev->hal; 2834 u8 ntx_path; 2835 u8 map_b; 2836 2837 if (chip->rf_path_num == 1) { 2838 ntx_path = RF_A; 2839 map_b = 0; 2840 } else { 2841 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2842 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2843 } 2844 2845 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2846 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2847 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2848 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2849 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2850 } 2851 2852 #define H2C_CMC_TBL_LEN 68 2853 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2854 struct rtw89_vif_link *rtwvif_link, 2855 struct rtw89_sta_link *rtwsta_link) 2856 { 2857 const struct rtw89_chip_info *chip = rtwdev->chip; 2858 u8 macid = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 2859 struct sk_buff *skb; 2860 int ret; 2861 2862 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2863 if (!skb) { 2864 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2865 return -ENOMEM; 2866 } 2867 skb_put(skb, H2C_CMC_TBL_LEN); 2868 SET_CTRL_INFO_MACID(skb->data, macid); 2869 SET_CTRL_INFO_OPERATION(skb->data, 1); 2870 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2871 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2872 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2873 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2874 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2875 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2876 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2877 } 2878 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2879 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2880 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 2881 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2882 2883 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2884 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2885 chip->h2c_cctl_func_id, 0, 1, 2886 H2C_CMC_TBL_LEN); 2887 2888 ret = rtw89_h2c_tx(rtwdev, skb, false); 2889 if (ret) { 2890 rtw89_err(rtwdev, "failed to send h2c\n"); 2891 goto fail; 2892 } 2893 2894 return 0; 2895 fail: 2896 dev_kfree_skb_any(skb); 2897 2898 return ret; 2899 } 2900 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2901 2902 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2903 struct rtw89_vif_link *rtwvif_link, 2904 struct rtw89_sta_link *rtwsta_link) 2905 { 2906 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 2907 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2908 u32 len = sizeof(*h2c); 2909 struct sk_buff *skb; 2910 int ret; 2911 2912 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2913 if (!skb) { 2914 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2915 return -ENOMEM; 2916 } 2917 skb_put(skb, len); 2918 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2919 2920 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2921 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2922 2923 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2924 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2925 2926 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2927 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2928 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2929 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2930 2931 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2932 2933 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2934 2935 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2936 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2937 2938 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2939 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2940 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2941 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2942 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2943 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2944 2945 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2946 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2947 2948 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2949 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2950 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2951 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2952 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2953 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2954 2955 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2956 2957 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2958 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2959 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2960 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2961 2962 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2963 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2964 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2965 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2966 2967 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2968 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2969 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2970 len); 2971 2972 ret = rtw89_h2c_tx(rtwdev, skb, false); 2973 if (ret) { 2974 rtw89_err(rtwdev, "failed to send h2c\n"); 2975 goto fail; 2976 } 2977 2978 return 0; 2979 fail: 2980 dev_kfree_skb_any(skb); 2981 2982 return ret; 2983 } 2984 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2985 2986 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2987 struct ieee80211_link_sta *link_sta, 2988 u8 *pads) 2989 { 2990 bool ppe_th; 2991 u8 ppe16, ppe8; 2992 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 2993 u8 ppe_thres_hdr = link_sta->he_cap.ppe_thres[0]; 2994 u8 ru_bitmap; 2995 u8 n, idx, sh; 2996 u16 ppe; 2997 int i; 2998 2999 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 3000 link_sta->he_cap.he_cap_elem.phy_cap_info[6]); 3001 if (!ppe_th) { 3002 u8 pad; 3003 3004 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 3005 link_sta->he_cap.he_cap_elem.phy_cap_info[9]); 3006 3007 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 3008 pads[i] = pad; 3009 3010 return; 3011 } 3012 3013 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 3014 n = hweight8(ru_bitmap); 3015 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3016 3017 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3018 if (!(ru_bitmap & BIT(i))) { 3019 pads[i] = 1; 3020 continue; 3021 } 3022 3023 idx = n >> 3; 3024 sh = n & 7; 3025 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 3026 3027 ppe = le16_to_cpu(*((__le16 *)&link_sta->he_cap.ppe_thres[idx])); 3028 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3029 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 3030 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3031 3032 if (ppe16 != 7 && ppe8 == 7) 3033 pads[i] = RTW89_PE_DURATION_16; 3034 else if (ppe8 != 7) 3035 pads[i] = RTW89_PE_DURATION_8; 3036 else 3037 pads[i] = RTW89_PE_DURATION_0; 3038 } 3039 } 3040 3041 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 3042 struct rtw89_vif_link *rtwvif_link, 3043 struct rtw89_sta_link *rtwsta_link) 3044 { 3045 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3046 const struct rtw89_chip_info *chip = rtwdev->chip; 3047 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3048 rtwvif_link->chanctx_idx); 3049 struct ieee80211_link_sta *link_sta; 3050 struct sk_buff *skb; 3051 u8 pads[RTW89_PPE_BW_NUM]; 3052 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3053 u16 lowest_rate; 3054 int ret; 3055 3056 memset(pads, 0, sizeof(pads)); 3057 3058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3059 if (!skb) { 3060 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3061 return -ENOMEM; 3062 } 3063 3064 rcu_read_lock(); 3065 3066 if (rtwsta_link) 3067 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3068 3069 if (rtwsta_link && link_sta->he_cap.has_he) 3070 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3071 3072 if (vif->p2p) 3073 lowest_rate = RTW89_HW_RATE_OFDM6; 3074 else if (chan->band_type == RTW89_BAND_2G) 3075 lowest_rate = RTW89_HW_RATE_CCK1; 3076 else 3077 lowest_rate = RTW89_HW_RATE_OFDM6; 3078 3079 skb_put(skb, H2C_CMC_TBL_LEN); 3080 SET_CTRL_INFO_MACID(skb->data, mac_id); 3081 SET_CTRL_INFO_OPERATION(skb->data, 1); 3082 SET_CMC_TBL_DISRTSFB(skb->data, 1); 3083 SET_CMC_TBL_DISDATAFB(skb->data, 1); 3084 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 3085 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 3086 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 3087 if (vif->type == NL80211_IFTYPE_STATION) 3088 SET_CMC_TBL_ULDL(skb->data, 1); 3089 else 3090 SET_CMC_TBL_ULDL(skb->data, 0); 3091 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif_link->port); 3092 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 3093 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3094 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3095 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3096 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3097 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 3098 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 3099 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 3100 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 3101 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 3102 } 3103 if (rtwsta_link) 3104 
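		/* link_sta was assigned above under the same rtwsta_link
		 * check, so it is always initialized when this runs.
		 */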
SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 3105 link_sta->he_cap.has_he); 3106 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) 3107 SET_CMC_TBL_DATA_DCM(skb->data, 0); 3108 3109 rcu_read_unlock(); 3110 3111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3112 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3113 chip->h2c_cctl_func_id, 0, 1, 3114 H2C_CMC_TBL_LEN); 3115 3116 ret = rtw89_h2c_tx(rtwdev, skb, false); 3117 if (ret) { 3118 rtw89_err(rtwdev, "failed to send h2c\n"); 3119 goto fail; 3120 } 3121 3122 return 0; 3123 fail: 3124 dev_kfree_skb_any(skb); 3125 3126 return ret; 3127 } 3128 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 3129 3130 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 3131 struct ieee80211_link_sta *link_sta, 3132 u8 *pads) 3133 { 3134 u8 nss = min(link_sta->rx_nss, rtwdev->hal.tx_nss) - 1; 3135 u16 ppe_thres_hdr; 3136 u8 ppe16, ppe8; 3137 u8 n, idx, sh; 3138 u8 ru_bitmap; 3139 bool ppe_th; 3140 u16 ppe; 3141 int i; 3142 3143 ppe_th = !!u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3144 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 3145 if (!ppe_th) { 3146 u8 pad; 3147 3148 pad = u8_get_bits(link_sta->eht_cap.eht_cap_elem.phy_cap_info[5], 3149 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 3150 3151 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 3152 pads[i] = pad; 3153 3154 return; 3155 } 3156 3157 ppe_thres_hdr = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres); 3158 ru_bitmap = u16_get_bits(ppe_thres_hdr, 3159 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 3160 n = hweight8(ru_bitmap); 3161 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 3162 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 3163 3164 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 3165 if (!(ru_bitmap & BIT(i))) { 3166 pads[i] = 1; 3167 continue; 3168 } 3169 3170 idx = n >> 3; 3171 sh = n & 7; 3172 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 3173 3174 ppe = get_unaligned_le16(link_sta->eht_cap.eht_ppe_thres + idx); 3175 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3176 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 3177 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 3178 3179 if (ppe16 != 7 && ppe8 == 7) 3180 pads[i] = RTW89_PE_DURATION_16_20; 3181 else if (ppe8 != 7) 3182 pads[i] = RTW89_PE_DURATION_8; 3183 else 3184 pads[i] = RTW89_PE_DURATION_0; 3185 } 3186 } 3187 3188 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3189 struct rtw89_vif_link *rtwvif_link, 3190 struct rtw89_sta_link *rtwsta_link) 3191 { 3192 struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link); 3193 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx); 3194 u8 mac_id = rtwsta_link ? 
rtwsta_link->mac_id : rtwvif_link->mac_id; 3195 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3196 struct ieee80211_bss_conf *bss_conf; 3197 struct ieee80211_link_sta *link_sta; 3198 u8 pads[RTW89_PPE_BW_NUM]; 3199 u32 len = sizeof(*h2c); 3200 struct sk_buff *skb; 3201 u16 lowest_rate; 3202 int ret; 3203 3204 memset(pads, 0, sizeof(pads)); 3205 3206 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3207 if (!skb) { 3208 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 3209 return -ENOMEM; 3210 } 3211 3212 rcu_read_lock(); 3213 3214 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3215 3216 if (rtwsta_link) { 3217 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3218 3219 if (link_sta->eht_cap.has_eht) 3220 __get_sta_eht_pkt_padding(rtwdev, link_sta, pads); 3221 else if (link_sta->he_cap.has_he) 3222 __get_sta_he_pkt_padding(rtwdev, link_sta, pads); 3223 } 3224 3225 if (vif->p2p) 3226 lowest_rate = RTW89_HW_RATE_OFDM6; 3227 else if (chan->band_type == RTW89_BAND_2G) 3228 lowest_rate = RTW89_HW_RATE_CCK1; 3229 else 3230 lowest_rate = RTW89_HW_RATE_OFDM6; 3231 3232 skb_put(skb, len); 3233 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3234 3235 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 3236 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3237 3238 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 3239 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 3240 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 3241 CCTLINFO_G7_W0_DISDATAFB); 3242 3243 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3244 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 3245 3246 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3247 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 3248 3249 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3250 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3251 3252 h2c->w4 = le32_encode_bits(rtwvif_link->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3253 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3254 3255 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3256 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3257 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3258 } 3259 3260 if (bss_conf->eht_support) { 3261 u16 punct = bss_conf->chanreq.oper.punctured; 3262 3263 h2c->w4 |= le32_encode_bits(~punct, 3264 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3265 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3266 } 3267 3268 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3269 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3270 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3271 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3272 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3273 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3274 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3275 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3276 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3277 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3278 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3279 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3280 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3281 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3282 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3283 3284 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3285 CCTLINFO_G7_W6_ULDL); 3286 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3287 3288 if (rtwsta_link) { 3289 h2c->w8 = le32_encode_bits(link_sta->he_cap.has_he, 3290 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3291 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3292 } 3293 3294 rcu_read_unlock(); 3295 3296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3297 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3298 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3299 len); 3300 3301 ret = rtw89_h2c_tx(rtwdev, skb, false); 3302 if (ret) { 3303 rtw89_err(rtwdev, "failed to send h2c\n"); 3304 goto fail; 3305 } 3306 3307 return 0; 3308 fail: 3309 dev_kfree_skb_any(skb); 3310 3311 return ret; 3312 } 3313 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3314 3315 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3316 struct rtw89_vif_link *rtwvif_link, 3317 struct rtw89_sta_link *rtwsta_link) 3318 { 3319 struct rtw89_sta *rtwsta = rtwsta_link->rtwsta; 3320 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3321 u32 len = sizeof(*h2c); 3322 struct sk_buff *skb; 3323 u16 agg_num = 0; 3324 u8 ba_bmap = 0; 3325 int ret; 3326 u8 tid; 3327 3328 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3329 if (!skb) { 3330 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3331 return -ENOMEM; 3332 } 3333 skb_put(skb, len); 3334 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3335 3336 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3337 if (agg_num == 0) 3338 agg_num = rtwsta->ampdu_params[tid].agg_num; 3339 else 3340 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3341 } 3342 3343 if (agg_num <= 0x20) 3344 ba_bmap = 3; 3345 else if (agg_num > 0x20 && agg_num <= 0x40) 3346 ba_bmap = 0; 3347 else if (agg_num > 0x40 && agg_num <= 0x80) 3348 ba_bmap = 1; 3349 else if (agg_num > 0x80 && agg_num <= 0x100) 3350 ba_bmap = 2; 3351 else if (agg_num > 0x100 && agg_num <= 0x200) 3352 ba_bmap = 4; 3353 else if (agg_num > 0x200 && agg_num <= 0x400) 3354 ba_bmap = 5; 3355 3356 h2c->c0 = le32_encode_bits(rtwsta_link->mac_id, CCTLINFO_G7_C0_MACID) | 3357 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3358 3359 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3360 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3361 3362 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3363 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3364 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3365 len); 3366 3367 ret = rtw89_h2c_tx(rtwdev, skb, false); 3368 if (ret) { 3369 rtw89_err(rtwdev, "failed to send h2c\n"); 3370 goto fail; 3371 } 3372 3373 return 0; 3374 fail: 3375 dev_kfree_skb_any(skb); 3376 3377 return ret; 3378 } 3379 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3380 3381 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3382 struct rtw89_sta_link *rtwsta_link) 3383 { 3384 const struct rtw89_chip_info *chip = rtwdev->chip; 3385 struct sk_buff *skb; 3386 int ret; 3387 3388 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3389 if (!skb) { 3390 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3391 return -ENOMEM; 3392 } 3393 skb_put(skb, H2C_CMC_TBL_LEN); 3394 SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id); 3395 SET_CTRL_INFO_OPERATION(skb->data, 1); 3396 if (rtwsta_link->cctl_tx_time) { 3397 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3398 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta_link->ampdu_max_time); 3399 } 3400 if (rtwsta_link->cctl_tx_retry_limit) { 3401 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3402 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta_link->data_tx_cnt_lmt); 3403 } 3404 
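	/* The *_SEL bits above switch the MAC from its default limits to the
	 * per-MACID AMPDU time and data retry limits programmed in this CMAC
	 * table; fields left untouched presumably keep the defaults.
	 */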
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta_link *rtwsta_link)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta_link->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	/* tim_offset from mac80211 counts from the start of the frame;
	 * convert it to an offset into the frame body.
	 */
	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);

int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
				  struct rtw89_vif_link *rtwvif_link)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif_link->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
	struct rtw89_h2c_bcn_upd_be *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif_link, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif_link->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
		  le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct
rtw89_vif_link *rtwvif_link, 3628 struct rtw89_sta_link *rtwsta_link, 3629 enum rtw89_upd_mode upd_mode) 3630 { 3631 struct sk_buff *skb; 3632 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3633 u8 self_role; 3634 int ret; 3635 3636 if (rtwvif_link->net_type == RTW89_NET_TYPE_AP_MODE) { 3637 if (rtwsta_link) 3638 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3639 else 3640 self_role = rtwvif_link->self_role; 3641 } else { 3642 self_role = rtwvif_link->self_role; 3643 } 3644 3645 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3646 if (!skb) { 3647 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3648 return -ENOMEM; 3649 } 3650 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3651 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3652 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3653 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3654 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif_link->wifi_role); 3655 3656 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3657 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3658 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3659 H2C_ROLE_MAINTAIN_LEN); 3660 3661 ret = rtw89_h2c_tx(rtwdev, skb, false); 3662 if (ret) { 3663 rtw89_err(rtwdev, "failed to send h2c\n"); 3664 goto fail; 3665 } 3666 3667 return 0; 3668 fail: 3669 dev_kfree_skb_any(skb); 3670 3671 return ret; 3672 } 3673 3674 static enum rtw89_fw_sta_type 3675 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3676 struct rtw89_sta_link *rtwsta_link) 3677 { 3678 struct ieee80211_bss_conf *bss_conf; 3679 struct ieee80211_link_sta *link_sta; 3680 enum rtw89_fw_sta_type type; 3681 3682 rcu_read_lock(); 3683 3684 if (!rtwsta_link) 3685 goto by_vif; 3686 3687 link_sta = rtw89_sta_rcu_dereference_link(rtwsta_link, true); 3688 3689 if (link_sta->eht_cap.has_eht) 3690 type = RTW89_FW_BE_STA; 3691 else if (link_sta->he_cap.has_he) 3692 type = RTW89_FW_AX_STA; 3693 else 3694 type = RTW89_FW_N_AC_STA; 3695 3696 goto out; 3697 3698 by_vif: 3699 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, true); 3700 3701 if (bss_conf->eht_support) 3702 type = RTW89_FW_BE_STA; 3703 else if (bss_conf->he_support) 3704 type = RTW89_FW_AX_STA; 3705 else 3706 type = RTW89_FW_N_AC_STA; 3707 3708 out: 3709 rcu_read_unlock(); 3710 3711 return type; 3712 } 3713 3714 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3715 struct rtw89_sta_link *rtwsta_link, bool dis_conn) 3716 { 3717 struct sk_buff *skb; 3718 u8 mac_id = rtwsta_link ? rtwsta_link->mac_id : rtwvif_link->mac_id; 3719 u8 self_role = rtwvif_link->self_role; 3720 enum rtw89_fw_sta_type sta_type; 3721 u8 net_type = rtwvif_link->net_type; 3722 struct rtw89_h2c_join_v1 *h2c_v1; 3723 struct rtw89_h2c_join *h2c; 3724 u32 len = sizeof(*h2c); 3725 bool format_v1 = false; 3726 int ret; 3727 3728 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3729 len = sizeof(*h2c_v1); 3730 format_v1 = true; 3731 } 3732 3733 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta_link) { 3734 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3735 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3736 } 3737 3738 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3739 if (!skb) { 3740 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3741 return -ENOMEM; 3742 } 3743 skb_put(skb, len); 3744 h2c = (struct rtw89_h2c_join *)skb->data; 3745 3746 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3747 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3748 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3749 le32_encode_bits(rtwvif_link->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3750 le32_encode_bits(rtwvif_link->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3751 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3752 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3753 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3754 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3755 le32_encode_bits(rtwvif_link->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3756 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3757 le32_encode_bits(rtwvif_link->wifi_role, 3758 RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3759 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3760 3761 if (!format_v1) 3762 goto done; 3763 3764 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3765 3766 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif_link, rtwsta_link); 3767 3768 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3769 h2c_v1->w2 = 0; 3770 3771 done: 3772 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3773 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3774 H2C_FUNC_MAC_JOININFO, 0, 1, 3775 len); 3776 3777 ret = rtw89_h2c_tx(rtwdev, skb, false); 3778 if (ret) { 3779 rtw89_err(rtwdev, "failed to send h2c\n"); 3780 goto fail; 3781 } 3782 3783 return 0; 3784 fail: 3785 dev_kfree_skb_any(skb); 3786 3787 return ret; 3788 } 3789 3790 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3791 { 3792 struct rtw89_h2c_notify_dbcc *h2c; 3793 u32 len = sizeof(*h2c); 3794 struct sk_buff *skb; 3795 int ret; 3796 3797 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3798 if (!skb) { 3799 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3800 return -ENOMEM; 3801 } 3802 skb_put(skb, len); 3803 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3804 3805 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3806 3807 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3808 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3809 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3810 len); 3811 3812 ret = rtw89_h2c_tx(rtwdev, skb, false); 3813 if (ret) { 3814 rtw89_err(rtwdev, "failed to send h2c\n"); 3815 goto fail; 3816 } 3817 3818 return 0; 3819 fail: 3820 dev_kfree_skb_any(skb); 3821 3822 return ret; 3823 } 3824 3825 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3826 bool pause) 3827 { 3828 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3829 struct rtw89_fw_macid_pause_grp *h2c; 3830 __le32 set = cpu_to_le32(BIT(sh)); 3831 u8 h2c_macid_pause_id; 3832 struct sk_buff *skb; 3833 u32 len; 3834 int ret; 3835 3836 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3837 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3838 len = sizeof(*h2c_new); 3839 } else { 3840 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3841 len = sizeof(*h2c); 3842 } 3843 3844 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3845 if (!skb) { 3846 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3847 return -ENOMEM; 3848 } 3849 skb_put(skb, len); 3850 3851 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 
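/* MACID_PAUSE_SLEEP firmware format: the *_mask_grp words select which
 * macid bit within the group is being updated, while pause_grp/sleep_grp
 * carry the new state and are only set when the macid is being paused.
 */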
3852 h2c_new = (struct rtw89_fw_macid_pause_sleep_grp *)skb->data; 3853 3854 h2c_new->n[0].pause_mask_grp[grp] = set; 3855 h2c_new->n[0].sleep_mask_grp[grp] = set; 3856 if (pause) { 3857 h2c_new->n[0].pause_grp[grp] = set; 3858 h2c_new->n[0].sleep_grp[grp] = set; 3859 } 3860 } else { 3861 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3862 3863 h2c->mask_grp[grp] = set; 3864 if (pause) 3865 h2c->pause_grp[grp] = set; 3866 } 3867 3868 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3869 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3870 h2c_macid_pause_id, 1, 0, 3871 len); 3872 3873 ret = rtw89_h2c_tx(rtwdev, skb, false); 3874 if (ret) { 3875 rtw89_err(rtwdev, "failed to send h2c\n"); 3876 goto fail; 3877 } 3878 3879 return 0; 3880 fail: 3881 dev_kfree_skb_any(skb); 3882 3883 return ret; 3884 } 3885 3886 #define H2C_EDCA_LEN 12 3887 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 3888 u8 ac, u32 val) 3889 { 3890 struct sk_buff *skb; 3891 int ret; 3892 3893 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3894 if (!skb) { 3895 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3896 return -ENOMEM; 3897 } 3898 skb_put(skb, H2C_EDCA_LEN); 3899 RTW89_SET_EDCA_SEL(skb->data, 0); 3900 RTW89_SET_EDCA_BAND(skb->data, rtwvif_link->mac_idx); 3901 RTW89_SET_EDCA_WMM(skb->data, 0); 3902 RTW89_SET_EDCA_AC(skb->data, ac); 3903 RTW89_SET_EDCA_PARAM(skb->data, val); 3904 3905 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3906 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3907 H2C_FUNC_USR_EDCA, 0, 1, 3908 H2C_EDCA_LEN); 3909 3910 ret = rtw89_h2c_tx(rtwdev, skb, false); 3911 if (ret) { 3912 rtw89_err(rtwdev, "failed to send h2c\n"); 3913 goto fail; 3914 } 3915 3916 return 0; 3917 fail: 3918 dev_kfree_skb_any(skb); 3919 3920 return ret; 3921 } 3922 3923 #define H2C_TSF32_TOGL_LEN 4 3924 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, 3925 struct rtw89_vif_link *rtwvif_link, 3926 bool en) 3927 { 3928 struct sk_buff *skb; 3929 u16 early_us = en ? 
2000 : 0; 3930 u8 *cmd; 3931 int ret; 3932 3933 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3934 if (!skb) { 3935 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3936 return -ENOMEM; 3937 } 3938 skb_put(skb, H2C_TSF32_TOGL_LEN); 3939 cmd = skb->data; 3940 3941 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif_link->mac_idx); 3942 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3943 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif_link->port); 3944 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3945 3946 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3947 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3948 H2C_FUNC_TSF32_TOGL, 0, 0, 3949 H2C_TSF32_TOGL_LEN); 3950 3951 ret = rtw89_h2c_tx(rtwdev, skb, false); 3952 if (ret) { 3953 rtw89_err(rtwdev, "failed to send h2c\n"); 3954 goto fail; 3955 } 3956 3957 return 0; 3958 fail: 3959 dev_kfree_skb_any(skb); 3960 3961 return ret; 3962 } 3963 3964 #define H2C_OFLD_CFG_LEN 8 3965 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3966 { 3967 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3968 struct sk_buff *skb; 3969 int ret; 3970 3971 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3972 if (!skb) { 3973 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3974 return -ENOMEM; 3975 } 3976 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3977 3978 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3979 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3980 H2C_FUNC_OFLD_CFG, 0, 1, 3981 H2C_OFLD_CFG_LEN); 3982 3983 ret = rtw89_h2c_tx(rtwdev, skb, false); 3984 if (ret) { 3985 rtw89_err(rtwdev, "failed to send h2c\n"); 3986 goto fail; 3987 } 3988 3989 return 0; 3990 fail: 3991 dev_kfree_skb_any(skb); 3992 3993 return ret; 3994 } 3995 3996 int rtw89_fw_h2c_tx_duty(struct rtw89_dev *rtwdev, u8 lv) 3997 { 3998 struct rtw89_h2c_tx_duty *h2c; 3999 u32 len = sizeof(*h2c); 4000 struct sk_buff *skb; 4001 u16 pause, active; 4002 int ret; 4003 4004 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4005 if (!skb) { 4006 rtw89_err(rtwdev, "failed to alloc skb for h2c tx duty\n"); 4007 return -ENOMEM; 4008 } 4009 4010 skb_put(skb, len); 4011 h2c = (struct rtw89_h2c_tx_duty *)skb->data; 4012 4013 static_assert(RTW89_THERMAL_PROT_LV_MAX * RTW89_THERMAL_PROT_STEP < 100); 4014 4015 if (lv == 0 || lv > RTW89_THERMAL_PROT_LV_MAX) { 4016 h2c->w1 = le32_encode_bits(1, RTW89_H2C_TX_DUTY_W1_STOP); 4017 } else { 4018 active = 100 - lv * RTW89_THERMAL_PROT_STEP; 4019 pause = 100 - active; 4020 4021 h2c->w0 = le32_encode_bits(pause, RTW89_H2C_TX_DUTY_W0_PAUSE_INTVL_MASK) | 4022 le32_encode_bits(active, RTW89_H2C_TX_DUTY_W0_TX_INTVL_MASK); 4023 } 4024 4025 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4026 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4027 H2C_FUNC_TX_DUTY, 0, 0, len); 4028 4029 ret = rtw89_h2c_tx(rtwdev, skb, false); 4030 if (ret) { 4031 rtw89_err(rtwdev, "failed to send h2c\n"); 4032 goto fail; 4033 } 4034 4035 return 0; 4036 fail: 4037 dev_kfree_skb_any(skb); 4038 4039 return ret; 4040 } 4041 4042 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 4043 struct rtw89_vif_link *rtwvif_link, 4044 bool connect) 4045 { 4046 struct ieee80211_bss_conf *bss_conf; 4047 s32 thold = RTW89_DEFAULT_CQM_THOLD; 4048 u32 hyst = RTW89_DEFAULT_CQM_HYST; 4049 struct rtw89_h2c_bcnfltr *h2c; 4050 u32 len = sizeof(*h2c); 4051 struct sk_buff *skb; 4052 int ret; 4053 4054 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4055 return -EINVAL; 4056 4057 if (!rtwvif_link || rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4058 return -EINVAL; 
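/* Prefer the CQM threshold/hysteresis configured on this BSS; keep the
 * driver defaults (RTW89_DEFAULT_CQM_THOLD/HYST) when they are unset.
 */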
4059 4060 rcu_read_lock(); 4061 4062 bss_conf = rtw89_vif_rcu_dereference_link(rtwvif_link, false); 4063 4064 if (bss_conf->cqm_rssi_hyst) 4065 hyst = bss_conf->cqm_rssi_hyst; 4066 if (bss_conf->cqm_rssi_thold) 4067 thold = bss_conf->cqm_rssi_thold; 4068 4069 rcu_read_unlock(); 4070 4071 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4072 if (!skb) { 4073 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 4074 return -ENOMEM; 4075 } 4076 4077 skb_put(skb, len); 4078 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 4079 4080 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 4081 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 4082 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 4083 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 4084 RTW89_H2C_BCNFLTR_W0_MODE) | 4085 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 4086 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 4087 le32_encode_bits(thold + MAX_RSSI, 4088 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 4089 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 4090 4091 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4092 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4093 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 4094 4095 ret = rtw89_h2c_tx(rtwdev, skb, false); 4096 if (ret) { 4097 rtw89_err(rtwdev, "failed to send h2c\n"); 4098 goto fail; 4099 } 4100 4101 return 0; 4102 fail: 4103 dev_kfree_skb_any(skb); 4104 4105 return ret; 4106 } 4107 4108 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 4109 struct rtw89_rx_phy_ppdu *phy_ppdu) 4110 { 4111 struct rtw89_h2c_ofld_rssi *h2c; 4112 u32 len = sizeof(*h2c); 4113 struct sk_buff *skb; 4114 s8 rssi; 4115 int ret; 4116 4117 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 4118 return -EINVAL; 4119 4120 if (!phy_ppdu) 4121 return -EINVAL; 4122 4123 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4124 if (!skb) { 4125 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 4126 return -ENOMEM; 4127 } 4128 4129 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 4130 skb_put(skb, len); 4131 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 4132 4133 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 4134 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 4135 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 4136 4137 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4138 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4139 H2C_FUNC_OFLD_RSSI, 0, 1, len); 4140 4141 ret = rtw89_h2c_tx(rtwdev, skb, false); 4142 if (ret) { 4143 rtw89_err(rtwdev, "failed to send h2c\n"); 4144 goto fail; 4145 } 4146 4147 return 0; 4148 fail: 4149 dev_kfree_skb_any(skb); 4150 4151 return ret; 4152 } 4153 4154 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link) 4155 { 4156 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 4157 struct rtw89_traffic_stats *stats = &rtwvif->stats; 4158 struct rtw89_h2c_ofld *h2c; 4159 u32 len = sizeof(*h2c); 4160 struct sk_buff *skb; 4161 int ret; 4162 4163 if (rtwvif_link->net_type != RTW89_NET_TYPE_INFRA) 4164 return -EINVAL; 4165 4166 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4167 if (!skb) { 4168 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 4169 return -ENOMEM; 4170 } 4171 4172 skb_put(skb, len); 4173 h2c = (struct rtw89_h2c_ofld *)skb->data; 4174 4175 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 4176 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 4177 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 4178 4179 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4180 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4181 H2C_FUNC_OFLD_TP, 0, 1, len); 4182 4183 ret = rtw89_h2c_tx(rtwdev, skb, false); 4184 if (ret) { 4185 rtw89_err(rtwdev, "failed to send h2c\n"); 4186 goto fail; 4187 } 4188 4189 return 0; 4190 fail: 4191 dev_kfree_skb_any(skb); 4192 4193 return ret; 4194 } 4195 4196 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 4197 { 4198 const struct rtw89_chip_info *chip = rtwdev->chip; 4199 struct rtw89_h2c_ra_v1 *h2c_v1; 4200 struct rtw89_h2c_ra *h2c; 4201 u32 len = sizeof(*h2c); 4202 bool format_v1 = false; 4203 struct sk_buff *skb; 4204 int ret; 4205 4206 if (chip->chip_gen == RTW89_CHIP_BE) { 4207 len = sizeof(*h2c_v1); 4208 format_v1 = true; 4209 } 4210 4211 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4212 if (!skb) { 4213 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 4214 return -ENOMEM; 4215 } 4216 skb_put(skb, len); 4217 h2c = (struct rtw89_h2c_ra *)skb->data; 4218 rtw89_debug(rtwdev, RTW89_DBG_RA, 4219 "ra cmd msk: %llx ", ra->ra_mask); 4220 4221 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 4222 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 4223 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 4224 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 4225 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 4226 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 4227 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 4228 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 4229 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 4230 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 4231 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 4232 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 4233 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 4234 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 4235 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 4236 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 4237 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 4238 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 4239 4240 if (!format_v1) 4241 goto csi; 4242 4243 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 4244 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 4245 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 4246 4247 csi: 4248 if (!csi) 4249 goto done; 4250 4251 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 4252 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 4253 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 4254 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 4255 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 4256 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 4257 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 4258 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 4259 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 4260 4261 done: 4262 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4263 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 4264 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 4265 len); 4266 4267 ret = rtw89_h2c_tx(rtwdev, skb, false); 4268 if (ret) { 4269 rtw89_err(rtwdev, "failed 
to send h2c\n"); 4270 goto fail; 4271 } 4272 4273 return 0; 4274 fail: 4275 dev_kfree_skb_any(skb); 4276 4277 return ret; 4278 } 4279 4280 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 4281 { 4282 struct rtw89_btc *btc = &rtwdev->btc; 4283 struct rtw89_btc_dm *dm = &btc->dm; 4284 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 4285 struct rtw89_btc_module *module = &init_info->module; 4286 struct rtw89_btc_ant_info *ant = &module->ant; 4287 struct rtw89_h2c_cxinit *h2c; 4288 u32 len = sizeof(*h2c); 4289 struct sk_buff *skb; 4290 int ret; 4291 4292 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4293 if (!skb) { 4294 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 4295 return -ENOMEM; 4296 } 4297 skb_put(skb, len); 4298 h2c = (struct rtw89_h2c_cxinit *)skb->data; 4299 4300 h2c->hdr.type = type; 4301 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 4302 4303 h2c->ant_type = ant->type; 4304 h2c->ant_num = ant->num; 4305 h2c->ant_iso = ant->isolation; 4306 h2c->ant_info = 4307 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 4308 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 4309 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 4310 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 4311 4312 h2c->mod_rfe = module->rfe_type; 4313 h2c->mod_cv = module->cv; 4314 h2c->mod_info = 4315 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 4316 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 4317 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 4318 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4319 h2c->mod_adie_kt = module->kt_ver_adie; 4320 h2c->wl_gch = init_info->wl_guard_ch; 4321 4322 h2c->info = 4323 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4324 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4325 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4326 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4327 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4328 4329 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4330 H2C_CAT_OUTSRC, BTFC_SET, 4331 SET_DRV_INFO, 0, 0, 4332 len); 4333 4334 ret = rtw89_h2c_tx(rtwdev, skb, false); 4335 if (ret) { 4336 rtw89_err(rtwdev, "failed to send h2c\n"); 4337 goto fail; 4338 } 4339 4340 return 0; 4341 fail: 4342 dev_kfree_skb_any(skb); 4343 4344 return ret; 4345 } 4346 4347 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4348 { 4349 struct rtw89_btc *btc = &rtwdev->btc; 4350 struct rtw89_btc_dm *dm = &btc->dm; 4351 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4352 struct rtw89_h2c_cxinit_v7 *h2c; 4353 u32 len = sizeof(*h2c); 4354 struct sk_buff *skb; 4355 int ret; 4356 4357 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4358 if (!skb) { 4359 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4360 return -ENOMEM; 4361 } 4362 skb_put(skb, len); 4363 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4364 4365 h2c->hdr.type = type; 4366 h2c->hdr.ver = btc->ver->fcxinit; 4367 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4368 h2c->init = *init_info; 4369 4370 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4371 H2C_CAT_OUTSRC, BTFC_SET, 4372 SET_DRV_INFO, 0, 0, 4373 len); 4374 4375 ret = rtw89_h2c_tx(rtwdev, skb, false); 4376 if (ret) { 4377 rtw89_err(rtwdev, "failed to send h2c\n"); 4378 goto fail; 4379 } 
4380 4381 return 0; 4382 fail: 4383 dev_kfree_skb_any(skb); 4384 4385 return ret; 4386 } 4387 4388 #define PORT_DATA_OFFSET 4 4389 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4390 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4391 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4392 4393 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4394 { 4395 struct rtw89_btc *btc = &rtwdev->btc; 4396 const struct rtw89_btc_ver *ver = btc->ver; 4397 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4398 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4399 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4400 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4401 struct sk_buff *skb; 4402 u32 len; 4403 u8 offset = 0; 4404 u8 *cmd; 4405 int ret; 4406 int i; 4407 4408 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4409 4410 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4411 if (!skb) { 4412 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4413 return -ENOMEM; 4414 } 4415 skb_put(skb, len); 4416 cmd = skb->data; 4417 4418 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4419 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4420 4421 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4422 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4423 4424 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4425 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4426 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4427 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4428 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4429 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4430 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4431 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4432 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4433 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4434 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4435 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4436 4437 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4438 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4439 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4440 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4441 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4442 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4443 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4444 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4445 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4446 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4447 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4448 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4449 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4450 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4451 } 4452 4453 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4454 H2C_CAT_OUTSRC, BTFC_SET, 4455 SET_DRV_INFO, 0, 0, 4456 len); 4457 4458 ret = rtw89_h2c_tx(rtwdev, skb, false); 4459 if (ret) { 4460 rtw89_err(rtwdev, "failed to send h2c\n"); 4461 goto fail; 4462 } 4463 4464 return 0; 4465 fail: 4466 dev_kfree_skb_any(skb); 4467 4468 return ret; 4469 } 4470 4471 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4472 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4473 4474 int 
rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4475 { 4476 struct rtw89_btc *btc = &rtwdev->btc; 4477 const struct rtw89_btc_ver *ver = btc->ver; 4478 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4479 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4480 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4481 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4482 struct sk_buff *skb; 4483 u32 len; 4484 u8 *cmd, offset; 4485 int ret; 4486 int i; 4487 4488 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4489 4490 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4491 if (!skb) { 4492 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4493 return -ENOMEM; 4494 } 4495 skb_put(skb, len); 4496 cmd = skb->data; 4497 4498 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4499 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4500 4501 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4502 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4503 4504 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4505 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4506 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4507 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4508 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4509 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4510 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4511 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4512 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4513 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4514 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4515 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4516 4517 offset = PORT_DATA_OFFSET; 4518 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4519 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4520 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4521 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4522 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4523 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4524 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4525 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4526 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4527 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4528 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4529 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4530 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4531 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4532 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4533 } 4534 4535 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4536 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4537 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4538 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4539 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4540 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4541 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4542 4543 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4544 H2C_CAT_OUTSRC, BTFC_SET, 4545 SET_DRV_INFO, 0, 0, 4546 len); 4547 4548 ret = rtw89_h2c_tx(rtwdev, skb, false); 
4549 if (ret) { 4550 rtw89_err(rtwdev, "failed to send h2c\n"); 4551 goto fail; 4552 } 4553 4554 return 0; 4555 fail: 4556 dev_kfree_skb_any(skb); 4557 4558 return ret; 4559 } 4560 4561 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4562 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4563 4564 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4565 { 4566 struct rtw89_btc *btc = &rtwdev->btc; 4567 const struct rtw89_btc_ver *ver = btc->ver; 4568 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4569 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4570 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4571 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4572 struct sk_buff *skb; 4573 u32 len; 4574 u8 *cmd, offset; 4575 int ret; 4576 int i; 4577 4578 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4579 4580 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4581 if (!skb) { 4582 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4583 return -ENOMEM; 4584 } 4585 skb_put(skb, len); 4586 cmd = skb->data; 4587 4588 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4589 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4590 4591 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4592 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4593 4594 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4595 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4596 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4597 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4598 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4599 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4600 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4601 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4602 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4603 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4604 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4605 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4606 4607 offset = PORT_DATA_OFFSET; 4608 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4609 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4610 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4611 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4612 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4613 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4614 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4615 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4616 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4617 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4618 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4619 } 4620 4621 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4622 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4623 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4624 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4625 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4626 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4627 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4628 4629 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4630 H2C_CAT_OUTSRC, BTFC_SET, 4631 
SET_DRV_INFO, 0, 0, 4632 len); 4633 4634 ret = rtw89_h2c_tx(rtwdev, skb, false); 4635 if (ret) { 4636 rtw89_err(rtwdev, "failed to send h2c\n"); 4637 goto fail; 4638 } 4639 4640 return 0; 4641 fail: 4642 dev_kfree_skb_any(skb); 4643 4644 return ret; 4645 } 4646 4647 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4648 { 4649 struct rtw89_btc *btc = &rtwdev->btc; 4650 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4651 struct rtw89_h2c_cxrole_v7 *h2c; 4652 u32 len = sizeof(*h2c); 4653 struct sk_buff *skb; 4654 int ret; 4655 4656 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4657 if (!skb) { 4658 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4659 return -ENOMEM; 4660 } 4661 skb_put(skb, len); 4662 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4663 4664 h2c->hdr.type = type; 4665 h2c->hdr.ver = btc->ver->fwlrole; 4666 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4667 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4668 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4669 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4670 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4671 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4672 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4673 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4674 4675 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4676 H2C_CAT_OUTSRC, BTFC_SET, 4677 SET_DRV_INFO, 0, 0, 4678 len); 4679 4680 ret = rtw89_h2c_tx(rtwdev, skb, false); 4681 if (ret) { 4682 rtw89_err(rtwdev, "failed to send h2c\n"); 4683 goto fail; 4684 } 4685 4686 return 0; 4687 fail: 4688 dev_kfree_skb_any(skb); 4689 4690 return ret; 4691 } 4692 4693 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4694 { 4695 struct rtw89_btc *btc = &rtwdev->btc; 4696 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4697 struct rtw89_h2c_cxrole_v8 *h2c; 4698 u32 len = sizeof(*h2c); 4699 struct sk_buff *skb; 4700 int ret; 4701 4702 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4703 if (!skb) { 4704 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4705 return -ENOMEM; 4706 } 4707 skb_put(skb, len); 4708 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4709 4710 h2c->hdr.type = type; 4711 h2c->hdr.ver = btc->ver->fwlrole; 4712 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4713 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4714 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4715 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4716 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4717 4718 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4719 H2C_CAT_OUTSRC, BTFC_SET, 4720 SET_DRV_INFO, 0, 0, 4721 len); 4722 4723 ret = rtw89_h2c_tx(rtwdev, skb, false); 4724 if (ret) { 4725 rtw89_err(rtwdev, "failed to send h2c\n"); 4726 goto fail; 4727 } 4728 4729 return 0; 4730 fail: 4731 dev_kfree_skb_any(skb); 4732 4733 return ret; 4734 } 4735 4736 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4737 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4738 { 4739 struct rtw89_btc *btc = &rtwdev->btc; 4740 const struct rtw89_btc_ver *ver = btc->ver; 4741 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4742 struct sk_buff *skb; 4743 u8 *cmd; 4744 int ret; 4745 4746 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4747 if (!skb) { 4748 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4749 return -ENOMEM; 4750 } 4751 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4752 cmd = skb->data; 
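/* Legacy coex control blob: a 4-byte payload follows the cxdrv header,
 * and trace_step is only carried when the fcxctrl format version is 0.
 */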
4753 4754 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4755 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4756 4757 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4758 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4759 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4760 if (ver->fcxctrl == 0) 4761 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4762 4763 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4764 H2C_CAT_OUTSRC, BTFC_SET, 4765 SET_DRV_INFO, 0, 0, 4766 H2C_LEN_CXDRVINFO_CTRL); 4767 4768 ret = rtw89_h2c_tx(rtwdev, skb, false); 4769 if (ret) { 4770 rtw89_err(rtwdev, "failed to send h2c\n"); 4771 goto fail; 4772 } 4773 4774 return 0; 4775 fail: 4776 dev_kfree_skb_any(skb); 4777 4778 return ret; 4779 } 4780 4781 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4782 { 4783 struct rtw89_btc *btc = &rtwdev->btc; 4784 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4785 struct rtw89_h2c_cxctrl_v7 *h2c; 4786 u32 len = sizeof(*h2c); 4787 struct sk_buff *skb; 4788 int ret; 4789 4790 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4791 if (!skb) { 4792 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4793 return -ENOMEM; 4794 } 4795 skb_put(skb, len); 4796 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4797 4798 h2c->hdr.type = type; 4799 h2c->hdr.ver = btc->ver->fcxctrl; 4800 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4801 h2c->ctrl = *ctrl; 4802 4803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4804 H2C_CAT_OUTSRC, BTFC_SET, 4805 SET_DRV_INFO, 0, 0, len); 4806 4807 ret = rtw89_h2c_tx(rtwdev, skb, false); 4808 if (ret) { 4809 rtw89_err(rtwdev, "failed to send h2c\n"); 4810 goto fail; 4811 } 4812 4813 return 0; 4814 fail: 4815 dev_kfree_skb_any(skb); 4816 4817 return ret; 4818 } 4819 4820 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4821 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4822 { 4823 struct rtw89_btc *btc = &rtwdev->btc; 4824 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4825 struct sk_buff *skb; 4826 u8 *cmd; 4827 int ret; 4828 4829 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4830 if (!skb) { 4831 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4832 return -ENOMEM; 4833 } 4834 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4835 cmd = skb->data; 4836 4837 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4838 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4839 4840 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4841 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4842 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4843 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4844 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4845 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4846 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4847 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4848 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4849 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4850 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4851 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4852 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4853 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4854 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4855 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4856 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4857 4858 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4859 H2C_CAT_OUTSRC, BTFC_SET, 4860 SET_DRV_INFO, 0, 0, 4861 
H2C_LEN_CXDRVINFO_TRX); 4862 4863 ret = rtw89_h2c_tx(rtwdev, skb, false); 4864 if (ret) { 4865 rtw89_err(rtwdev, "failed to send h2c\n"); 4866 goto fail; 4867 } 4868 4869 return 0; 4870 fail: 4871 dev_kfree_skb_any(skb); 4872 4873 return ret; 4874 } 4875 4876 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4877 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4878 { 4879 struct rtw89_btc *btc = &rtwdev->btc; 4880 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4881 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4882 struct sk_buff *skb; 4883 u8 *cmd; 4884 int ret; 4885 4886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4887 if (!skb) { 4888 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4889 return -ENOMEM; 4890 } 4891 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4892 cmd = skb->data; 4893 4894 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4895 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4896 4897 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4898 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4899 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4900 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4901 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4902 4903 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4904 H2C_CAT_OUTSRC, BTFC_SET, 4905 SET_DRV_INFO, 0, 0, 4906 H2C_LEN_CXDRVINFO_RFK); 4907 4908 ret = rtw89_h2c_tx(rtwdev, skb, false); 4909 if (ret) { 4910 rtw89_err(rtwdev, "failed to send h2c\n"); 4911 goto fail; 4912 } 4913 4914 return 0; 4915 fail: 4916 dev_kfree_skb_any(skb); 4917 4918 return ret; 4919 } 4920 4921 #define H2C_LEN_PKT_OFLD 4 4922 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4923 { 4924 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4925 struct sk_buff *skb; 4926 unsigned int cond; 4927 u8 *cmd; 4928 int ret; 4929 4930 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4931 if (!skb) { 4932 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4933 return -ENOMEM; 4934 } 4935 skb_put(skb, H2C_LEN_PKT_OFLD); 4936 cmd = skb->data; 4937 4938 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4939 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4940 4941 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4942 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4943 H2C_FUNC_PACKET_OFLD, 1, 1, 4944 H2C_LEN_PKT_OFLD); 4945 4946 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4947 4948 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4949 if (ret < 0) { 4950 rtw89_debug(rtwdev, RTW89_DBG_FW, 4951 "failed to del pkt ofld: id %d, ret %d\n", 4952 id, ret); 4953 return ret; 4954 } 4955 4956 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4957 return 0; 4958 } 4959 4960 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4961 struct sk_buff *skb_ofld) 4962 { 4963 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4964 struct sk_buff *skb; 4965 unsigned int cond; 4966 u8 *cmd; 4967 u8 alloc_id; 4968 int ret; 4969 4970 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4971 RTW89_MAX_PKT_OFLD_NUM); 4972 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4973 return -ENOSPC; 4974 4975 *id = alloc_id; 4976 4977 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4978 if (!skb) { 4979 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4980 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4981 return -ENOMEM; 4982 } 4983 skb_put(skb, 
H2C_LEN_PKT_OFLD); 4984 cmd = skb->data; 4985 4986 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4987 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4988 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4989 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4990 4991 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4992 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4993 H2C_FUNC_PACKET_OFLD, 1, 1, 4994 H2C_LEN_PKT_OFLD + skb_ofld->len); 4995 4996 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4997 4998 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4999 if (ret < 0) { 5000 rtw89_debug(rtwdev, RTW89_DBG_FW, 5001 "failed to add pkt ofld: id %d, ret %d\n", 5002 alloc_id, ret); 5003 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 5004 return ret; 5005 } 5006 5007 return 0; 5008 } 5009 5010 static 5011 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 5012 struct list_head *chan_list) 5013 { 5014 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5015 struct rtw89_h2c_chinfo_elem *elem; 5016 struct rtw89_mac_chinfo *ch_info; 5017 struct rtw89_h2c_chinfo *h2c; 5018 struct sk_buff *skb; 5019 unsigned int cond; 5020 int skb_len; 5021 int ret; 5022 5023 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 5024 5025 skb_len = struct_size(h2c, elem, ch_num); 5026 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5027 if (!skb) { 5028 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5029 return -ENOMEM; 5030 } 5031 skb_put(skb, sizeof(*h2c)); 5032 h2c = (struct rtw89_h2c_chinfo *)skb->data; 5033 5034 h2c->ch_num = ch_num; 5035 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5036 5037 list_for_each_entry(ch_info, chan_list, list) { 5038 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 5039 5040 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 5041 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 5042 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 5043 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 5044 5045 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 5046 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 5047 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 5048 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 5049 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 5050 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 5051 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 5052 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 5053 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 5054 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 5055 5056 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 5057 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 5058 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 5059 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 5060 5061 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 5062 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 5063 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 5064 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 5065 } 5066 5067 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5068 H2C_CAT_MAC, 
H2C_CL_MAC_FW_OFLD, 5069 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5070 5071 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5072 5073 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5074 if (ret) { 5075 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5076 return ret; 5077 } 5078 5079 return 0; 5080 } 5081 5082 static 5083 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 5084 struct list_head *chan_list, 5085 struct rtw89_vif_link *rtwvif_link) 5086 { 5087 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5088 struct rtw89_h2c_chinfo_elem_be *elem; 5089 struct rtw89_mac_chinfo_be *ch_info; 5090 struct rtw89_h2c_chinfo_be *h2c; 5091 struct sk_buff *skb; 5092 unsigned int cond; 5093 u8 ver = U8_MAX; 5094 int skb_len; 5095 int ret; 5096 5097 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE_BE); 5098 5099 skb_len = struct_size(h2c, elem, ch_num); 5100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 5101 if (!skb) { 5102 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 5103 return -ENOMEM; 5104 } 5105 5106 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5107 ver = 0; 5108 5109 skb_put(skb, sizeof(*h2c)); 5110 h2c = (struct rtw89_h2c_chinfo_be *)skb->data; 5111 5112 h2c->ch_num = ch_num; 5113 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 5114 h2c->arg = u8_encode_bits(rtwvif_link->mac_idx, 5115 RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 5116 5117 list_for_each_entry(ch_info, chan_list, list) { 5118 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 5119 5120 elem->w0 = le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 5121 le32_encode_bits(ch_info->central_ch, 5122 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 5123 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 5124 5125 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 5126 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 5127 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 5128 le32_encode_bits(ch_info->pause_data, 5129 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 5130 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 5131 le32_encode_bits(ch_info->rand_seq_num, 5132 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 5133 le32_encode_bits(ch_info->notify_action, 5134 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 5135 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 5136 RTW89_H2C_CHINFO_BE_W1_PROBE) | 5137 le32_encode_bits(ch_info->leave_crit, 5138 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 5139 le32_encode_bits(ch_info->chkpt_timer, 5140 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 5141 5142 elem->w2 = le32_encode_bits(ch_info->leave_time, 5143 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 5144 le32_encode_bits(ch_info->leave_th, 5145 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 5146 le32_encode_bits(ch_info->tx_pkt_ctrl, 5147 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 5148 5149 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 5150 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 5151 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 5152 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 5153 5154 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 5155 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 5156 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 5157 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 5158 5159 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 5160 le32_encode_bits(ch_info->fw_probe0_ssids, 5161 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 5162 5163 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 5164 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 5165 le32_encode_bits(ch_info->fw_probe0_bssids, 5166 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 5167 if (ver == 0) 5168 elem->w0 |= 5169 le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD); 5170 else 5171 elem->w7 = le32_encode_bits(ch_info->period, 5172 RTW89_H2C_CHINFO_BE_W7_PERIOD_V1); 5173 } 5174 5175 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5176 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5177 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 5178 5179 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 5180 5181 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5182 if (ret) { 5183 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 5184 return ret; 5185 } 5186 5187 return 0; 5188 } 5189 5190 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 5191 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 5192 struct rtw89_scan_option *option, 5193 struct rtw89_vif_link *rtwvif_link, 5194 bool wowlan) 5195 { 5196 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5197 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5198 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 5199 struct rtw89_h2c_scanofld *h2c; 5200 u32 len = sizeof(*h2c); 5201 struct sk_buff *skb; 5202 unsigned int cond; 5203 u64 tsf = 0; 5204 int ret; 5205 5206 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5207 if (!skb) { 5208 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5209 return -ENOMEM; 5210 } 5211 skb_put(skb, len); 5212 h2c = (struct rtw89_h2c_scanofld *)skb->data; 5213 5214 if (option->delay) { 5215 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif_link, &tsf); 5216 if (ret) { 5217 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 5218 scan_mode = RTW89_SCAN_IMMEDIATE; 5219 } else { 5220 scan_mode = RTW89_SCAN_DELAY; 5221 tsf += (u64)option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 5222 } 5223 } 5224 5225 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 5226 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 5227 le32_encode_bits(rtwvif_link->mac_idx, RTW89_H2C_SCANOFLD_W0_BAND) | 5228 le32_encode_bits(option->enable, 
RTW89_H2C_SCANOFLD_W0_OPERATION); 5229 5230 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 5231 le32_encode_bits(option->target_ch_mode, 5232 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 5233 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 5234 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 5235 5236 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 5237 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 5238 5239 if (option->target_ch_mode) { 5240 h2c->w1 |= le32_encode_bits(op->band_width, 5241 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 5242 le32_encode_bits(op->primary_channel, 5243 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 5244 le32_encode_bits(op->channel, 5245 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 5246 h2c->w0 |= le32_encode_bits(op->band_type, 5247 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 5248 } 5249 5250 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 5251 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 5252 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 5253 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 5254 5255 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5256 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5257 H2C_FUNC_SCANOFLD, 1, 1, 5258 len); 5259 5260 if (option->enable) 5261 cond = RTW89_SCANOFLD_WAIT_COND_START; 5262 else 5263 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 5264 5265 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5266 if (ret) { 5267 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 5268 return ret; 5269 } 5270 5271 return 0; 5272 } 5273 5274 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 5275 struct rtw89_scan_option *option) 5276 { 5277 struct ieee80211_supported_band *sband; 5278 struct ieee80211_channel *chan; 5279 u8 i, idx; 5280 5281 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 5282 if (!sband) { 5283 option->prohib_chan = U64_MAX; 5284 return; 5285 } 5286 5287 for (i = 0; i < sband->n_channels; i++) { 5288 chan = &sband->channels[i]; 5289 if (chan->flags & IEEE80211_CHAN_DISABLED) { 5290 idx = (chan->hw_value - 1) / 4; 5291 option->prohib_chan |= BIT(idx); 5292 } 5293 } 5294 } 5295 5296 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 5297 struct rtw89_scan_option *option, 5298 struct rtw89_vif_link *rtwvif_link, 5299 bool wowlan) 5300 { 5301 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 5302 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5303 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 5304 struct cfg80211_scan_request *req = rtwvif->scan_req; 5305 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 5306 struct rtw89_chan *op = &scan_info->op_chan; 5307 struct rtw89_h2c_scanofld_be_opch *opch; 5308 struct rtw89_pktofld_info *pkt_info; 5309 struct rtw89_h2c_scanofld_be *h2c; 5310 struct sk_buff *skb; 5311 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 5312 u8 opch_size = sizeof(*opch) * option->num_opch; 5313 u8 probe_id[NUM_NL80211_BANDS]; 5314 u8 cfg_len = sizeof(*h2c); 5315 unsigned int cond; 5316 u8 ver = U8_MAX; 5317 void *ptr; 5318 int ret; 5319 u32 len; 5320 u8 i; 5321 5322 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 5323 5324 len = cfg_len + macc_role_size + opch_size; 5325 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5326 if (!skb) { 5327 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 5328 return -ENOMEM; 5329 } 5330 5331 skb_put(skb, len); 5332 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 5333 ptr = skb->data; 5334 5335 memset(probe_id, 
RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5336 5337 if (RTW89_CHK_FW_FEATURE(CH_INFO_BE_V0, &rtwdev->fw)) 5338 ver = 0; 5339 5340 if (!wowlan) { 5341 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5342 if (pkt_info->wildcard_6ghz) { 5343 /* Provide wildcard as template */ 5344 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5345 break; 5346 } 5347 } 5348 } 5349 5350 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5351 le32_encode_bits(option->scan_mode, 5352 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5353 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5354 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5355 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5356 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5357 le32_encode_bits(rtwvif_link->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5358 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5359 5360 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5361 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5362 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5363 5364 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5365 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5366 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5367 5368 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5369 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5370 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5371 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5372 5373 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5374 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5375 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5376 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5377 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5378 5379 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5380 5381 h2c->w6 = le32_encode_bits(option->prohib_chan, 5382 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5383 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5384 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5385 if (!wowlan && req->no_cck) { 5386 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5387 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5388 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5389 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5390 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5391 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5392 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5393 } 5394 5395 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5396 cfg_len = offsetofend(typeof(*h2c), w8); 5397 goto flex_member; 5398 } 5399 5400 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5401 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5402 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5403 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5404 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5405 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5406 5407 flex_member: 5408 ptr += cfg_len; 5409 5410 for (i = 0; i < option->num_macc_role; i++) { 5411 macc_role = ptr; 5412 macc_role->w0 = 5413 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5414 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5415 le32_encode_bits(0, 
RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5416 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5417 ptr += sizeof(*macc_role); 5418 } 5419 5420 for (i = 0; i < option->num_opch; i++) { 5421 opch = ptr; 5422 opch->w0 = le32_encode_bits(rtwvif_link->mac_id, 5423 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5424 le32_encode_bits(option->band, 5425 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5426 le32_encode_bits(rtwvif_link->port, 5427 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5428 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5429 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5430 le32_encode_bits(true, 5431 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5432 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5433 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5434 5435 opch->w1 = le32_encode_bits(op->band_type, 5436 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5437 le32_encode_bits(op->band_width, 5438 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5439 le32_encode_bits(0x3, 5440 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5441 le32_encode_bits(op->primary_channel, 5442 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5443 le32_encode_bits(op->channel, 5444 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5445 5446 opch->w2 = le32_encode_bits(0, 5447 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5448 le32_encode_bits(0, 5449 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5450 le32_encode_bits(2, 5451 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5452 5453 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5454 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5455 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5456 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5457 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5458 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5459 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5460 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5461 5462 if (ver == 0) 5463 opch->w1 |= le32_encode_bits(RTW89_CHANNEL_TIME, 5464 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION); 5465 else 5466 opch->w4 = le32_encode_bits(RTW89_CHANNEL_TIME, 5467 RTW89_H2C_SCANOFLD_BE_OPCH_W4_DURATION_V1); 5468 ptr += sizeof(*opch); 5469 } 5470 5471 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5472 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5473 H2C_FUNC_SCANOFLD_BE, 1, 1, 5474 len); 5475 5476 if (option->enable) 5477 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5478 else 5479 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5480 5481 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5482 if (ret) { 5483 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5484 return ret; 5485 } 5486 5487 return 0; 5488 } 5489 5490 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5491 struct rtw89_fw_h2c_rf_reg_info *info, 5492 u16 len, u8 page) 5493 { 5494 struct sk_buff *skb; 5495 u8 class = info->rf_path == RF_PATH_A ? 
		   H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
		return -ENOMEM;
	}
	skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, class, page, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev)
{
	struct rtw89_rfk_mcc_info_data *rfk_mcc = rtwdev->rfk_mcc.data;
	struct rtw89_fw_h2c_rf_get_mccch *mccch;
	struct sk_buff *skb;
	int ret;
	u8 idx;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch));
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rf mccch\n");
		return -ENOMEM;
	}
	skb_put(skb, sizeof(*mccch));
	mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data;

	idx = rfk_mcc->table_idx;
	mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]);
	mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]);
	mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]);
	mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]);
	mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]);
	mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY,
			      H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0,
			      sizeof(*mccch));

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc);

int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev,
			     enum rtw89_phy_idx phy_idx)
{
	struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
	struct rtw89_fw_h2c_rfk_pre_info_common *common;
	struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0;
	struct rtw89_fw_h2c_rfk_pre_info_v1 *h2c_v1;
	struct rtw89_fw_h2c_rfk_pre_info *h2c;
	u8 tbl_sel[NUM_OF_RTW89_FW_RFK_PATH];
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 ver = U8_MAX;
	u8 tbl, path;
	u32 val32;
	int ret;

	if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V1, &rtwdev->fw)) {
		len = sizeof(*h2c_v1);
		ver = 1;
	} else if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) {
		len = sizeof(*h2c_v0);
		ver = 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data;
	common = &h2c->base_v1.common;

	common->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode);

	BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR);
	BUILD_BUG_ON(ARRAY_SIZE(rfk_mcc->data) < NUM_OF_RTW89_FW_RFK_PATH);

	for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) {
		for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) {
			common->dbcc.ch[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].ch[tbl]);
			common->dbcc.band[path][tbl] =
				cpu_to_le32(rfk_mcc->data[path].band[tbl]);
		}
	}

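	/* For each RF path, report the currently selected RFK table entry
	 * (channel and band) to firmware; when neither the V0 nor the V1
	 * RFK_PRE_NOTIFY layout is in use, the bandwidth is reported as well.
	 */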
for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5614 tbl_sel[path] = rfk_mcc->data[path].table_idx; 5615 5616 common->tbl.cur_ch[path] = 5617 cpu_to_le32(rfk_mcc->data[path].ch[tbl_sel[path]]); 5618 common->tbl.cur_band[path] = 5619 cpu_to_le32(rfk_mcc->data[path].band[tbl_sel[path]]); 5620 5621 if (ver <= 1) 5622 continue; 5623 5624 h2c->cur_bandwidth[path] = 5625 cpu_to_le32(rfk_mcc->data[path].bw[tbl_sel[path]]); 5626 } 5627 5628 common->phy_idx = cpu_to_le32(phy_idx); 5629 5630 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5631 h2c_v0 = (struct rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5632 5633 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->data[0].band[tbl_sel[0]]); 5634 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->data[0].bw[tbl_sel[0]]); 5635 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->data[0].ch[tbl_sel[0]]); 5636 5637 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5638 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5639 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5640 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5641 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5642 h2c_v0->rfmod0 = cpu_to_le32(val32); 5643 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5644 h2c_v0->rfmod1 = cpu_to_le32(val32); 5645 5646 if (rtw89_is_mlo_1_1(rtwdev)) 5647 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5648 5649 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5650 5651 goto done; 5652 } 5653 5654 if (rtw89_is_mlo_1_1(rtwdev)) { 5655 h2c_v1 = &h2c->base_v1; 5656 h2c_v1->mlo_1_1 = cpu_to_le32(1); 5657 } 5658 done: 5659 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5660 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5661 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5662 len); 5663 5664 ret = rtw89_h2c_tx(rtwdev, skb, false); 5665 if (ret) { 5666 rtw89_err(rtwdev, "failed to send h2c\n"); 5667 goto fail; 5668 } 5669 5670 return 0; 5671 fail: 5672 dev_kfree_skb_any(skb); 5673 5674 return ret; 5675 } 5676 5677 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5678 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5679 { 5680 struct rtw89_hal *hal = &rtwdev->hal; 5681 struct rtw89_h2c_rf_tssi *h2c; 5682 u32 len = sizeof(*h2c); 5683 struct sk_buff *skb; 5684 int ret; 5685 5686 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5687 if (!skb) { 5688 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5689 return -ENOMEM; 5690 } 5691 skb_put(skb, len); 5692 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5693 5694 h2c->len = cpu_to_le16(len); 5695 h2c->phy = phy_idx; 5696 h2c->ch = chan->channel; 5697 h2c->bw = chan->band_width; 5698 h2c->band = chan->band_type; 5699 h2c->hwtx_en = true; 5700 h2c->cv = hal->cv; 5701 h2c->tssi_mode = tssi_mode; 5702 5703 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5704 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5705 5706 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5707 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5708 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5709 5710 ret = rtw89_h2c_tx(rtwdev, skb, false); 5711 if (ret) { 5712 rtw89_err(rtwdev, "failed to send h2c\n"); 5713 goto fail; 5714 } 5715 5716 return 0; 5717 fail: 5718 dev_kfree_skb_any(skb); 5719 5720 return ret; 5721 } 5722 5723 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5724 const struct rtw89_chan *chan) 5725 { 5726 struct rtw89_h2c_rf_iqk *h2c; 5727 u32 len = sizeof(*h2c); 5728 struct sk_buff *skb; 5729 int ret; 5730 5731 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5732 if (!skb) { 5733 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5734 return -ENOMEM; 5735 } 5736 skb_put(skb, len); 5737 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5738 5739 h2c->phy_idx = cpu_to_le32(phy_idx); 5740 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5741 5742 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5743 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5744 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5745 5746 ret = rtw89_h2c_tx(rtwdev, skb, false); 5747 if (ret) { 5748 rtw89_err(rtwdev, "failed to send h2c\n"); 5749 goto fail; 5750 } 5751 5752 return 0; 5753 fail: 5754 dev_kfree_skb_any(skb); 5755 5756 return ret; 5757 } 5758 5759 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5760 const struct rtw89_chan *chan) 5761 { 5762 struct rtw89_h2c_rf_dpk *h2c; 5763 u32 len = sizeof(*h2c); 5764 struct sk_buff *skb; 5765 int ret; 5766 5767 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5768 if (!skb) { 5769 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5770 return -ENOMEM; 5771 } 5772 skb_put(skb, len); 5773 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5774 5775 h2c->len = len; 5776 h2c->phy = phy_idx; 5777 h2c->dpk_enable = true; 5778 h2c->kpath = RF_AB; 5779 h2c->cur_band = chan->band_type; 5780 h2c->cur_bw = chan->band_width; 5781 h2c->cur_ch = chan->channel; 5782 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5783 5784 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5785 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5786 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5787 5788 ret = rtw89_h2c_tx(rtwdev, skb, false); 5789 if (ret) { 5790 rtw89_err(rtwdev, "failed to send h2c\n"); 5791 goto fail; 5792 } 5793 5794 return 0; 5795 fail: 5796 dev_kfree_skb_any(skb); 5797 5798 return ret; 5799 } 5800 5801 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5802 const struct rtw89_chan *chan) 5803 { 5804 struct rtw89_hal *hal = &rtwdev->hal; 5805 struct rtw89_h2c_rf_txgapk *h2c; 5806 u32 len = sizeof(*h2c); 5807 struct sk_buff *skb; 5808 int ret; 5809 5810 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5811 if (!skb) { 5812 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5813 return -ENOMEM; 5814 } 5815 skb_put(skb, len); 5816 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5817 5818 h2c->len = len; 5819 h2c->ktype = 2; 5820 h2c->phy = phy_idx; 5821 h2c->kpath = RF_AB; 5822 h2c->band = chan->band_type; 5823 h2c->bw = chan->band_width; 5824 h2c->ch = chan->channel; 5825 h2c->cv = hal->cv; 5826 5827 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5828 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5829 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5830 5831 ret = rtw89_h2c_tx(rtwdev, skb, false); 5832 if (ret) { 5833 rtw89_err(rtwdev, "failed to send h2c\n"); 5834 goto fail; 5835 } 5836 5837 return 0; 5838 fail: 5839 dev_kfree_skb_any(skb); 5840 5841 return ret; 5842 } 5843 5844 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5845 const struct rtw89_chan *chan) 5846 { 5847 struct rtw89_h2c_rf_dack *h2c; 5848 u32 len = sizeof(*h2c); 5849 struct sk_buff *skb; 5850 int ret; 5851 5852 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5853 if (!skb) { 5854 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5855 return -ENOMEM; 5856 } 5857 skb_put(skb, len); 5858 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5859 5860 h2c->len = cpu_to_le32(len); 5861 h2c->phy = cpu_to_le32(phy_idx); 5862 h2c->type = 
cpu_to_le32(0); 5863 5864 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5865 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5866 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5867 5868 ret = rtw89_h2c_tx(rtwdev, skb, false); 5869 if (ret) { 5870 rtw89_err(rtwdev, "failed to send h2c\n"); 5871 goto fail; 5872 } 5873 5874 return 0; 5875 fail: 5876 dev_kfree_skb_any(skb); 5877 5878 return ret; 5879 } 5880 5881 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5882 const struct rtw89_chan *chan, bool is_chl_k) 5883 { 5884 struct rtw89_h2c_rf_rxdck_v0 *v0; 5885 struct rtw89_h2c_rf_rxdck *h2c; 5886 u32 len = sizeof(*h2c); 5887 struct sk_buff *skb; 5888 int ver = -1; 5889 int ret; 5890 5891 if (RTW89_CHK_FW_FEATURE(RFK_RXDCK_V0, &rtwdev->fw)) { 5892 len = sizeof(*v0); 5893 ver = 0; 5894 } 5895 5896 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5897 if (!skb) { 5898 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5899 return -ENOMEM; 5900 } 5901 skb_put(skb, len); 5902 v0 = (struct rtw89_h2c_rf_rxdck_v0 *)skb->data; 5903 5904 v0->len = len; 5905 v0->phy = phy_idx; 5906 v0->is_afe = false; 5907 v0->kpath = RF_AB; 5908 v0->cur_band = chan->band_type; 5909 v0->cur_bw = chan->band_width; 5910 v0->cur_ch = chan->channel; 5911 v0->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5912 5913 if (ver == 0) 5914 goto hdr; 5915 5916 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5917 h2c->is_chl_k = is_chl_k; 5918 5919 hdr: 5920 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5921 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5922 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5923 5924 ret = rtw89_h2c_tx(rtwdev, skb, false); 5925 if (ret) { 5926 rtw89_err(rtwdev, "failed to send h2c\n"); 5927 goto fail; 5928 } 5929 5930 return 0; 5931 fail: 5932 dev_kfree_skb_any(skb); 5933 5934 return ret; 5935 } 5936 5937 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5938 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5939 bool rack, bool dack) 5940 { 5941 struct sk_buff *skb; 5942 int ret; 5943 5944 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5945 if (!skb) { 5946 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5947 return -ENOMEM; 5948 } 5949 skb_put_data(skb, buf, len); 5950 5951 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5952 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5953 len); 5954 5955 ret = rtw89_h2c_tx(rtwdev, skb, false); 5956 if (ret) { 5957 rtw89_err(rtwdev, "failed to send h2c\n"); 5958 goto fail; 5959 } 5960 5961 return 0; 5962 fail: 5963 dev_kfree_skb_any(skb); 5964 5965 return ret; 5966 } 5967 5968 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5969 { 5970 struct sk_buff *skb; 5971 int ret; 5972 5973 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5974 if (!skb) { 5975 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5976 return -ENOMEM; 5977 } 5978 skb_put_data(skb, buf, len); 5979 5980 ret = rtw89_h2c_tx(rtwdev, skb, false); 5981 if (ret) { 5982 rtw89_err(rtwdev, "failed to send h2c\n"); 5983 goto fail; 5984 } 5985 5986 return 0; 5987 fail: 5988 dev_kfree_skb_any(skb); 5989 5990 return ret; 5991 } 5992 5993 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5994 { 5995 struct rtw89_early_h2c *early_h2c; 5996 5997 lockdep_assert_held(&rtwdev->mutex); 5998 5999 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 6000 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 6001 } 6002 } 6003 6004 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 6005 { 
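	/* Drop any H2C commands still queued for early delivery: take the
	 * device mutex, unlink each entry, and free both the command buffer
	 * and its bookkeeping struct.
	 */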
6006 struct rtw89_early_h2c *early_h2c, *tmp; 6007 6008 mutex_lock(&rtwdev->mutex); 6009 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 6010 list_del(&early_h2c->list); 6011 kfree(early_h2c->h2c); 6012 kfree(early_h2c); 6013 } 6014 mutex_unlock(&rtwdev->mutex); 6015 } 6016 6017 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 6018 { 6019 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 6020 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6021 6022 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 6023 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 6024 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 6025 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 6026 } 6027 6028 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 6029 struct sk_buff *c2h) 6030 { 6031 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 6032 u8 category = attr->category; 6033 u8 class = attr->class; 6034 u8 func = attr->func; 6035 6036 switch (category) { 6037 default: 6038 return false; 6039 case RTW89_C2H_CAT_MAC: 6040 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 6041 case RTW89_C2H_CAT_OUTSRC: 6042 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 6043 } 6044 } 6045 6046 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 6047 { 6048 rtw89_fw_c2h_parse_attr(c2h); 6049 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 6050 goto enqueue; 6051 6052 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 6053 dev_kfree_skb_any(c2h); 6054 return; 6055 6056 enqueue: 6057 skb_queue_tail(&rtwdev->c2h_queue, c2h); 6058 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 6059 } 6060 6061 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 6062 struct sk_buff *skb) 6063 { 6064 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 6065 u8 category = attr->category; 6066 u8 class = attr->class; 6067 u8 func = attr->func; 6068 u16 len = attr->len; 6069 bool dump = true; 6070 6071 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 6072 return; 6073 6074 switch (category) { 6075 case RTW89_C2H_CAT_TEST: 6076 break; 6077 case RTW89_C2H_CAT_MAC: 6078 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 6079 if (class == RTW89_MAC_C2H_CLASS_INFO && 6080 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 6081 dump = false; 6082 break; 6083 case RTW89_C2H_CAT_OUTSRC: 6084 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 6085 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 6086 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 6087 else 6088 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 6089 break; 6090 } 6091 6092 if (dump) 6093 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 6094 } 6095 6096 void rtw89_fw_c2h_work(struct work_struct *work) 6097 { 6098 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 6099 c2h_work); 6100 struct sk_buff *skb, *tmp; 6101 6102 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 6103 skb_unlink(skb, &rtwdev->c2h_queue); 6104 mutex_lock(&rtwdev->mutex); 6105 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 6106 mutex_unlock(&rtwdev->mutex); 6107 dev_kfree_skb_any(skb); 6108 } 6109 } 6110 6111 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 6112 struct rtw89_mac_h2c_info *info) 6113 { 6114 const struct rtw89_chip_info *chip = rtwdev->chip; 6115 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6116 const u32 *h2c_reg = chip->h2c_regs; 6117 u8 i, val, len; 6118 int ret; 6119 6120 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 
5000, false, 6121 rtwdev, chip->h2c_ctrl_reg); 6122 if (ret) { 6123 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 6124 return ret; 6125 } 6126 6127 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 6128 sizeof(info->u.h2creg[0])); 6129 6130 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 6131 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 6132 6133 for (i = 0; i < RTW89_H2CREG_MAX; i++) 6134 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 6135 6136 fw_info->h2c_counter++; 6137 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 6138 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 6139 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 6140 6141 return 0; 6142 } 6143 6144 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 6145 struct rtw89_mac_c2h_info *info) 6146 { 6147 const struct rtw89_chip_info *chip = rtwdev->chip; 6148 struct rtw89_fw_info *fw_info = &rtwdev->fw; 6149 const u32 *c2h_reg = chip->c2h_regs; 6150 u32 ret; 6151 u8 i, val; 6152 6153 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 6154 6155 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 6156 RTW89_C2H_TIMEOUT, false, rtwdev, 6157 chip->c2h_ctrl_reg); 6158 if (ret) { 6159 rtw89_warn(rtwdev, "c2h reg timeout\n"); 6160 return ret; 6161 } 6162 6163 for (i = 0; i < RTW89_C2HREG_MAX; i++) 6164 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 6165 6166 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 6167 6168 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 6169 info->content_len = 6170 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 6171 RTW89_C2HREG_HDR_LEN; 6172 6173 fw_info->c2h_counter++; 6174 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 6175 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 6176 6177 return 0; 6178 } 6179 6180 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 6181 struct rtw89_mac_h2c_info *h2c_info, 6182 struct rtw89_mac_c2h_info *c2h_info) 6183 { 6184 u32 ret; 6185 6186 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 6187 lockdep_assert_held(&rtwdev->mutex); 6188 6189 if (!h2c_info && !c2h_info) 6190 return -EINVAL; 6191 6192 if (!h2c_info) 6193 goto recv_c2h; 6194 6195 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 6196 if (ret) 6197 return ret; 6198 6199 recv_c2h: 6200 if (!c2h_info) 6201 return 0; 6202 6203 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 6204 if (ret) 6205 return ret; 6206 6207 return 0; 6208 } 6209 6210 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 6211 { 6212 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 6213 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 6214 return; 6215 } 6216 6217 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 6218 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 6219 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 6220 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 6221 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 6222 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 6223 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 6224 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 6225 6226 rtw89_fw_prog_cnt_dump(rtwdev); 6227 } 6228 6229 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 6230 { 6231 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6232 struct rtw89_pktofld_info *info, *tmp; 6233 u8 idx; 6234 6235 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 6236 if 
(!(rtwdev->chip->support_bands & BIT(idx))) 6237 continue; 6238 6239 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 6240 if (test_bit(info->id, rtwdev->pkt_offload)) 6241 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 6242 list_del(&info->list); 6243 kfree(info); 6244 } 6245 } 6246 } 6247 6248 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 6249 struct cfg80211_scan_request *req, 6250 struct rtw89_pktofld_info *info, 6251 enum nl80211_band band, u8 ssid_idx) 6252 { 6253 if (band != NL80211_BAND_6GHZ) 6254 return false; 6255 6256 if (req->ssids[ssid_idx].ssid_len) { 6257 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 6258 req->ssids[ssid_idx].ssid_len); 6259 info->ssid_len = req->ssids[ssid_idx].ssid_len; 6260 return false; 6261 } else { 6262 info->wildcard_6ghz = true; 6263 return true; 6264 } 6265 } 6266 6267 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 6268 struct rtw89_vif_link *rtwvif_link, 6269 struct sk_buff *skb, u8 ssid_idx) 6270 { 6271 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6272 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6273 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6274 struct cfg80211_scan_request *req = rtwvif->scan_req; 6275 struct rtw89_pktofld_info *info; 6276 struct sk_buff *new; 6277 int ret = 0; 6278 u8 band; 6279 6280 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 6281 if (!(rtwdev->chip->support_bands & BIT(band))) 6282 continue; 6283 6284 new = skb_copy(skb, GFP_KERNEL); 6285 if (!new) { 6286 ret = -ENOMEM; 6287 goto out; 6288 } 6289 skb_put_data(new, ies->ies[band], ies->len[band]); 6290 skb_put_data(new, ies->common_ies, ies->common_ie_len); 6291 6292 info = kzalloc(sizeof(*info), GFP_KERNEL); 6293 if (!info) { 6294 ret = -ENOMEM; 6295 kfree_skb(new); 6296 goto out; 6297 } 6298 6299 rtw89_is_6ghz_wildcard_probe_req(rtwdev, req, info, band, ssid_idx); 6300 6301 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 6302 if (ret) { 6303 kfree_skb(new); 6304 kfree(info); 6305 goto out; 6306 } 6307 6308 list_add_tail(&info->list, &scan_info->pkt_list[band]); 6309 kfree_skb(new); 6310 } 6311 out: 6312 return ret; 6313 } 6314 6315 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 6316 struct rtw89_vif_link *rtwvif_link) 6317 { 6318 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6319 struct cfg80211_scan_request *req = rtwvif->scan_req; 6320 struct sk_buff *skb; 6321 u8 num = req->n_ssids, i; 6322 int ret; 6323 6324 for (i = 0; i < num; i++) { 6325 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6326 req->ssids[i].ssid, 6327 req->ssids[i].ssid_len, 6328 req->ie_len); 6329 if (!skb) 6330 return -ENOMEM; 6331 6332 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif_link, skb, i); 6333 kfree_skb(skb); 6334 6335 if (ret) 6336 return ret; 6337 } 6338 6339 return 0; 6340 } 6341 6342 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 6343 struct ieee80211_scan_ies *ies, 6344 struct cfg80211_scan_request *req, 6345 struct rtw89_mac_chinfo *ch_info) 6346 { 6347 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6348 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 6349 struct cfg80211_scan_6ghz_params *params; 6350 struct rtw89_pktofld_info *info, *tmp; 6351 struct ieee80211_hdr *hdr; 6352 struct sk_buff *skb; 6353 bool found; 6354 int ret = 0; 6355 u8 i; 6356 6357 if (!req->n_6ghz_params) 6358 return 0; 6359 6360 for (i = 0; i < req->n_6ghz_params; i++) { 6361 params = &req->scan_6ghz_params[i]; 6362 
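		/* Only handle the 6 GHz scan entry that matches the channel
		 * being programmed, and skip BSSIDs that already have a probe
		 * request template offloaded to firmware.
		 */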
6363 if (req->channels[params->channel_idx]->hw_value != 6364 ch_info->pri_ch) 6365 continue; 6366 6367 found = false; 6368 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 6369 if (ether_addr_equal(tmp->bssid, params->bssid)) { 6370 found = true; 6371 break; 6372 } 6373 } 6374 if (found) 6375 continue; 6376 6377 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif_link->mac_addr, 6378 NULL, 0, req->ie_len); 6379 if (!skb) 6380 return -ENOMEM; 6381 6382 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6383 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6384 hdr = (struct ieee80211_hdr *)skb->data; 6385 ether_addr_copy(hdr->addr3, params->bssid); 6386 6387 info = kzalloc(sizeof(*info), GFP_KERNEL); 6388 if (!info) { 6389 ret = -ENOMEM; 6390 kfree_skb(skb); 6391 goto out; 6392 } 6393 6394 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6395 if (ret) { 6396 kfree_skb(skb); 6397 kfree(info); 6398 goto out; 6399 } 6400 6401 ether_addr_copy(info->bssid, params->bssid); 6402 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6403 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6404 6405 ch_info->tx_pkt = true; 6406 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6407 6408 kfree_skb(skb); 6409 } 6410 6411 out: 6412 return ret; 6413 } 6414 6415 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6416 int chan_type, int ssid_num, 6417 struct rtw89_mac_chinfo *ch_info) 6418 { 6419 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6420 struct rtw89_pktofld_info *info; 6421 u8 probe_count = 0; 6422 6423 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6424 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6425 ch_info->bw = RTW89_SCAN_WIDTH; 6426 ch_info->tx_pkt = true; 6427 ch_info->cfg_tx_pwr = false; 6428 ch_info->tx_pwr_idx = 0; 6429 ch_info->tx_null = false; 6430 ch_info->pause_data = false; 6431 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6432 6433 if (ssid_num) { 6434 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6435 if (info->channel_6ghz && 6436 ch_info->pri_ch != info->channel_6ghz) 6437 continue; 6438 else if (info->channel_6ghz && probe_count != 0) 6439 ch_info->period += RTW89_CHANNEL_TIME_6G; 6440 6441 if (info->wildcard_6ghz) 6442 continue; 6443 6444 ch_info->pkt_id[probe_count++] = info->id; 6445 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6446 break; 6447 } 6448 ch_info->num_pkt = probe_count; 6449 } 6450 6451 switch (chan_type) { 6452 case RTW89_CHAN_DFS: 6453 if (ch_info->ch_band != RTW89_BAND_6G) 6454 ch_info->period = max_t(u8, ch_info->period, 6455 RTW89_DFS_CHAN_TIME); 6456 ch_info->dwell_time = RTW89_DWELL_TIME; 6457 break; 6458 case RTW89_CHAN_ACTIVE: 6459 break; 6460 default: 6461 rtw89_err(rtwdev, "Channel type out of bound\n"); 6462 } 6463 } 6464 6465 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6466 int ssid_num, 6467 struct rtw89_mac_chinfo *ch_info) 6468 { 6469 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6470 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6471 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6472 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 6473 struct cfg80211_scan_request *req = rtwvif->scan_req; 6474 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6475 struct rtw89_pktofld_info *info; 6476 u8 band, probe_count = 0; 6477 int ret; 6478 6479 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6480 ch_info->dfs_ch = chan_type == 
RTW89_CHAN_DFS; 6481 ch_info->bw = RTW89_SCAN_WIDTH; 6482 ch_info->tx_pkt = true; 6483 ch_info->cfg_tx_pwr = false; 6484 ch_info->tx_pwr_idx = 0; 6485 ch_info->tx_null = false; 6486 ch_info->pause_data = false; 6487 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6488 6489 if (ch_info->ch_band == RTW89_BAND_6G) { 6490 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6491 !ch_info->is_psc) { 6492 ch_info->tx_pkt = false; 6493 if (!req->duration_mandatory) 6494 ch_info->period -= RTW89_DWELL_TIME_6G; 6495 } 6496 } 6497 6498 ret = rtw89_update_6ghz_rnr_chan(rtwdev, ies, req, ch_info); 6499 if (ret) 6500 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6501 6502 if (ssid_num) { 6503 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6504 6505 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6506 if (info->channel_6ghz && 6507 ch_info->pri_ch != info->channel_6ghz) 6508 continue; 6509 else if (info->channel_6ghz && probe_count != 0) 6510 ch_info->period += RTW89_CHANNEL_TIME_6G; 6511 6512 if (info->wildcard_6ghz) 6513 continue; 6514 6515 ch_info->pkt_id[probe_count++] = info->id; 6516 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6517 break; 6518 } 6519 ch_info->num_pkt = probe_count; 6520 } 6521 6522 switch (chan_type) { 6523 case RTW89_CHAN_OPERATE: 6524 ch_info->central_ch = op->channel; 6525 ch_info->pri_ch = op->primary_channel; 6526 ch_info->ch_band = op->band_type; 6527 ch_info->bw = op->band_width; 6528 ch_info->tx_null = true; 6529 ch_info->num_pkt = 0; 6530 break; 6531 case RTW89_CHAN_DFS: 6532 if (ch_info->ch_band != RTW89_BAND_6G) 6533 ch_info->period = max_t(u8, ch_info->period, 6534 RTW89_DFS_CHAN_TIME); 6535 ch_info->dwell_time = RTW89_DWELL_TIME; 6536 ch_info->pause_data = true; 6537 break; 6538 case RTW89_CHAN_ACTIVE: 6539 ch_info->pause_data = true; 6540 break; 6541 default: 6542 rtw89_err(rtwdev, "Channel type out of bound\n"); 6543 } 6544 } 6545 6546 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6547 int ssid_num, 6548 struct rtw89_mac_chinfo_be *ch_info) 6549 { 6550 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6551 struct rtw89_pktofld_info *info; 6552 u8 probe_count = 0, i; 6553 6554 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6555 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6556 ch_info->bw = RTW89_SCAN_WIDTH; 6557 ch_info->tx_null = false; 6558 ch_info->pause_data = false; 6559 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6560 6561 if (ssid_num) { 6562 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6563 ch_info->pkt_id[probe_count++] = info->id; 6564 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6565 break; 6566 } 6567 } 6568 6569 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6570 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6571 6572 switch (chan_type) { 6573 case RTW89_CHAN_DFS: 6574 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6575 ch_info->dwell_time = RTW89_DWELL_TIME; 6576 break; 6577 case RTW89_CHAN_ACTIVE: 6578 break; 6579 default: 6580 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6581 break; 6582 } 6583 } 6584 6585 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6586 int ssid_num, 6587 struct rtw89_mac_chinfo_be *ch_info) 6588 { 6589 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6590 struct rtw89_vif_link *rtwvif_link = rtwdev->scan_info.scanning_vif; 6591 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6592 struct cfg80211_scan_request *req = rtwvif->scan_req; 6593 struct rtw89_pktofld_info *info; 6594 u8 band, 
probe_count = 0, i; 6595 6596 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6597 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6598 ch_info->bw = RTW89_SCAN_WIDTH; 6599 ch_info->tx_null = false; 6600 ch_info->pause_data = false; 6601 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6602 6603 if (ssid_num) { 6604 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6605 6606 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6607 if (info->channel_6ghz && 6608 ch_info->pri_ch != info->channel_6ghz) 6609 continue; 6610 6611 if (info->wildcard_6ghz) 6612 continue; 6613 6614 ch_info->pkt_id[probe_count++] = info->id; 6615 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6616 break; 6617 } 6618 } 6619 6620 if (ch_info->ch_band == RTW89_BAND_6G) { 6621 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6622 !ch_info->is_psc) { 6623 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6624 if (!req->duration_mandatory) 6625 ch_info->period -= RTW89_DWELL_TIME_6G; 6626 } 6627 } 6628 6629 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6630 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6631 6632 switch (chan_type) { 6633 case RTW89_CHAN_DFS: 6634 if (ch_info->ch_band != RTW89_BAND_6G) 6635 ch_info->period = 6636 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6637 ch_info->dwell_time = RTW89_DWELL_TIME; 6638 ch_info->pause_data = true; 6639 break; 6640 case RTW89_CHAN_ACTIVE: 6641 ch_info->pause_data = true; 6642 break; 6643 default: 6644 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6645 break; 6646 } 6647 } 6648 6649 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6650 struct rtw89_vif_link *rtwvif_link) 6651 { 6652 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6653 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6654 struct rtw89_mac_chinfo *ch_info, *tmp; 6655 struct ieee80211_channel *channel; 6656 struct list_head chan_list; 6657 int list_len; 6658 enum rtw89_chan_type type; 6659 int ret = 0; 6660 u32 idx; 6661 6662 INIT_LIST_HEAD(&chan_list); 6663 for (idx = 0, list_len = 0; 6664 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6665 idx++, list_len++) { 6666 channel = nd_config->channels[idx]; 6667 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6668 if (!ch_info) { 6669 ret = -ENOMEM; 6670 goto out; 6671 } 6672 6673 ch_info->period = RTW89_CHANNEL_TIME; 6674 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6675 ch_info->central_ch = channel->hw_value; 6676 ch_info->pri_ch = channel->hw_value; 6677 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6678 6679 if (channel->flags & 6680 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6681 type = RTW89_CHAN_DFS; 6682 else 6683 type = RTW89_CHAN_ACTIVE; 6684 6685 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6686 list_add_tail(&ch_info->list, &chan_list); 6687 } 6688 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6689 6690 out: 6691 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6692 list_del(&ch_info->list); 6693 kfree(ch_info); 6694 } 6695 6696 return ret; 6697 } 6698 6699 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6700 struct rtw89_vif_link *rtwvif_link, bool connected) 6701 { 6702 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6703 struct cfg80211_scan_request *req = rtwvif->scan_req; 6704 struct rtw89_mac_chinfo *ch_info, *tmp; 6705 struct ieee80211_channel *channel; 6706 struct list_head chan_list; 6707 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6708 int 
list_len, off_chan_time = 0; 6709 enum rtw89_chan_type type; 6710 int ret = 0; 6711 u32 idx; 6712 6713 INIT_LIST_HEAD(&chan_list); 6714 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6715 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_AX; 6716 idx++, list_len++) { 6717 channel = req->channels[idx]; 6718 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6719 if (!ch_info) { 6720 ret = -ENOMEM; 6721 goto out; 6722 } 6723 6724 if (req->duration) 6725 ch_info->period = req->duration; 6726 else if (channel->band == NL80211_BAND_6GHZ) 6727 ch_info->period = RTW89_CHANNEL_TIME_6G + 6728 RTW89_DWELL_TIME_6G; 6729 else 6730 ch_info->period = RTW89_CHANNEL_TIME; 6731 6732 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6733 ch_info->central_ch = channel->hw_value; 6734 ch_info->pri_ch = channel->hw_value; 6735 ch_info->rand_seq_num = random_seq; 6736 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6737 6738 if (channel->flags & 6739 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6740 type = RTW89_CHAN_DFS; 6741 else 6742 type = RTW89_CHAN_ACTIVE; 6743 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6744 6745 if (connected && 6746 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6747 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6748 if (!tmp) { 6749 ret = -ENOMEM; 6750 kfree(ch_info); 6751 goto out; 6752 } 6753 6754 type = RTW89_CHAN_OPERATE; 6755 tmp->period = req->duration_mandatory ? 6756 req->duration : RTW89_CHANNEL_TIME; 6757 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6758 list_add_tail(&tmp->list, &chan_list); 6759 off_chan_time = 0; 6760 list_len++; 6761 } 6762 list_add_tail(&ch_info->list, &chan_list); 6763 off_chan_time += ch_info->period; 6764 } 6765 rtwdev->scan_info.last_chan_idx = idx; 6766 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6767 6768 out: 6769 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6770 list_del(&ch_info->list); 6771 kfree(ch_info); 6772 } 6773 6774 return ret; 6775 } 6776 6777 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6778 struct rtw89_vif_link *rtwvif_link) 6779 { 6780 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6781 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6782 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6783 struct ieee80211_channel *channel; 6784 struct list_head chan_list; 6785 enum rtw89_chan_type type; 6786 int list_len, ret; 6787 u32 idx; 6788 6789 INIT_LIST_HEAD(&chan_list); 6790 6791 for (idx = 0, list_len = 0; 6792 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6793 idx++, list_len++) { 6794 channel = nd_config->channels[idx]; 6795 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6796 if (!ch_info) { 6797 ret = -ENOMEM; 6798 goto out; 6799 } 6800 6801 ch_info->period = RTW89_CHANNEL_TIME; 6802 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6803 ch_info->central_ch = channel->hw_value; 6804 ch_info->pri_ch = channel->hw_value; 6805 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6806 6807 if (channel->flags & 6808 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6809 type = RTW89_CHAN_DFS; 6810 else 6811 type = RTW89_CHAN_ACTIVE; 6812 6813 rtw89_pno_scan_add_chan_be(rtwdev, type, 6814 nd_config->n_match_sets, ch_info); 6815 list_add_tail(&ch_info->list, &chan_list); 6816 } 6817 6818 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6819 rtwvif_link); 6820 6821 out: 6822 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6823 list_del(&ch_info->list); 6824 
kfree(ch_info); 6825 } 6826 6827 return ret; 6828 } 6829 6830 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6831 struct rtw89_vif_link *rtwvif_link, bool connected) 6832 { 6833 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6834 struct cfg80211_scan_request *req = rtwvif->scan_req; 6835 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6836 struct ieee80211_channel *channel; 6837 struct list_head chan_list; 6838 enum rtw89_chan_type type; 6839 int list_len, ret; 6840 bool random_seq; 6841 u32 idx; 6842 6843 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6844 INIT_LIST_HEAD(&chan_list); 6845 6846 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6847 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT_BE; 6848 idx++, list_len++) { 6849 channel = req->channels[idx]; 6850 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6851 if (!ch_info) { 6852 ret = -ENOMEM; 6853 goto out; 6854 } 6855 6856 if (req->duration) 6857 ch_info->period = req->duration; 6858 else if (channel->band == NL80211_BAND_6GHZ) 6859 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6860 else 6861 ch_info->period = RTW89_CHANNEL_TIME; 6862 6863 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6864 ch_info->central_ch = channel->hw_value; 6865 ch_info->pri_ch = channel->hw_value; 6866 ch_info->rand_seq_num = random_seq; 6867 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6868 6869 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6870 type = RTW89_CHAN_DFS; 6871 else 6872 type = RTW89_CHAN_ACTIVE; 6873 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6874 6875 list_add_tail(&ch_info->list, &chan_list); 6876 } 6877 6878 rtwdev->scan_info.last_chan_idx = idx; 6879 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list, 6880 rtwvif_link); 6881 6882 out: 6883 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6884 list_del(&ch_info->list); 6885 kfree(ch_info); 6886 } 6887 6888 return ret; 6889 } 6890 6891 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6892 struct rtw89_vif_link *rtwvif_link, bool connected) 6893 { 6894 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6895 int ret; 6896 6897 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif_link); 6898 if (ret) { 6899 rtw89_err(rtwdev, "Update probe request failed\n"); 6900 goto out; 6901 } 6902 ret = mac->add_chan_list(rtwdev, rtwvif_link, connected); 6903 out: 6904 return ret; 6905 } 6906 6907 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, 6908 struct rtw89_vif_link *rtwvif_link, 6909 struct ieee80211_scan_request *scan_req) 6910 { 6911 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6912 struct cfg80211_scan_request *req = &scan_req->req; 6913 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 6914 rtwvif_link->chanctx_idx); 6915 struct rtw89_vif *rtwvif = rtwvif_link->rtwvif; 6916 u32 rx_fltr = rtwdev->hal.rx_fltr; 6917 u8 mac_addr[ETH_ALEN]; 6918 u32 reg; 6919 6920 /* clone op and keep it during scan */ 6921 rtwdev->scan_info.op_chan = *chan; 6922 6923 rtwdev->scan_info.scanning_vif = rtwvif_link; 6924 rtwdev->scan_info.last_chan_idx = 0; 6925 rtwdev->scan_info.abort = false; 6926 rtwvif->scan_ies = &scan_req->ies; 6927 rtwvif->scan_req = req; 6928 ieee80211_stop_queues(rtwdev->hw); 6929 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, false); 6930 6931 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6932 get_random_mask_addr(mac_addr, req->mac_addr, 6933 req->mac_addr_mask); 6934 else 6935 
		ether_addr_copy(mac_addr, rtwvif_link->mac_addr);
	rtw89_core_scan_start(rtwdev, rtwvif_link, mac_addr, true);

	rx_fltr &= ~B_AX_A_BCN_CHK_EN;
	rx_fltr &= ~B_AX_A_BC;
	rx_fltr &= ~B_AX_A_A1_MATCH;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rx_fltr);

	rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN);
}

struct rtw89_hw_scan_complete_cb_data {
	struct rtw89_vif_link *rtwvif_link;
	bool aborted;
};

static int rtw89_hw_scan_complete_cb(struct rtw89_dev *rtwdev, void *data)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct rtw89_hw_scan_complete_cb_data *cb_data = data;
	struct rtw89_vif_link *rtwvif_link = cb_data->rtwvif_link;
	struct cfg80211_scan_info info = {
		.aborted = cb_data->aborted,
	};
	struct rtw89_vif *rtwvif;
	u32 reg;

	if (!rtwvif_link)
		return -EINVAL;

	rtwvif = rtwvif_link->rtwvif;

	reg = rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, rtwvif_link->mac_idx);
	rtw89_write32_mask(rtwdev, reg, B_AX_RX_FLTR_CFG_MASK, rtwdev->hal.rx_fltr);

	rtw89_core_scan_complete(rtwdev, rtwvif_link, true);
	ieee80211_scan_completed(rtwdev->hw, &info);
	ieee80211_wake_queues(rtwdev->hw);
	rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif_link, true);
	rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true);

	rtw89_release_pkt_list(rtwdev);
	rtwvif->scan_req = NULL;
	rtwvif->scan_ies = NULL;
	scan_info->last_chan_idx = 0;
	scan_info->scanning_vif = NULL;
	scan_info->abort = false;

	return 0;
}

void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev,
			    struct rtw89_vif_link *rtwvif_link,
			    bool aborted)
{
	struct rtw89_hw_scan_complete_cb_data cb_data = {
		.rtwvif_link = rtwvif_link,
		.aborted = aborted,
	};
	const struct rtw89_chanctx_cb_parm cb_parm = {
		.cb = rtw89_hw_scan_complete_cb,
		.data = &cb_data,
		.caller = __func__,
	};

	/* These steps need to be done after setting the channel (for coex)
	 * and before proceeding with entity mode (for MCC), so pass them as
	 * a callback to keep the right sequence rather than doing them
	 * directly.
	 */
	rtw89_chanctx_proceed(rtwdev, &cb_parm);
}

void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev,
			 struct rtw89_vif_link *rtwvif_link)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, rtwvif_link, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning, which is safe
	 * because the scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan
	 * work properly.
7026 */ 7027 rtw89_hw_scan_complete(rtwdev, rtwvif_link, true); 7028 } 7029 7030 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 7031 { 7032 struct rtw89_vif_link *rtwvif_link; 7033 struct rtw89_vif *rtwvif; 7034 unsigned int link_id; 7035 7036 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 7037 rtw89_vif_for_each_link(rtwvif, rtwvif_link, link_id) { 7038 /* This variable implies connected or during attempt to connect */ 7039 if (!is_zero_ether_addr(rtwvif_link->bssid)) 7040 return true; 7041 } 7042 } 7043 7044 return false; 7045 } 7046 7047 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, 7048 struct rtw89_vif_link *rtwvif_link, 7049 bool enable) 7050 { 7051 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 7052 struct rtw89_scan_option opt = {0}; 7053 bool connected; 7054 int ret = 0; 7055 7056 if (!rtwvif_link) 7057 return -EINVAL; 7058 7059 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 7060 opt.enable = enable; 7061 opt.target_ch_mode = connected; 7062 if (enable) { 7063 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif_link, connected); 7064 if (ret) 7065 goto out; 7066 } 7067 7068 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 7069 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 7070 opt.scan_mode = RTW89_SCAN_MODE_SA; 7071 opt.band = rtwvif_link->mac_idx; 7072 opt.num_macc_role = 0; 7073 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 7074 opt.num_opch = connected ? 1 : 0; 7075 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 7076 } 7077 7078 ret = mac->scan_offload(rtwdev, &opt, rtwvif_link, false); 7079 out: 7080 return ret; 7081 } 7082 7083 #define H2C_FW_CPU_EXCEPTION_LEN 4 7084 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 7085 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 7086 { 7087 struct sk_buff *skb; 7088 int ret; 7089 7090 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 7091 if (!skb) { 7092 rtw89_err(rtwdev, 7093 "failed to alloc skb for fw cpu exception\n"); 7094 return -ENOMEM; 7095 } 7096 7097 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 7098 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 7099 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 7100 7101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7102 H2C_CAT_TEST, 7103 H2C_CL_FW_STATUS_TEST, 7104 H2C_FUNC_CPU_EXCEPTION, 0, 0, 7105 H2C_FW_CPU_EXCEPTION_LEN); 7106 7107 ret = rtw89_h2c_tx(rtwdev, skb, false); 7108 if (ret) { 7109 rtw89_err(rtwdev, "failed to send h2c\n"); 7110 goto fail; 7111 } 7112 7113 return 0; 7114 7115 fail: 7116 dev_kfree_skb_any(skb); 7117 return ret; 7118 } 7119 7120 #define H2C_PKT_DROP_LEN 24 7121 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 7122 const struct rtw89_pkt_drop_params *params) 7123 { 7124 struct sk_buff *skb; 7125 int ret; 7126 7127 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 7128 if (!skb) { 7129 rtw89_err(rtwdev, 7130 "failed to alloc skb for packet drop\n"); 7131 return -ENOMEM; 7132 } 7133 7134 switch (params->sel) { 7135 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 7136 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 7137 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 7138 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 7139 case RTW89_PKT_DROP_SEL_BAND_ONCE: 7140 break; 7141 default: 7142 rtw89_debug(rtwdev, RTW89_DBG_FW, 7143 "H2C of pkt drop might not fully support sel: %d yet\n", 7144 params->sel); 7145 break; 7146 } 7147 7148 skb_put(skb, H2C_PKT_DROP_LEN); 7149 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 7150 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 7151 
RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 7152 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 7153 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 7154 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 7155 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 7156 params->macid_band_sel[0]); 7157 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 7158 params->macid_band_sel[1]); 7159 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 7160 params->macid_band_sel[2]); 7161 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 7162 params->macid_band_sel[3]); 7163 7164 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7165 H2C_CAT_MAC, 7166 H2C_CL_MAC_FW_OFLD, 7167 H2C_FUNC_PKT_DROP, 0, 0, 7168 H2C_PKT_DROP_LEN); 7169 7170 ret = rtw89_h2c_tx(rtwdev, skb, false); 7171 if (ret) { 7172 rtw89_err(rtwdev, "failed to send h2c\n"); 7173 goto fail; 7174 } 7175 7176 return 0; 7177 7178 fail: 7179 dev_kfree_skb_any(skb); 7180 return ret; 7181 } 7182 7183 #define H2C_KEEP_ALIVE_LEN 4 7184 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7185 bool enable) 7186 { 7187 struct sk_buff *skb; 7188 u8 pkt_id = 0; 7189 int ret; 7190 7191 if (enable) { 7192 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7193 RTW89_PKT_OFLD_TYPE_NULL_DATA, 7194 &pkt_id); 7195 if (ret) 7196 return -EPERM; 7197 } 7198 7199 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 7200 if (!skb) { 7201 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7202 return -ENOMEM; 7203 } 7204 7205 skb_put(skb, H2C_KEEP_ALIVE_LEN); 7206 7207 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 7208 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 7209 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 7210 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif_link->mac_id); 7211 7212 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7213 H2C_CAT_MAC, 7214 H2C_CL_MAC_WOW, 7215 H2C_FUNC_KEEP_ALIVE, 0, 1, 7216 H2C_KEEP_ALIVE_LEN); 7217 7218 ret = rtw89_h2c_tx(rtwdev, skb, false); 7219 if (ret) { 7220 rtw89_err(rtwdev, "failed to send h2c\n"); 7221 goto fail; 7222 } 7223 7224 return 0; 7225 7226 fail: 7227 dev_kfree_skb_any(skb); 7228 7229 return ret; 7230 } 7231 7232 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7233 bool enable) 7234 { 7235 struct rtw89_h2c_arp_offload *h2c; 7236 u32 len = sizeof(*h2c); 7237 struct sk_buff *skb; 7238 u8 pkt_id = 0; 7239 int ret; 7240 7241 if (enable) { 7242 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7243 RTW89_PKT_OFLD_TYPE_ARP_RSP, 7244 &pkt_id); 7245 if (ret) 7246 return ret; 7247 } 7248 7249 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7250 if (!skb) { 7251 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 7252 return -ENOMEM; 7253 } 7254 7255 skb_put(skb, len); 7256 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 7257 7258 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 7259 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 7260 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 7261 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 7262 7263 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7264 H2C_CAT_MAC, 7265 H2C_CL_MAC_WOW, 7266 H2C_FUNC_ARP_OFLD, 0, 1, 7267 len); 7268 7269 ret = rtw89_h2c_tx(rtwdev, skb, false); 7270 if (ret) { 7271 rtw89_err(rtwdev, "failed to send h2c\n"); 7272 goto fail; 7273 } 7274 7275 return 0; 7276 7277 fail: 7278 
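	/* Transmission failed, so the skb is still ours to free. */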
dev_kfree_skb_any(skb); 7279 7280 return ret; 7281 } 7282 7283 #define H2C_DISCONNECT_DETECT_LEN 8 7284 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 7285 struct rtw89_vif_link *rtwvif_link, bool enable) 7286 { 7287 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7288 struct sk_buff *skb; 7289 u8 macid = rtwvif_link->mac_id; 7290 int ret; 7291 7292 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 7293 if (!skb) { 7294 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7295 return -ENOMEM; 7296 } 7297 7298 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 7299 7300 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 7301 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 7302 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 7303 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 7304 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 7305 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 7306 } 7307 7308 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7309 H2C_CAT_MAC, 7310 H2C_CL_MAC_WOW, 7311 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 7312 H2C_DISCONNECT_DETECT_LEN); 7313 7314 ret = rtw89_h2c_tx(rtwdev, skb, false); 7315 if (ret) { 7316 rtw89_err(rtwdev, "failed to send h2c\n"); 7317 goto fail; 7318 } 7319 7320 return 0; 7321 7322 fail: 7323 dev_kfree_skb_any(skb); 7324 7325 return ret; 7326 } 7327 7328 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7329 bool enable) 7330 { 7331 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7332 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 7333 struct rtw89_h2c_cfg_nlo *h2c; 7334 u32 len = sizeof(*h2c); 7335 struct sk_buff *skb; 7336 int ret, i; 7337 7338 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7339 if (!skb) { 7340 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 7341 return -ENOMEM; 7342 } 7343 7344 skb_put(skb, len); 7345 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 7346 7347 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 7348 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 7349 le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_NLO_W0_MACID); 7350 7351 if (enable) { 7352 h2c->nlo_cnt = nd_config->n_match_sets; 7353 for (i = 0 ; i < nd_config->n_match_sets; i++) { 7354 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 7355 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 7356 nd_config->match_sets[i].ssid.ssid_len); 7357 } 7358 } 7359 7360 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7361 H2C_CAT_MAC, 7362 H2C_CL_MAC_WOW, 7363 H2C_FUNC_NLO, 0, 1, 7364 len); 7365 7366 ret = rtw89_h2c_tx(rtwdev, skb, false); 7367 if (ret) { 7368 rtw89_err(rtwdev, "failed to send h2c\n"); 7369 goto fail; 7370 } 7371 7372 return 0; 7373 7374 fail: 7375 dev_kfree_skb_any(skb); 7376 return ret; 7377 } 7378 7379 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7380 bool enable) 7381 { 7382 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7383 struct rtw89_h2c_wow_global *h2c; 7384 u8 macid = rtwvif_link->mac_id; 7385 u32 len = sizeof(*h2c); 7386 struct sk_buff *skb; 7387 int ret; 7388 7389 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7390 if (!skb) { 7391 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 7392 return -ENOMEM; 7393 } 7394 7395 skb_put(skb, len); 7396 h2c = (struct rtw89_h2c_wow_global *)skb->data; 7397 7398 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 7399 le32_encode_bits(macid, 
RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 7400 le32_encode_bits(rtw_wow->ptk_alg, 7401 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 7402 le32_encode_bits(rtw_wow->gtk_alg, 7403 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 7404 h2c->key_info = rtw_wow->key_info; 7405 7406 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7407 H2C_CAT_MAC, 7408 H2C_CL_MAC_WOW, 7409 H2C_FUNC_WOW_GLOBAL, 0, 1, 7410 len); 7411 7412 ret = rtw89_h2c_tx(rtwdev, skb, false); 7413 if (ret) { 7414 rtw89_err(rtwdev, "failed to send h2c\n"); 7415 goto fail; 7416 } 7417 7418 return 0; 7419 7420 fail: 7421 dev_kfree_skb_any(skb); 7422 7423 return ret; 7424 } 7425 7426 #define H2C_WAKEUP_CTRL_LEN 4 7427 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7428 struct rtw89_vif_link *rtwvif_link, 7429 bool enable) 7430 { 7431 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7432 struct sk_buff *skb; 7433 u8 macid = rtwvif_link->mac_id; 7434 int ret; 7435 7436 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7437 if (!skb) { 7438 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7439 return -ENOMEM; 7440 } 7441 7442 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7443 7444 if (rtw_wow->pattern_cnt) 7445 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7446 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7447 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7448 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7449 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7450 7451 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7452 7453 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7454 H2C_CAT_MAC, 7455 H2C_CL_MAC_WOW, 7456 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7457 H2C_WAKEUP_CTRL_LEN); 7458 7459 ret = rtw89_h2c_tx(rtwdev, skb, false); 7460 if (ret) { 7461 rtw89_err(rtwdev, "failed to send h2c\n"); 7462 goto fail; 7463 } 7464 7465 return 0; 7466 7467 fail: 7468 dev_kfree_skb_any(skb); 7469 7470 return ret; 7471 } 7472 7473 #define H2C_WOW_CAM_UPD_LEN 24 7474 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7475 struct rtw89_wow_cam_info *cam_info) 7476 { 7477 struct sk_buff *skb; 7478 int ret; 7479 7480 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7481 if (!skb) { 7482 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n"); 7483 return -ENOMEM; 7484 } 7485 7486 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7487 7488 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7489 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7490 if (cam_info->valid) { 7491 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7492 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7493 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7494 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7495 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7496 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7497 cam_info->negative_pattern_match); 7498 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7499 cam_info->skip_mac_hdr); 7500 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7501 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7502 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7503 } 7504 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7505 7506 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7507 H2C_CAT_MAC, 7508 H2C_CL_MAC_WOW, 7509 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7510 H2C_WOW_CAM_UPD_LEN); 7511 7512 ret = rtw89_h2c_tx(rtwdev, skb, false); 7513 if (ret) { 7514 rtw89_err(rtwdev, "failed to send h2c\n");
7515 goto fail; 7516 } 7517 7518 return 0; 7519 fail: 7520 dev_kfree_skb_any(skb); 7521 7522 return ret; 7523 } 7524 7525 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7526 struct rtw89_vif_link *rtwvif_link, 7527 bool enable) 7528 { 7529 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7530 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7531 struct rtw89_h2c_wow_gtk_ofld *h2c; 7532 u8 macid = rtwvif_link->mac_id; 7533 u32 len = sizeof(*h2c); 7534 u8 pkt_id_sa_query = 0; 7535 struct sk_buff *skb; 7536 u8 pkt_id_eapol = 0; 7537 int ret; 7538 7539 if (!rtw_wow->gtk_alg) 7540 return 0; 7541 7542 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7543 if (!skb) { 7544 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7545 return -ENOMEM; 7546 } 7547 7548 skb_put(skb, len); 7549 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7550 7551 if (!enable) 7552 goto hdr; 7553 7554 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7555 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7556 &pkt_id_eapol); 7557 if (ret) 7558 goto fail; 7559 7560 if (gtk_info->igtk_keyid) { 7561 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif_link, 7562 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7563 &pkt_id_sa_query); 7564 if (ret) 7565 goto fail; 7566 } 7567 7568 /* not support TKIP yet */ 7569 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7570 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7571 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 7572 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7573 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7574 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7575 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7576 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7577 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7578 h2c->gtk_info = rtw_wow->gtk_info; 7579 7580 hdr: 7581 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7582 H2C_CAT_MAC, 7583 H2C_CL_MAC_WOW, 7584 H2C_FUNC_GTK_OFLD, 0, 1, 7585 len); 7586 7587 ret = rtw89_h2c_tx(rtwdev, skb, false); 7588 if (ret) { 7589 rtw89_err(rtwdev, "failed to send h2c\n"); 7590 goto fail; 7591 } 7592 return 0; 7593 fail: 7594 dev_kfree_skb_any(skb); 7595 7596 return ret; 7597 } 7598 7599 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif_link *rtwvif_link, 7600 bool enable) 7601 { 7602 struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait; 7603 struct rtw89_h2c_fwips *h2c; 7604 u32 len = sizeof(*h2c); 7605 struct sk_buff *skb; 7606 7607 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7608 if (!skb) { 7609 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7610 return -ENOMEM; 7611 } 7612 skb_put(skb, len); 7613 h2c = (struct rtw89_h2c_fwips *)skb->data; 7614 7615 h2c->w0 = le32_encode_bits(rtwvif_link->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7616 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7617 7618 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7619 H2C_CAT_MAC, 7620 H2C_CL_MAC_PS, 7621 H2C_FUNC_IPS_CFG, 0, 1, 7622 len); 7623 7624 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG); 7625 } 7626 7627 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7628 { 7629 struct rtw89_wait_info *wait = &rtwdev->wow.wait; 7630 struct rtw89_h2c_wow_aoac *h2c; 7631 u32 len = sizeof(*h2c); 7632 struct sk_buff *skb; 7633 7634 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7635 if (!skb) { 7636 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7637 return -ENOMEM; 7638 } 7639 
7640 skb_put(skb, len); 7641 7642 /* This H2C only notifies firmware to generate the AOAC report C2H; 7643 * no parameter is needed. 7644 */ 7645 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7646 H2C_CAT_MAC, 7647 H2C_CL_MAC_WOW, 7648 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7649 len); 7650 7651 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC); 7652 } 7653 7654 /* Return < 0 if a failure happens while waiting for the condition. 7655 * Return 0 when waiting for the condition succeeds. 7656 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7657 * where 1 means during SER. 7658 */ 7659 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7660 struct rtw89_wait_info *wait, unsigned int cond) 7661 { 7662 int ret; 7663 7664 ret = rtw89_h2c_tx(rtwdev, skb, false); 7665 if (ret) { 7666 rtw89_err(rtwdev, "failed to send h2c\n"); 7667 dev_kfree_skb_any(skb); 7668 return -EBUSY; 7669 } 7670 7671 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7672 return 1; 7673 7674 return rtw89_wait_for_cond(wait, cond); 7675 } 7676 7677 #define H2C_ADD_MCC_LEN 16 7678 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7679 const struct rtw89_fw_mcc_add_req *p) 7680 { 7681 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7682 struct sk_buff *skb; 7683 unsigned int cond; 7684 7685 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7686 if (!skb) { 7687 rtw89_err(rtwdev, 7688 "failed to alloc skb for add mcc\n"); 7689 return -ENOMEM; 7690 } 7691 7692 skb_put(skb, H2C_ADD_MCC_LEN); 7693 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7694 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7695 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7696 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7697 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7698 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7699 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7700 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7701 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7702 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7703 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7704 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7705 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7706 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7707 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7708 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7709 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7710 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7711 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7712 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7713 7714 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7715 H2C_CAT_MAC, 7716 H2C_CL_MCC, 7717 H2C_FUNC_ADD_MCC, 0, 0, 7718 H2C_ADD_MCC_LEN); 7719 7720 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7721 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7722 } 7723 7724 #define H2C_START_MCC_LEN 12 7725 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7726 const struct rtw89_fw_mcc_start_req *p) 7727 { 7728 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7729 struct sk_buff *skb; 7730 unsigned int cond; 7731 7732 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7733 if (!skb)
{ 7734 rtw89_err(rtwdev, 7735 "failed to alloc skb for start mcc\n"); 7736 return -ENOMEM; 7737 } 7738 7739 skb_put(skb, H2C_START_MCC_LEN); 7740 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7741 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7742 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7743 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7744 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7745 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7746 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 7747 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7748 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7749 7750 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7751 H2C_CAT_MAC, 7752 H2C_CL_MCC, 7753 H2C_FUNC_START_MCC, 0, 0, 7754 H2C_START_MCC_LEN); 7755 7756 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7757 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7758 } 7759 7760 #define H2C_STOP_MCC_LEN 4 7761 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7762 bool prev_groups) 7763 { 7764 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7765 struct sk_buff *skb; 7766 unsigned int cond; 7767 7768 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7769 if (!skb) { 7770 rtw89_err(rtwdev, 7771 "failed to alloc skb for stop mcc\n"); 7772 return -ENOMEM; 7773 } 7774 7775 skb_put(skb, H2C_STOP_MCC_LEN); 7776 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7777 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7778 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7779 7780 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7781 H2C_CAT_MAC, 7782 H2C_CL_MCC, 7783 H2C_FUNC_STOP_MCC, 0, 0, 7784 H2C_STOP_MCC_LEN); 7785 7786 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7787 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7788 } 7789 7790 #define H2C_DEL_MCC_GROUP_LEN 4 7791 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7792 bool prev_groups) 7793 { 7794 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7795 struct sk_buff *skb; 7796 unsigned int cond; 7797 7798 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7799 if (!skb) { 7800 rtw89_err(rtwdev, 7801 "failed to alloc skb for del mcc group\n"); 7802 return -ENOMEM; 7803 } 7804 7805 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7806 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7807 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7808 7809 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7810 H2C_CAT_MAC, 7811 H2C_CL_MCC, 7812 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7813 H2C_DEL_MCC_GROUP_LEN); 7814 7815 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7816 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7817 } 7818 7819 #define H2C_RESET_MCC_GROUP_LEN 4 7820 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7821 { 7822 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7823 struct sk_buff *skb; 7824 unsigned int cond; 7825 7826 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7827 if (!skb) { 7828 rtw89_err(rtwdev, 7829 "failed to alloc skb for reset mcc group\n"); 7830 return -ENOMEM; 7831 } 7832 7833 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7834 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7835 7836 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7837 H2C_CAT_MAC, 7838 H2C_CL_MCC, 7839 
H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7840 H2C_RESET_MCC_GROUP_LEN); 7841 7842 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7843 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7844 } 7845 7846 #define H2C_MCC_REQ_TSF_LEN 4 7847 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7848 const struct rtw89_fw_mcc_tsf_req *req, 7849 struct rtw89_mac_mcc_tsf_rpt *rpt) 7850 { 7851 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7852 struct rtw89_mac_mcc_tsf_rpt *tmp; 7853 struct sk_buff *skb; 7854 unsigned int cond; 7855 int ret; 7856 7857 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 7858 if (!skb) { 7859 rtw89_err(rtwdev, 7860 "failed to alloc skb for mcc req tsf\n"); 7861 return -ENOMEM; 7862 } 7863 7864 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7865 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7866 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7867 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7868 7869 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7870 H2C_CAT_MAC, 7871 H2C_CL_MCC, 7872 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7873 H2C_MCC_REQ_TSF_LEN); 7874 7875 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7876 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7877 if (ret) 7878 return ret; 7879 7880 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7881 *rpt = *tmp; 7882 7883 return 0; 7884 } 7885 7886 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7887 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7888 u8 *bitmap) 7889 { 7890 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7891 struct sk_buff *skb; 7892 unsigned int cond; 7893 u8 map_len; 7894 u8 h2c_len; 7895 7896 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7897 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7898 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7899 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7900 if (!skb) { 7901 rtw89_err(rtwdev, 7902 "failed to alloc skb for mcc macid bitmap\n"); 7903 return -ENOMEM; 7904 } 7905 7906 skb_put(skb, h2c_len); 7907 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7908 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7909 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7910 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7911 7912 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7913 H2C_CAT_MAC, 7914 H2C_CL_MCC, 7915 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7916 h2c_len); 7917 7918 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7919 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7920 } 7921 7922 #define H2C_MCC_SYNC_LEN 4 7923 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7924 u8 target, u8 offset) 7925 { 7926 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7927 struct sk_buff *skb; 7928 unsigned int cond; 7929 7930 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7931 if (!skb) { 7932 rtw89_err(rtwdev, 7933 "failed to alloc skb for mcc sync\n"); 7934 return -ENOMEM; 7935 } 7936 7937 skb_put(skb, H2C_MCC_SYNC_LEN); 7938 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7939 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7940 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7941 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7942 7943 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7944 H2C_CAT_MAC, 7945 H2C_CL_MCC, 7946 H2C_FUNC_MCC_SYNC, 0, 0, 7947 H2C_MCC_SYNC_LEN); 7948 7949 cond = RTW89_MCC_WAIT_COND(group, 
H2C_FUNC_MCC_SYNC); 7950 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7951 } 7952 7953 #define H2C_MCC_SET_DURATION_LEN 20 7954 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7955 const struct rtw89_fw_mcc_duration *p) 7956 { 7957 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7958 struct sk_buff *skb; 7959 unsigned int cond; 7960 7961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7962 if (!skb) { 7963 rtw89_err(rtwdev, 7964 "failed to alloc skb for mcc set duration\n"); 7965 return -ENOMEM; 7966 } 7967 7968 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 7969 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 7970 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 7971 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 7972 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 7973 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 7974 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 7975 p->start_tsf_low); 7976 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 7977 p->start_tsf_high); 7978 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 7979 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 7980 7981 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7982 H2C_CAT_MAC, 7983 H2C_CL_MCC, 7984 H2C_FUNC_MCC_SET_DURATION, 0, 0, 7985 H2C_MCC_SET_DURATION_LEN); 7986 7987 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 7988 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7989 } 7990 7991 static 7992 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 7993 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 7994 struct rtw89_h2c_mrc_add_slot *slot_h2c) 7995 { 7996 bool fill_h2c = !!slot_h2c; 7997 unsigned int i; 7998 7999 if (!fill_h2c) 8000 goto calc_len; 8001 8002 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 8003 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 8004 le32_encode_bits(slot_arg->courtesy_en, 8005 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 8006 le32_encode_bits(slot_arg->role_num, 8007 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 8008 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 8009 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 8010 le32_encode_bits(slot_arg->courtesy_target, 8011 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 8012 8013 for (i = 0; i < slot_arg->role_num; i++) { 8014 slot_h2c->roles[i].w0 = 8015 le32_encode_bits(slot_arg->roles[i].macid, 8016 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 8017 le32_encode_bits(slot_arg->roles[i].role_type, 8018 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 8019 le32_encode_bits(slot_arg->roles[i].is_master, 8020 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 8021 le32_encode_bits(slot_arg->roles[i].en_tx_null, 8022 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 8023 le32_encode_bits(false, 8024 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 8025 le32_encode_bits(false, 8026 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 8027 slot_h2c->roles[i].w1 = 8028 le32_encode_bits(slot_arg->roles[i].central_ch, 8029 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 8030 le32_encode_bits(slot_arg->roles[i].primary_ch, 8031 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 8032 le32_encode_bits(slot_arg->roles[i].bw, 8033 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 8034 le32_encode_bits(slot_arg->roles[i].band, 8035 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 8036 le32_encode_bits(slot_arg->roles[i].null_early, 8037 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 8038 le32_encode_bits(false, 8039 
RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 8040 le32_encode_bits(true, 8041 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 8042 slot_h2c->roles[i].macid_main_bitmap = 8043 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 8044 slot_h2c->roles[i].macid_paired_bitmap = 8045 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 8046 } 8047 8048 calc_len: 8049 return struct_size(slot_h2c, roles, slot_arg->role_num); 8050 } 8051 8052 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 8053 const struct rtw89_fw_mrc_add_arg *arg) 8054 { 8055 struct rtw89_h2c_mrc_add *h2c_head; 8056 struct sk_buff *skb; 8057 unsigned int i; 8058 void *tmp; 8059 u32 len; 8060 int ret; 8061 8062 len = sizeof(*h2c_head); 8063 for (i = 0; i < arg->slot_num; i++) 8064 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 8065 8066 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8067 if (!skb) { 8068 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 8069 return -ENOMEM; 8070 } 8071 8072 skb_put(skb, len); 8073 tmp = skb->data; 8074 8075 h2c_head = tmp; 8076 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 8077 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 8078 le32_encode_bits(arg->sch_type, 8079 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 8080 le32_encode_bits(arg->slot_num, 8081 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 8082 le32_encode_bits(arg->btc_in_sch, 8083 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 8084 8085 tmp += sizeof(*h2c_head); 8086 for (i = 0; i < arg->slot_num; i++) 8087 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 8088 8089 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8090 H2C_CAT_MAC, 8091 H2C_CL_MRC, 8092 H2C_FUNC_ADD_MRC, 0, 0, 8093 len); 8094 8095 ret = rtw89_h2c_tx(rtwdev, skb, false); 8096 if (ret) { 8097 rtw89_err(rtwdev, "failed to send h2c\n"); 8098 dev_kfree_skb_any(skb); 8099 return -EBUSY; 8100 } 8101 8102 return 0; 8103 } 8104 8105 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 8106 const struct rtw89_fw_mrc_start_arg *arg) 8107 { 8108 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8109 struct rtw89_h2c_mrc_start *h2c; 8110 u32 len = sizeof(*h2c); 8111 struct sk_buff *skb; 8112 unsigned int cond; 8113 8114 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8115 if (!skb) { 8116 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 8117 return -ENOMEM; 8118 } 8119 8120 skb_put(skb, len); 8121 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 8122 8123 h2c->w0 = le32_encode_bits(arg->sch_idx, 8124 RTW89_H2C_MRC_START_W0_SCH_IDX) | 8125 le32_encode_bits(arg->old_sch_idx, 8126 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 8127 le32_encode_bits(arg->action, 8128 RTW89_H2C_MRC_START_W0_ACTION); 8129 8130 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8131 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8132 8133 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8134 H2C_CAT_MAC, 8135 H2C_CL_MRC, 8136 H2C_FUNC_START_MRC, 0, 0, 8137 len); 8138 8139 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 8140 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8141 } 8142 8143 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 8144 { 8145 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8146 struct rtw89_h2c_mrc_del *h2c; 8147 u32 len = sizeof(*h2c); 8148 struct sk_buff *skb; 8149 unsigned int cond; 8150 8151 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8152 if (!skb) { 8153 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 8154 return -ENOMEM; 8155 } 8156 8157 skb_put(skb, len); 8158 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 8159 8160 h2c->w0 = 
le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 8161 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 8162 8163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8164 H2C_CAT_MAC, 8165 H2C_CL_MRC, 8166 H2C_FUNC_DEL_MRC, 0, 0, 8167 len); 8168 8169 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 8170 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 8171 } 8172 8173 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 8174 const struct rtw89_fw_mrc_req_tsf_arg *arg, 8175 struct rtw89_mac_mrc_tsf_rpt *rpt) 8176 { 8177 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 8178 struct rtw89_h2c_mrc_req_tsf *h2c; 8179 struct rtw89_mac_mrc_tsf_rpt *tmp; 8180 struct sk_buff *skb; 8181 unsigned int i; 8182 u32 len; 8183 int ret; 8184 8185 len = struct_size(h2c, infos, arg->num); 8186 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8187 if (!skb) { 8188 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 8189 return -ENOMEM; 8190 } 8191 8192 skb_put(skb, len); 8193 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 8194 8195 h2c->req_tsf_num = arg->num; 8196 for (i = 0; i < arg->num; i++) 8197 h2c->infos[i] = 8198 u8_encode_bits(arg->infos[i].band, 8199 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 8200 u8_encode_bits(arg->infos[i].port, 8201 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 8202 8203 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8204 H2C_CAT_MAC, 8205 H2C_CL_MRC, 8206 H2C_FUNC_MRC_REQ_TSF, 0, 0, 8207 len); 8208 8209 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 8210 if (ret) 8211 return ret; 8212 8213 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 8214 *rpt = *tmp; 8215 8216 return 0; 8217 } 8218 8219 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 8220 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 8221 { 8222 struct rtw89_h2c_mrc_upd_bitmap *h2c; 8223 u32 len = sizeof(*h2c); 8224 struct sk_buff *skb; 8225 int ret; 8226 8227 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8228 if (!skb) { 8229 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 8230 return -ENOMEM; 8231 } 8232 8233 skb_put(skb, len); 8234 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 8235 8236 h2c->w0 = le32_encode_bits(arg->sch_idx, 8237 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 8238 le32_encode_bits(arg->action, 8239 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 8240 le32_encode_bits(arg->macid, 8241 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 8242 h2c->w1 = le32_encode_bits(arg->client_macid, 8243 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 8244 8245 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8246 H2C_CAT_MAC, 8247 H2C_CL_MRC, 8248 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 8249 len); 8250 8251 ret = rtw89_h2c_tx(rtwdev, skb, false); 8252 if (ret) { 8253 rtw89_err(rtwdev, "failed to send h2c\n"); 8254 dev_kfree_skb_any(skb); 8255 return -EBUSY; 8256 } 8257 8258 return 0; 8259 } 8260 8261 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 8262 const struct rtw89_fw_mrc_sync_arg *arg) 8263 { 8264 struct rtw89_h2c_mrc_sync *h2c; 8265 u32 len = sizeof(*h2c); 8266 struct sk_buff *skb; 8267 int ret; 8268 8269 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8270 if (!skb) { 8271 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 8272 return -ENOMEM; 8273 } 8274 8275 skb_put(skb, len); 8276 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 8277 8278 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 8279 le32_encode_bits(arg->src.port, 8280 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 8281 le32_encode_bits(arg->src.band, 8282 
RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 8283 le32_encode_bits(arg->dest.port, 8284 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 8285 le32_encode_bits(arg->dest.band, 8286 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 8287 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 8288 8289 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8290 H2C_CAT_MAC, 8291 H2C_CL_MRC, 8292 H2C_FUNC_MRC_SYNC, 0, 0, 8293 len); 8294 8295 ret = rtw89_h2c_tx(rtwdev, skb, false); 8296 if (ret) { 8297 rtw89_err(rtwdev, "failed to send h2c\n"); 8298 dev_kfree_skb_any(skb); 8299 return -EBUSY; 8300 } 8301 8302 return 0; 8303 } 8304 8305 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 8306 const struct rtw89_fw_mrc_upd_duration_arg *arg) 8307 { 8308 struct rtw89_h2c_mrc_upd_duration *h2c; 8309 struct sk_buff *skb; 8310 unsigned int i; 8311 u32 len; 8312 int ret; 8313 8314 len = struct_size(h2c, slots, arg->slot_num); 8315 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8316 if (!skb) { 8317 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 8318 return -ENOMEM; 8319 } 8320 8321 skb_put(skb, len); 8322 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 8323 8324 h2c->w0 = le32_encode_bits(arg->sch_idx, 8325 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 8326 le32_encode_bits(arg->slot_num, 8327 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 8328 le32_encode_bits(false, 8329 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 8330 8331 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 8332 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 8333 8334 for (i = 0; i < arg->slot_num; i++) { 8335 h2c->slots[i] = 8336 le32_encode_bits(arg->slots[i].slot_idx, 8337 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 8338 le32_encode_bits(arg->slots[i].duration, 8339 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 8340 } 8341 8342 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8343 H2C_CAT_MAC, 8344 H2C_CL_MRC, 8345 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 8346 len); 8347 8348 ret = rtw89_h2c_tx(rtwdev, skb, false); 8349 if (ret) { 8350 rtw89_err(rtwdev, "failed to send h2c\n"); 8351 dev_kfree_skb_any(skb); 8352 return -EBUSY; 8353 } 8354 8355 return 0; 8356 } 8357 8358 static int rtw89_fw_h2c_ap_info(struct rtw89_dev *rtwdev, bool en) 8359 { 8360 struct rtw89_h2c_ap_info *h2c; 8361 u32 len = sizeof(*h2c); 8362 struct sk_buff *skb; 8363 int ret; 8364 8365 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 8366 if (!skb) { 8367 rtw89_err(rtwdev, "failed to alloc skb for ap info\n"); 8368 return -ENOMEM; 8369 } 8370 8371 skb_put(skb, len); 8372 h2c = (struct rtw89_h2c_ap_info *)skb->data; 8373 8374 h2c->w0 = le32_encode_bits(en, RTW89_H2C_AP_INFO_W0_PWR_INT_EN); 8375 8376 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 8377 H2C_CAT_MAC, 8378 H2C_CL_AP, 8379 H2C_FUNC_AP_INFO, 0, 0, 8380 len); 8381 8382 ret = rtw89_h2c_tx(rtwdev, skb, false); 8383 if (ret) { 8384 rtw89_err(rtwdev, "failed to send h2c\n"); 8385 dev_kfree_skb_any(skb); 8386 return -EBUSY; 8387 } 8388 8389 return 0; 8390 } 8391 8392 int rtw89_fw_h2c_ap_info_refcount(struct rtw89_dev *rtwdev, bool en) 8393 { 8394 int ret; 8395 8396 if (en) { 8397 if (refcount_inc_not_zero(&rtwdev->refcount_ap_info)) 8398 return 0; 8399 } else { 8400 if (!refcount_dec_and_test(&rtwdev->refcount_ap_info)) 8401 return 0; 8402 } 8403 8404 ret = rtw89_fw_h2c_ap_info(rtwdev, en); 8405 if (ret) { 8406 if (!test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 8407 return ret; 8408 8409 /* During recovery, neither driver nor stack has full error 8410 * handling, so show a warning, but return 0 with 
refcount 8411 * increased normally. It can avoid underflow when calling 8412 * with @en == false later. 8413 */ 8414 rtw89_warn(rtwdev, "h2c ap_info failed during SER\n"); 8415 } 8416 8417 if (en) 8418 refcount_set(&rtwdev->refcount_ap_info, 1); 8419 8420 return 0; 8421 } 8422 8423 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 8424 { 8425 static const u8 zeros[U8_MAX] = {}; 8426 8427 return memcmp(ext_ptr, zeros, ext_len) == 0; 8428 } 8429 8430 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 8431 ({ \ 8432 u8 __var_sz = sizeof(*(e)); \ 8433 bool __accept; \ 8434 if (__var_sz >= (ent_sz)) \ 8435 __accept = true; \ 8436 else \ 8437 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 8438 (ent_sz) - __var_sz);\ 8439 __accept; \ 8440 }) 8441 8442 static bool 8443 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 8444 const void *cursor, 8445 const struct rtw89_txpwr_conf *conf) 8446 { 8447 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8448 return false; 8449 8450 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 8451 return false; 8452 8453 switch (e->rs) { 8454 case RTW89_RS_CCK: 8455 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 8456 return false; 8457 break; 8458 case RTW89_RS_OFDM: 8459 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 8460 return false; 8461 break; 8462 case RTW89_RS_MCS: 8463 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 8464 e->nss >= RTW89_NSS_NUM || 8465 e->ofdma >= RTW89_OFDMA_NUM) 8466 return false; 8467 break; 8468 case RTW89_RS_HEDCM: 8469 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 8470 e->nss >= RTW89_NSS_HEDCM_NUM || 8471 e->ofdma >= RTW89_OFDMA_NUM) 8472 return false; 8473 break; 8474 case RTW89_RS_OFFSET: 8475 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 8476 return false; 8477 break; 8478 default: 8479 return false; 8480 } 8481 8482 return true; 8483 } 8484 8485 static 8486 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 8487 const struct rtw89_txpwr_table *tbl) 8488 { 8489 const struct rtw89_txpwr_conf *conf = tbl->data; 8490 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8491 struct rtw89_txpwr_byrate *byr_head; 8492 struct rtw89_rate_desc desc = {}; 8493 const void *cursor; 8494 u32 data; 8495 s8 *byr; 8496 int i; 8497 8498 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8499 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8500 continue; 8501 8502 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8503 data = le32_to_cpu(entry.data); 8504 desc.ofdma = entry.ofdma; 8505 desc.nss = entry.nss; 8506 desc.rs = entry.rs; 8507 8508 for (i = 0; i < entry.len; i++, data >>= 8) { 8509 desc.idx = entry.shf + i; 8510 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8511 *byr = data & 0xff; 8512 } 8513 } 8514 } 8515 8516 static bool 8517 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8518 const void *cursor, 8519 const struct rtw89_txpwr_conf *conf) 8520 { 8521 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8522 return false; 8523 8524 if (e->bw >= RTW89_2G_BW_NUM) 8525 return false; 8526 if (e->nt >= RTW89_NTX_NUM) 8527 return false; 8528 if (e->rs >= RTW89_RS_LMT_NUM) 8529 return false; 8530 if (e->bf >= RTW89_BF_NUM) 8531 return false; 8532 if (e->regd >= RTW89_REGD_NUM) 8533 return false; 8534 if (e->ch_idx >= RTW89_2G_CH_NUM) 8535 return false; 8536 8537 return true; 8538 } 8539 8540 static 8541 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8542 { 8543 const struct rtw89_txpwr_conf *conf = 
&data->conf; 8544 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8545 const void *cursor; 8546 8547 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8548 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8549 continue; 8550 8551 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8552 [entry.ch_idx] = entry.v; 8553 } 8554 } 8555 8556 static bool 8557 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8558 const void *cursor, 8559 const struct rtw89_txpwr_conf *conf) 8560 { 8561 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8562 return false; 8563 8564 if (e->bw >= RTW89_5G_BW_NUM) 8565 return false; 8566 if (e->nt >= RTW89_NTX_NUM) 8567 return false; 8568 if (e->rs >= RTW89_RS_LMT_NUM) 8569 return false; 8570 if (e->bf >= RTW89_BF_NUM) 8571 return false; 8572 if (e->regd >= RTW89_REGD_NUM) 8573 return false; 8574 if (e->ch_idx >= RTW89_5G_CH_NUM) 8575 return false; 8576 8577 return true; 8578 } 8579 8580 static 8581 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8582 { 8583 const struct rtw89_txpwr_conf *conf = &data->conf; 8584 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8585 const void *cursor; 8586 8587 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8588 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8589 continue; 8590 8591 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8592 [entry.ch_idx] = entry.v; 8593 } 8594 } 8595 8596 static bool 8597 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8598 const void *cursor, 8599 const struct rtw89_txpwr_conf *conf) 8600 { 8601 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8602 return false; 8603 8604 if (e->bw >= RTW89_6G_BW_NUM) 8605 return false; 8606 if (e->nt >= RTW89_NTX_NUM) 8607 return false; 8608 if (e->rs >= RTW89_RS_LMT_NUM) 8609 return false; 8610 if (e->bf >= RTW89_BF_NUM) 8611 return false; 8612 if (e->regd >= RTW89_REGD_NUM) 8613 return false; 8614 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8615 return false; 8616 if (e->ch_idx >= RTW89_6G_CH_NUM) 8617 return false; 8618 8619 return true; 8620 } 8621 8622 static 8623 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8624 { 8625 const struct rtw89_txpwr_conf *conf = &data->conf; 8626 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8627 const void *cursor; 8628 8629 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8630 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8631 continue; 8632 8633 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8634 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8635 } 8636 } 8637 8638 static bool 8639 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8640 const void *cursor, 8641 const struct rtw89_txpwr_conf *conf) 8642 { 8643 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8644 return false; 8645 8646 if (e->ru >= RTW89_RU_NUM) 8647 return false; 8648 if (e->nt >= RTW89_NTX_NUM) 8649 return false; 8650 if (e->regd >= RTW89_REGD_NUM) 8651 return false; 8652 if (e->ch_idx >= RTW89_2G_CH_NUM) 8653 return false; 8654 8655 return true; 8656 } 8657 8658 static 8659 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8660 { 8661 const struct rtw89_txpwr_conf *conf = &data->conf; 8662 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8663 const void *cursor; 8664 8665 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8666 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, 
cursor, conf)) 8667 continue; 8668 8669 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8670 } 8671 } 8672 8673 static bool 8674 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8675 const void *cursor, 8676 const struct rtw89_txpwr_conf *conf) 8677 { 8678 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8679 return false; 8680 8681 if (e->ru >= RTW89_RU_NUM) 8682 return false; 8683 if (e->nt >= RTW89_NTX_NUM) 8684 return false; 8685 if (e->regd >= RTW89_REGD_NUM) 8686 return false; 8687 if (e->ch_idx >= RTW89_5G_CH_NUM) 8688 return false; 8689 8690 return true; 8691 } 8692 8693 static 8694 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8695 { 8696 const struct rtw89_txpwr_conf *conf = &data->conf; 8697 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8698 const void *cursor; 8699 8700 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8701 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8702 continue; 8703 8704 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8705 } 8706 } 8707 8708 static bool 8709 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8710 const void *cursor, 8711 const struct rtw89_txpwr_conf *conf) 8712 { 8713 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8714 return false; 8715 8716 if (e->ru >= RTW89_RU_NUM) 8717 return false; 8718 if (e->nt >= RTW89_NTX_NUM) 8719 return false; 8720 if (e->regd >= RTW89_REGD_NUM) 8721 return false; 8722 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8723 return false; 8724 if (e->ch_idx >= RTW89_6G_CH_NUM) 8725 return false; 8726 8727 return true; 8728 } 8729 8730 static 8731 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8732 { 8733 const struct rtw89_txpwr_conf *conf = &data->conf; 8734 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8735 const void *cursor; 8736 8737 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8738 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8739 continue; 8740 8741 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8742 [entry.ch_idx] = entry.v; 8743 } 8744 } 8745 8746 static bool 8747 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 8748 const void *cursor, 8749 const struct rtw89_txpwr_conf *conf) 8750 { 8751 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8752 return false; 8753 8754 if (e->band >= RTW89_BAND_NUM) 8755 return false; 8756 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8757 return false; 8758 if (e->regd >= RTW89_REGD_NUM) 8759 return false; 8760 8761 return true; 8762 } 8763 8764 static 8765 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8766 { 8767 const struct rtw89_txpwr_conf *conf = &data->conf; 8768 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8769 const void *cursor; 8770 8771 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8772 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8773 continue; 8774 8775 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8776 } 8777 } 8778 8779 static bool 8780 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8781 const void *cursor, 8782 const struct rtw89_txpwr_conf *conf) 8783 { 8784 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8785 return false; 8786 8787 if (e->band >= RTW89_BAND_NUM) 8788 return false; 8789 if (e->regd >= RTW89_REGD_NUM) 8790 return false; 8791 8792 return true; 8793 } 
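/* Note on the loaders above: every fw_txpwr_*_entry_valid()/rtw89_fw_load_*()
 * pair follows the same scheme. rtw89_for_each_in_txpwr_conf() walks the raw
 * entries taken from the firmware file, each index field is bounds-checked
 * against the dimensions of the destination table (out-of-range entries are
 * silently skipped rather than indexing past data->v[]), and only then is the
 * value stored. __fw_txpwr_entry_acceptable() additionally tolerates firmware
 * files whose entry size is larger than the struct this driver was built with,
 * as long as the unknown tail bytes are all zero (see
 * __fw_txpwr_entry_zero_ext()). As an illustrative example of the byrate case,
 * an entry with shf = 0, len = 4 and a data word of 0x20181008 (after
 * le32_to_cpu()) unpacks to per-rate bytes 0x08, 0x10, 0x18, 0x20 through the
 * "data >>= 8" loop in rtw89_fw_load_txpwr_byrate().
 */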
8794 8795 static 8796 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8797 { 8798 const struct rtw89_txpwr_conf *conf = &data->conf; 8799 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8800 const void *cursor; 8801 8802 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8803 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8804 continue; 8805 8806 data->v[entry.band][entry.regd] = entry.v; 8807 } 8808 } 8809 8810 const struct rtw89_rfe_parms * 8811 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8812 const struct rtw89_rfe_parms *init) 8813 { 8814 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8815 struct rtw89_rfe_parms *parms; 8816 8817 if (!rfe_data) 8818 return init; 8819 8820 parms = &rfe_data->rfe_parms; 8821 if (init) 8822 *parms = *init; 8823 8824 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8825 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8826 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8827 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8828 parms->byr_tbl = &rfe_data->byrate.tbl; 8829 } 8830 8831 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8832 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8833 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8834 } 8835 8836 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8837 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8838 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8839 } 8840 8841 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8842 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8843 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8844 } 8845 8846 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8847 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8848 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8849 } 8850 8851 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8852 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8853 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8854 } 8855 8856 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8857 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8858 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8859 } 8860 8861 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8862 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8863 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8864 } 8865 8866 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8867 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8868 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8869 } 8870 8871 return parms; 8872 } 8873
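/* Usage sketch (illustrative only, not part of this file): once the TX power
 * tables have been parsed from the firmware file into rtwdev->rfe_data, a
 * caller is expected to do something along the lines of
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, default_parms);
 *
 * where default_parms is a placeholder for the chip's built-in
 * struct rtw89_rfe_parms. Tables that are present and valid in the firmware
 * file replace the corresponding built-in tables; everything else keeps the
 * values copied from @init.
 */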