// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
#include "wow.h"

struct rtw89_eapol_2_of_2 {
	u8 gtkbody[14];
	u8 key_des_ver;
	u8 rsvd[92];
} __packed;

struct rtw89_sa_query {
	u8 category;
	u8 action;
} __packed;

struct rtw89_arp_rsp {
	u8 llc_hdr[sizeof(rfc1042_header)];
	__be16 llc_type;
	struct arphdr arp_hdr;
	u8 sender_hw[ETH_ALEN];
	__be32 sender_ip;
	u8 target_hw[ETH_ALEN];
	__be32 target_ip;
} __packed;

static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};

union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
			       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

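/* FORMATTED_MSSC security sections carry an MSS key pool: a pool header, a
 * remap bitmap table and the signed key sets themselves. The parser below
 * validates the pool signature, derives the remap table and total pool size,
 * and, when secure boot is enabled, uses __get_mssc_key_idx() above to locate
 * the key set matching this device so it can be copied into the section
 * during download.
 */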
static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const struct rtw89_fw_hdr_section_v1 *section,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content + section_info->len +
				 le32_to_cpu(mss_hdr->key_raw_offset) +
				 key_sign_len * real_key_idx;
	section_info->key_len = key_sign_len;
	section_info->key_idx = real_key_idx;

out:
	if (info->secure_section_exist) {
		section_info->ignore = true;
		return 0;
	}

	info->secure_section_exist = true;

	return 0;

ignore:
	section_info->ignore = true;

	return 0;
}

static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const struct rtw89_fw_hdr_section_v1 *section,
				    const void *content,
				    u32 *mssc_len)
{
	int ret;

	section_info->mssc =
		le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);

	if (section_info->mssc == FORMATTED_MSSC) {
		ret = __parse_formatted_mssc(rtwdev, info, section_info,
					     section, content, mssc_len);
		if (ret)
			return -EINVAL;
	} else {
		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
		if (info->dsp_checksum)
			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;

		info->secure_section_exist = true;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];

		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			ret = __parse_security_section(rtwdev, info, section_info,
						       section, bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware support normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* WiFi firmware images in the firmware file are not sorted by
		 * version, so walk all entries to find the closest version
		 * that is equal to or lower than the chip cv.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	enum rtw89_fw_type type = arg.fw_type;
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_fw_suit *fw_suit;

	/* BB MCU firmware versions appear in decreasing order in the firmware
	 * file, so take the first one that is equal to or lower than the chip
	 * cv; that is the closest match.
	 */
	if (hal->cv < elm->u.bbmcu.cv)
		return 1; /* ignore this element */

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	if (fw_suit->data)
		return 1; /* ignore this element (a firmware is taken already) */

	fw_suit->data = elm->u.bbmcu.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware does not exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file does not exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple matched, take the last eventually */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none is matched, accept the default entry */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore this */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
	},
	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
	},
};

int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

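/* Prepend the 8-byte H2C command header: delivery type, category, class,
 * function and a rolling sequence number go into hdr0; the total length plus
 * the REC_ACK/DONE_ACK flags go into hdr1. A receive-ack request is forced on
 * every fourth sequence number regardless of what the caller asked for.
 */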
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_W7_PART_SIZE);

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return (info->section_num - dst_sec_idx) * sizeof(*section);
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_suit *fw_suit,
				 struct rtw89_fw_bin_info *info)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	bool copy_key = false;
	u32 pkt_len;
	int ret;

	if (info->ignore)
		return 0;

	if (info->key_addr && info->key_len) {
		if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
			rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n",
				   info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
		else
			copy_key = true;
	}

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		if (copy_key)
			memcpy(skb->data + pkt_len - info->key_len,
			       info->key_addr, info->key_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

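/* Dump the firmware program counter several times when a download fails.
 * AX chips sample the PC through the MAC debug port registers; BE chips
 * expose it directly via R_BE_WLCPU_PORT_PC.
 */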
static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u32 addr = R_AX_DBG_PORT_SEL;
	u32 val32;
	u16 index;

	if (chip_gen == RTW89_CHIP_BE) {
		addr = R_BE_WLCPU_PORT_PC;
		goto dump;
	}

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

dump:
	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, addr);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info = {};
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

static
int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		goto fwdl_err;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	int retry;
	int ret;

	for (retry = 0; retry < 5; retry++) {
		ret = __rtw89_fw_download(rtwdev, type, include_bb);
		if (!ret)
			return 0;
	}

	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);

	kfree(elm_info->txpwr_trk);
	kfree(elm_info->rfk_log_fmt);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}

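/* The LOGFMT suit starts with a header holding the format count and an array
 * of format IDs, followed by the NUL-terminated printf-style format strings
 * themselves. rtw89_fw_log_create_fmts_dict() builds a lookup table from that
 * blob so formatted C2H log messages can be rendered by format ID.
 */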
static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 i;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (i = 0; i < fw_log->fmt_count; i++) {
		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
			return i;
	}
	return 0;
}

static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}

int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	struct rtw89_fw_suit *suit = &log->suit;

	if (!suit || !suit->data) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
		return -EINVAL;
	}
	if (log->fmts)
		return 0;

	return rtw89_fw_log_create_fmts_dict(rtwdev);
}

static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0 ; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}

void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

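/* Build and send the address CAM / BSSID CAM update H2C command. The payload
 * is filled by rtw89_cam_fill_addr_cam_info() and
 * rtw89_cam_fill_bssid_cam_info(); scan_mac_addr is passed through to the
 * address CAM fill helper.
 */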
#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);

int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct rtw89_sta *rtwsta)
{
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);

	h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
	h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
	h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
	h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
	h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
	h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
	h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
	h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
	h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
	h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
	h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
	h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);

int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_h2c_ba_cam *h2c;
	u8 macid = rtwsta->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ba_cam *)skb->data;

	h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID);
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1);
	else
		h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX);
	if (!valid)
		goto end;
	h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) |
		   le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID);
	if (params->buf_size > 64)
		h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
	else
		h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE);
	/* If init req is set, hw will set the ssn */
	h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) |
		   le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) |
			   le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam);

static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct rtw89_h2c_ba_cam *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_ba_cam *)skb->data;

	h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID);
	h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) |
		  le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) |
		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) |
		  le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			   bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_h2c_ba_cam_v1 *h2c;
	u8 macid = rtwsta->mac_id;
rtwsta->mac_id; 2043 u32 len = sizeof(*h2c); 2044 struct sk_buff *skb; 2045 u8 entry_idx; 2046 u8 bmap_size; 2047 int ret; 2048 2049 ret = valid ? 2050 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 2051 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 2052 if (ret) { 2053 /* it still works even if we don't have static BA CAM, because 2054 * hardware can create dynamic BA CAM automatically. 2055 */ 2056 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2057 "failed to %s entry tid=%d for h2c ba cam\n", 2058 valid ? "alloc" : "free", params->tid); 2059 return 0; 2060 } 2061 2062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2063 if (!skb) { 2064 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2065 return -ENOMEM; 2066 } 2067 skb_put(skb, len); 2068 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2069 2070 if (params->buf_size > 512) 2071 bmap_size = 10; 2072 else if (params->buf_size > 256) 2073 bmap_size = 8; 2074 else if (params->buf_size > 64) 2075 bmap_size = 4; 2076 else 2077 bmap_size = 0; 2078 2079 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2080 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2081 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2082 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2083 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2084 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2085 2086 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2087 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2088 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2089 le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2090 2091 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2092 H2C_CAT_MAC, 2093 H2C_CL_BA_CAM, 2094 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2095 len); 2096 2097 ret = rtw89_h2c_tx(rtwdev, skb, false); 2098 if (ret) { 2099 rtw89_err(rtwdev, "failed to send h2c\n"); 2100 goto fail; 2101 } 2102 2103 return 0; 2104 fail: 2105 dev_kfree_skb_any(skb); 2106 2107 return ret; 2108 } 2109 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2110 2111 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2112 u8 offset, u8 mac_idx) 2113 { 2114 struct rtw89_h2c_ba_cam_init *h2c; 2115 u32 len = sizeof(*h2c); 2116 struct sk_buff *skb; 2117 int ret; 2118 2119 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2120 if (!skb) { 2121 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2122 return -ENOMEM; 2123 } 2124 skb_put(skb, len); 2125 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2126 2127 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2128 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2129 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2130 2131 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2132 H2C_CAT_MAC, 2133 H2C_CL_BA_CAM, 2134 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2135 len); 2136 2137 ret = rtw89_h2c_tx(rtwdev, skb, false); 2138 if (ret) { 2139 rtw89_err(rtwdev, "failed to send h2c\n"); 2140 goto fail; 2141 } 2142 2143 return 0; 2144 fail: 2145 dev_kfree_skb_any(skb); 2146 2147 return ret; 2148 } 2149 2150 #define H2C_LOG_CFG_LEN 12 2151 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2152 { 2153 struct sk_buff *skb; 2154 u32 comp = 0; 2155 int ret; 2156 2157 if (enable) 2158 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2159 
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2160 BIT(RTW89_FW_LOG_COMP_SCAN); 2161 2162 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2163 if (!skb) { 2164 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2165 return -ENOMEM; 2166 } 2167 2168 skb_put(skb, H2C_LOG_CFG_LEN); 2169 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2170 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2171 SET_LOG_CFG_COMP(skb->data, comp); 2172 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2173 2174 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2175 H2C_CAT_MAC, 2176 H2C_CL_FW_INFO, 2177 H2C_FUNC_LOG_CFG, 0, 0, 2178 H2C_LOG_CFG_LEN); 2179 2180 ret = rtw89_h2c_tx(rtwdev, skb, false); 2181 if (ret) { 2182 rtw89_err(rtwdev, "failed to send h2c\n"); 2183 goto fail; 2184 } 2185 2186 return 0; 2187 fail: 2188 dev_kfree_skb_any(skb); 2189 2190 return ret; 2191 } 2192 2193 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2194 struct rtw89_vif *rtwvif) 2195 { 2196 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2197 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2198 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2199 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2200 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2201 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2202 struct rtw89_eapol_2_of_2 *eapol_pkt; 2203 struct ieee80211_hdr_3addr *hdr; 2204 struct sk_buff *skb; 2205 u8 key_des_ver; 2206 2207 if (rtw_wow->ptk_alg == 3) 2208 key_des_ver = 1; 2209 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2210 key_des_ver = 2; 2211 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2212 key_des_ver = 3; 2213 else 2214 key_des_ver = 0; 2215 2216 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2217 if (!skb) 2218 return NULL; 2219 2220 hdr = skb_put_zero(skb, sizeof(*hdr)); 2221 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2222 IEEE80211_FCTL_TODS | 2223 IEEE80211_FCTL_PROTECTED); 2224 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2225 ether_addr_copy(hdr->addr2, vif->addr); 2226 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2227 2228 skb_put_zero(skb, sec_hdr_len); 2229 2230 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2231 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2232 eapol_pkt->key_des_ver = key_des_ver; 2233 2234 return skb; 2235 } 2236 2237 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2238 struct rtw89_vif *rtwvif) 2239 { 2240 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2241 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2242 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2243 struct ieee80211_hdr_3addr *hdr; 2244 struct rtw89_sa_query *sa_query; 2245 struct sk_buff *skb; 2246 2247 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2248 if (!skb) 2249 return NULL; 2250 2251 hdr = skb_put_zero(skb, sizeof(*hdr)); 2252 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2253 IEEE80211_STYPE_ACTION | 2254 IEEE80211_FCTL_PROTECTED); 2255 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2256 ether_addr_copy(hdr->addr2, vif->addr); 2257 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2258 2259 skb_put_zero(skb, sec_hdr_len); 2260 2261 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2262 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2263 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2264 2265 return skb; 2266 } 2267 2268 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2269 struct rtw89_vif 
*rtwvif) 2270 { 2271 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2272 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2273 struct ieee80211_hdr_3addr *hdr; 2274 struct rtw89_arp_rsp *arp_skb; 2275 struct arphdr *arp_hdr; 2276 struct sk_buff *skb; 2277 __le16 fc; 2278 2279 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2280 if (!skb) 2281 return NULL; 2282 2283 hdr = skb_put_zero(skb, sizeof(*hdr)); 2284 2285 if (rtw_wow->ptk_alg) 2286 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2287 IEEE80211_FCTL_PROTECTED); 2288 else 2289 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2290 2291 hdr->frame_control = fc; 2292 ether_addr_copy(hdr->addr1, rtwvif->bssid); 2293 ether_addr_copy(hdr->addr2, rtwvif->mac_addr); 2294 ether_addr_copy(hdr->addr3, rtwvif->bssid); 2295 2296 skb_put_zero(skb, sec_hdr_len); 2297 2298 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2299 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2300 arp_skb->llc_type = htons(ETH_P_ARP); 2301 2302 arp_hdr = &arp_skb->arp_hdr; 2303 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2304 arp_hdr->ar_pro = htons(ETH_P_IP); 2305 arp_hdr->ar_hln = ETH_ALEN; 2306 arp_hdr->ar_pln = 4; 2307 arp_hdr->ar_op = htons(ARPOP_REPLY); 2308 2309 ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr); 2310 arp_skb->sender_ip = rtwvif->ip_addr; 2311 2312 return skb; 2313 } 2314 2315 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2316 struct rtw89_vif *rtwvif, 2317 enum rtw89_fw_pkt_ofld_type type, 2318 u8 *id) 2319 { 2320 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2321 struct rtw89_pktofld_info *info; 2322 struct sk_buff *skb; 2323 int ret; 2324 2325 info = kzalloc(sizeof(*info), GFP_KERNEL); 2326 if (!info) 2327 return -ENOMEM; 2328 2329 switch (type) { 2330 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2331 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2332 break; 2333 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2334 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2335 break; 2336 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2337 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2338 break; 2339 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2340 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2341 break; 2342 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2343 skb = rtw89_eapol_get(rtwdev, rtwvif); 2344 break; 2345 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2346 skb = rtw89_sa_query_get(rtwdev, rtwvif); 2347 break; 2348 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2349 skb = rtw89_arp_response_get(rtwdev, rtwvif); 2350 break; 2351 default: 2352 goto err; 2353 } 2354 2355 if (!skb) 2356 goto err; 2357 2358 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2359 kfree_skb(skb); 2360 2361 if (ret) 2362 goto err; 2363 2364 list_add_tail(&info->list, &rtwvif->general_pkt_list); 2365 *id = info->id; 2366 return 0; 2367 2368 err: 2369 kfree(info); 2370 return -ENOMEM; 2371 } 2372 2373 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2374 struct rtw89_vif *rtwvif, bool notify_fw) 2375 { 2376 struct list_head *pkt_list = &rtwvif->general_pkt_list; 2377 struct rtw89_pktofld_info *info, *tmp; 2378 2379 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2380 if (notify_fw) 2381 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2382 else 2383 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2384 list_del(&info->list); 2385 kfree(info); 2386 } 2387 } 2388 2389 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2390 { 2391 struct rtw89_vif *rtwvif; 2392 2393 
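/* walk every vif and drop its offloaded general packets; when notify_fw is
 * false only the local packet-offload IDs are released
 */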
rtw89_for_each_rtwvif(rtwdev, rtwvif) 2394 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 2395 } 2396 2397 #define H2C_GENERAL_PKT_LEN 6 2398 #define H2C_GENERAL_PKT_ID_UND 0xff 2399 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2400 struct rtw89_vif *rtwvif, u8 macid) 2401 { 2402 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2403 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2404 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2405 struct sk_buff *skb; 2406 int ret; 2407 2408 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2409 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2410 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2411 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2412 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2413 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2414 2415 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2416 if (!skb) { 2417 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2418 return -ENOMEM; 2419 } 2420 skb_put(skb, H2C_GENERAL_PKT_LEN); 2421 SET_GENERAL_PKT_MACID(skb->data, macid); 2422 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2423 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2424 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2425 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2426 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2427 2428 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2429 H2C_CAT_MAC, 2430 H2C_CL_FW_INFO, 2431 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2432 H2C_GENERAL_PKT_LEN); 2433 2434 ret = rtw89_h2c_tx(rtwdev, skb, false); 2435 if (ret) { 2436 rtw89_err(rtwdev, "failed to send h2c\n"); 2437 goto fail; 2438 } 2439 2440 return 0; 2441 fail: 2442 dev_kfree_skb_any(skb); 2443 2444 return ret; 2445 } 2446 2447 #define H2C_LPS_PARM_LEN 8 2448 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2449 struct rtw89_lps_parm *lps_param) 2450 { 2451 struct sk_buff *skb; 2452 int ret; 2453 2454 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2455 if (!skb) { 2456 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2457 return -ENOMEM; 2458 } 2459 skb_put(skb, H2C_LPS_PARM_LEN); 2460 2461 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2462 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2463 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2464 SET_LPS_PARM_RLBM(skb->data, 1); 2465 SET_LPS_PARM_SMARTPS(skb->data, 1); 2466 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2467 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2468 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2469 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2470 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2471 2472 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2473 H2C_CAT_MAC, 2474 H2C_CL_MAC_PS, 2475 H2C_FUNC_MAC_LPS_PARM, 0, 1, 2476 H2C_LPS_PARM_LEN); 2477 2478 ret = rtw89_h2c_tx(rtwdev, skb, false); 2479 if (ret) { 2480 rtw89_err(rtwdev, "failed to send h2c\n"); 2481 goto fail; 2482 } 2483 2484 return 0; 2485 fail: 2486 dev_kfree_skb_any(skb); 2487 2488 return ret; 2489 } 2490 2491 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2492 { 2493 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2494 rtwvif->sub_entity_idx); 2495 const struct rtw89_chip_info *chip = rtwdev->chip; 2496 struct rtw89_h2c_lps_ch_info *h2c; 2497 u32 len = sizeof(*h2c); 2498 struct sk_buff *skb; 2499 u32 done; 2500 int ret; 2501 2502 if (chip->chip_gen != RTW89_CHIP_BE) 2503 return 0; 2504 2505 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2506 if (!skb) { 2507 rtw89_err(rtwdev, 
"failed to alloc skb for h2c lps_ch_info\n"); 2508 return -ENOMEM; 2509 } 2510 skb_put(skb, len); 2511 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2512 2513 h2c->info[0].central_ch = chan->channel; 2514 h2c->info[0].pri_ch = chan->primary_channel; 2515 h2c->info[0].band = chan->band_type; 2516 h2c->info[0].bw = chan->band_width; 2517 h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2518 2519 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2520 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2521 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2522 2523 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2524 ret = rtw89_h2c_tx(rtwdev, skb, false); 2525 if (ret) { 2526 rtw89_err(rtwdev, "failed to send h2c\n"); 2527 goto fail; 2528 } 2529 2530 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2531 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2532 if (ret) 2533 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2534 2535 return 0; 2536 fail: 2537 dev_kfree_skb_any(skb); 2538 2539 return ret; 2540 } 2541 2542 #define H2C_P2P_ACT_LEN 20 2543 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2544 struct ieee80211_p2p_noa_desc *desc, 2545 u8 act, u8 noa_id) 2546 { 2547 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2548 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2549 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 2550 struct sk_buff *skb; 2551 u8 *cmd; 2552 int ret; 2553 2554 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2555 if (!skb) { 2556 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2557 return -ENOMEM; 2558 } 2559 skb_put(skb, H2C_P2P_ACT_LEN); 2560 cmd = skb->data; 2561 2562 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 2563 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2564 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2565 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2566 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2567 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2568 if (desc) { 2569 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2570 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2571 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2572 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2573 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2574 } 2575 2576 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2577 H2C_CAT_MAC, H2C_CL_MAC_PS, 2578 H2C_FUNC_P2P_ACT, 0, 0, 2579 H2C_P2P_ACT_LEN); 2580 2581 ret = rtw89_h2c_tx(rtwdev, skb, false); 2582 if (ret) { 2583 rtw89_err(rtwdev, "failed to send h2c\n"); 2584 goto fail; 2585 } 2586 2587 return 0; 2588 fail: 2589 dev_kfree_skb_any(skb); 2590 2591 return ret; 2592 } 2593 2594 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2595 struct sk_buff *skb) 2596 { 2597 const struct rtw89_chip_info *chip = rtwdev->chip; 2598 struct rtw89_hal *hal = &rtwdev->hal; 2599 u8 ntx_path; 2600 u8 map_b; 2601 2602 if (chip->rf_path_num == 1) { 2603 ntx_path = RF_A; 2604 map_b = 0; 2605 } else { 2606 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2607 map_b = hal->antenna_tx == RF_AB ? 
1 : 0; 2608 } 2609 2610 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2611 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2612 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2613 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2614 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2615 } 2616 2617 #define H2C_CMC_TBL_LEN 68 2618 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2619 struct rtw89_vif *rtwvif, 2620 struct rtw89_sta *rtwsta) 2621 { 2622 const struct rtw89_chip_info *chip = rtwdev->chip; 2623 u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2624 struct sk_buff *skb; 2625 int ret; 2626 2627 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2628 if (!skb) { 2629 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2630 return -ENOMEM; 2631 } 2632 skb_put(skb, H2C_CMC_TBL_LEN); 2633 SET_CTRL_INFO_MACID(skb->data, macid); 2634 SET_CTRL_INFO_OPERATION(skb->data, 1); 2635 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2636 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2637 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2638 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2639 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2640 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2641 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2642 } 2643 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2644 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2645 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2646 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2647 2648 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2649 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2650 chip->h2c_cctl_func_id, 0, 1, 2651 H2C_CMC_TBL_LEN); 2652 2653 ret = rtw89_h2c_tx(rtwdev, skb, false); 2654 if (ret) { 2655 rtw89_err(rtwdev, "failed to send h2c\n"); 2656 goto fail; 2657 } 2658 2659 return 0; 2660 fail: 2661 dev_kfree_skb_any(skb); 2662 2663 return ret; 2664 } 2665 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2666 2667 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2668 struct rtw89_vif *rtwvif, 2669 struct rtw89_sta *rtwsta) 2670 { 2671 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2672 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2673 u32 len = sizeof(*h2c); 2674 struct sk_buff *skb; 2675 int ret; 2676 2677 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2678 if (!skb) { 2679 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2680 return -ENOMEM; 2681 } 2682 skb_put(skb, len); 2683 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2684 2685 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2686 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2687 2688 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2689 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2690 2691 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2692 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2693 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2694 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2695 2696 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2697 2698 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2699 2700 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2701 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2702 2703 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2704 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2705 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2706 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2707 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2708 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2709 2710 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2711 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2712 2713 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2714 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2715 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2716 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2717 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2718 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2719 2720 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2721 2722 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2723 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2724 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2725 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2726 2727 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2728 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2729 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2730 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2731 2732 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2733 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2734 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2735 len); 2736 2737 ret = rtw89_h2c_tx(rtwdev, skb, false); 2738 if (ret) { 2739 rtw89_err(rtwdev, "failed to send h2c\n"); 2740 goto fail; 2741 } 2742 2743 return 0; 2744 fail: 2745 dev_kfree_skb_any(skb); 2746 2747 return ret; 2748 } 2749 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2750 2751 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2752 struct ieee80211_sta *sta, u8 *pads) 2753 { 2754 bool ppe_th; 2755 u8 ppe16, ppe8; 2756 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2757 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 2758 u8 ru_bitmap; 2759 u8 n, idx, sh; 2760 u16 ppe; 2761 int i; 2762 2763 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2764 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 2765 if (!ppe_th) { 2766 u8 pad; 2767 2768 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2769 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 2770 2771 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 2772 pads[i] = pad; 2773 2774 return; 2775 } 2776 2777 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 2778 n = hweight8(ru_bitmap); 2779 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2780 2781 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2782 if (!(ru_bitmap & BIT(i))) { 2783 pads[i] = 1; 2784 continue; 2785 } 2786 2787 idx = n >> 3; 2788 sh = n & 7; 2789 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 2790 2791 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 2792 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2793 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2794 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2795 2796 if (ppe16 != 7 && ppe8 == 7) 2797 pads[i] = RTW89_PE_DURATION_16; 2798 else if (ppe8 != 7) 2799 pads[i] = RTW89_PE_DURATION_8; 2800 else 2801 pads[i] = RTW89_PE_DURATION_0; 2802 } 2803 } 2804 2805 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 2806 struct ieee80211_vif *vif, 2807 struct ieee80211_sta *sta) 2808 { 2809 const struct rtw89_chip_info *chip = rtwdev->chip; 2810 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2811 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2812 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2813 rtwvif->sub_entity_idx); 2814 struct sk_buff *skb; 2815 u8 pads[RTW89_PPE_BW_NUM]; 2816 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2817 u16 lowest_rate; 2818 int ret; 2819 2820 memset(pads, 0, sizeof(pads)); 2821 if (sta && sta->deflink.he_cap.has_he) 2822 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2823 2824 if (vif->p2p) 2825 lowest_rate = RTW89_HW_RATE_OFDM6; 2826 else if (chan->band_type == RTW89_BAND_2G) 2827 lowest_rate = RTW89_HW_RATE_CCK1; 2828 else 2829 lowest_rate = RTW89_HW_RATE_OFDM6; 2830 2831 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2832 if (!skb) { 2833 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2834 return -ENOMEM; 2835 } 2836 skb_put(skb, H2C_CMC_TBL_LEN); 2837 SET_CTRL_INFO_MACID(skb->data, mac_id); 2838 SET_CTRL_INFO_OPERATION(skb->data, 1); 2839 SET_CMC_TBL_DISRTSFB(skb->data, 1); 2840 SET_CMC_TBL_DISDATAFB(skb->data, 1); 2841 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 2842 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 2843 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 2844 if (vif->type == NL80211_IFTYPE_STATION) 2845 SET_CMC_TBL_ULDL(skb->data, 1); 2846 else 2847 SET_CMC_TBL_ULDL(skb->data, 0); 2848 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 2849 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 2850 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2851 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2852 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2853 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2854 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2855 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2856 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2857 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2858 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2859 } 2860 if (sta) 2861 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 2862 sta->deflink.he_cap.has_he); 2863 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2864 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2865 2866 
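/* wrap the filled CMAC control table in an H2C header before queueing it to firmware */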
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2867 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2868 chip->h2c_cctl_func_id, 0, 1, 2869 H2C_CMC_TBL_LEN); 2870 2871 ret = rtw89_h2c_tx(rtwdev, skb, false); 2872 if (ret) { 2873 rtw89_err(rtwdev, "failed to send h2c\n"); 2874 goto fail; 2875 } 2876 2877 return 0; 2878 fail: 2879 dev_kfree_skb_any(skb); 2880 2881 return ret; 2882 } 2883 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 2884 2885 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 2886 struct ieee80211_sta *sta, u8 *pads) 2887 { 2888 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2889 u16 ppe_thres_hdr; 2890 u8 ppe16, ppe8; 2891 u8 n, idx, sh; 2892 u8 ru_bitmap; 2893 bool ppe_th; 2894 u16 ppe; 2895 int i; 2896 2897 ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2898 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 2899 if (!ppe_th) { 2900 u8 pad; 2901 2902 pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2903 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 2904 2905 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 2906 pads[i] = pad; 2907 2908 return; 2909 } 2910 2911 ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres); 2912 ru_bitmap = u16_get_bits(ppe_thres_hdr, 2913 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2914 n = hweight8(ru_bitmap); 2915 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 2916 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2917 2918 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2919 if (!(ru_bitmap & BIT(i))) { 2920 pads[i] = 1; 2921 continue; 2922 } 2923 2924 idx = n >> 3; 2925 sh = n & 7; 2926 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2927 2928 ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx); 2929 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2930 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 2931 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2932 2933 if (ppe16 != 7 && ppe8 == 7) 2934 pads[i] = RTW89_PE_DURATION_16_20; 2935 else if (ppe8 != 7) 2936 pads[i] = RTW89_PE_DURATION_8; 2937 else 2938 pads[i] = RTW89_PE_DURATION_0; 2939 } 2940 } 2941 2942 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2943 struct ieee80211_vif *vif, 2944 struct ieee80211_sta *sta) 2945 { 2946 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2947 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2948 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2949 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2950 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2951 u8 pads[RTW89_PPE_BW_NUM]; 2952 u32 len = sizeof(*h2c); 2953 struct sk_buff *skb; 2954 u16 lowest_rate; 2955 int ret; 2956 2957 memset(pads, 0, sizeof(pads)); 2958 if (sta) { 2959 if (sta->deflink.eht_cap.has_eht) 2960 __get_sta_eht_pkt_padding(rtwdev, sta, pads); 2961 else if (sta->deflink.he_cap.has_he) 2962 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2963 } 2964 2965 if (vif->p2p) 2966 lowest_rate = RTW89_HW_RATE_OFDM6; 2967 else if (chan->band_type == RTW89_BAND_2G) 2968 lowest_rate = RTW89_HW_RATE_CCK1; 2969 else 2970 lowest_rate = RTW89_HW_RATE_OFDM6; 2971 2972 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2973 if (!skb) { 2974 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2975 return -ENOMEM; 2976 } 2977 skb_put(skb, len); 2978 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2979 2980 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2981 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2982 2983 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 2984 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 2985 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 2986 CCTLINFO_G7_W0_DISDATAFB); 2987 2988 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2989 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2990 2991 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2992 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2993 2994 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 2995 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 2996 2997 h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 2998 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 2999 3000 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3001 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3002 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3003 } 3004 3005 if (vif->bss_conf.eht_support) { 3006 u16 punct = vif->bss_conf.chanreq.oper.punctured; 3007 3008 h2c->w4 |= le32_encode_bits(~punct, 3009 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3010 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3011 } 3012 3013 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3014 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3015 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3016 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3017 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3018 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3019 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3020 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3021 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3022 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3023 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3024 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3025 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3026 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3027 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3028 3029 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3030 CCTLINFO_G7_W6_ULDL); 3031 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3032 3033 if (sta) { 3034 h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he, 3035 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3036 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3037 } 3038 3039 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3040 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3041 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3042 len); 3043 3044 ret = rtw89_h2c_tx(rtwdev, skb, false); 3045 if (ret) { 3046 rtw89_err(rtwdev, "failed to send h2c\n"); 3047 goto fail; 3048 } 3049 3050 return 0; 3051 fail: 3052 dev_kfree_skb_any(skb); 3053 3054 return ret; 3055 } 3056 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3057 3058 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3059 struct ieee80211_vif *vif, 3060 struct ieee80211_sta *sta) 3061 { 3062 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 3063 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3064 u32 len = sizeof(*h2c); 3065 struct sk_buff *skb; 3066 u16 agg_num = 0; 3067 u8 ba_bmap = 0; 3068 int ret; 3069 u8 tid; 3070 3071 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3072 if (!skb) { 3073 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3074 return -ENOMEM; 3075 } 3076 skb_put(skb, len); 3077 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3078 3079 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3080 if (agg_num == 0) 3081 agg_num = rtwsta->ampdu_params[tid].agg_num; 3082 else 3083 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3084 } 3085 3086 if (agg_num <= 0x20) 3087 ba_bmap = 3; 3088 else if (agg_num > 0x20 && agg_num <= 0x40) 3089 ba_bmap = 0; 3090 else if (agg_num > 0x40 && agg_num <= 0x80) 3091 ba_bmap = 1; 3092 else if (agg_num > 0x80 && agg_num <= 0x100) 3093 ba_bmap = 2; 3094 else if (agg_num > 0x100 && agg_num <= 0x200) 3095 ba_bmap = 4; 3096 else if (agg_num > 0x200 && agg_num <= 0x400) 3097 ba_bmap = 5; 3098 3099 h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) | 3100 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3101 3102 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3103 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3104 3105 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3106 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3107 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3108 len); 3109 3110 ret = rtw89_h2c_tx(rtwdev, skb, false); 3111 if (ret) { 3112 rtw89_err(rtwdev, "failed to send h2c\n"); 3113 goto fail; 3114 } 3115 3116 return 0; 3117 fail: 3118 dev_kfree_skb_any(skb); 3119 3120 return ret; 3121 } 3122 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3123 3124 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3125 struct rtw89_sta *rtwsta) 3126 { 3127 const struct rtw89_chip_info *chip = rtwdev->chip; 3128 struct sk_buff *skb; 3129 int ret; 3130 3131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3132 if (!skb) { 3133 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3134 return -ENOMEM; 3135 } 3136 skb_put(skb, H2C_CMC_TBL_LEN); 3137 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 3138 SET_CTRL_INFO_OPERATION(skb->data, 1); 3139 if (rtwsta->cctl_tx_time) { 3140 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3141 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 3142 } 3143 if (rtwsta->cctl_tx_retry_limit) { 3144 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3145 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 3146 } 3147 3148 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3149 H2C_CAT_MAC, 
H2C_CL_MAC_FR_EXCHG, 3150 chip->h2c_cctl_func_id, 0, 1, 3151 H2C_CMC_TBL_LEN); 3152 3153 ret = rtw89_h2c_tx(rtwdev, skb, false); 3154 if (ret) { 3155 rtw89_err(rtwdev, "failed to send h2c\n"); 3156 goto fail; 3157 } 3158 3159 return 0; 3160 fail: 3161 dev_kfree_skb_any(skb); 3162 3163 return ret; 3164 } 3165 3166 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3167 struct rtw89_sta *rtwsta) 3168 { 3169 const struct rtw89_chip_info *chip = rtwdev->chip; 3170 struct sk_buff *skb; 3171 int ret; 3172 3173 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3174 return 0; 3175 3176 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3177 if (!skb) { 3178 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3179 return -ENOMEM; 3180 } 3181 skb_put(skb, H2C_CMC_TBL_LEN); 3182 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 3183 SET_CTRL_INFO_OPERATION(skb->data, 1); 3184 3185 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3186 3187 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3188 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3189 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3190 H2C_CMC_TBL_LEN); 3191 3192 ret = rtw89_h2c_tx(rtwdev, skb, false); 3193 if (ret) { 3194 rtw89_err(rtwdev, "failed to send h2c\n"); 3195 goto fail; 3196 } 3197 3198 return 0; 3199 fail: 3200 dev_kfree_skb_any(skb); 3201 3202 return ret; 3203 } 3204 3205 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3206 struct rtw89_vif *rtwvif) 3207 { 3208 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3209 rtwvif->sub_entity_idx); 3210 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3211 struct rtw89_h2c_bcn_upd *h2c; 3212 struct sk_buff *skb_beacon; 3213 struct ieee80211_hdr *hdr; 3214 u32 len = sizeof(*h2c); 3215 struct sk_buff *skb; 3216 int bcn_total_len; 3217 u16 beacon_rate; 3218 u16 tim_offset; 3219 void *noa_data; 3220 u8 noa_len; 3221 int ret; 3222 3223 if (vif->p2p) 3224 beacon_rate = RTW89_HW_RATE_OFDM6; 3225 else if (chan->band_type == RTW89_BAND_2G) 3226 beacon_rate = RTW89_HW_RATE_CCK1; 3227 else 3228 beacon_rate = RTW89_HW_RATE_OFDM6; 3229 3230 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3231 NULL, 0); 3232 if (!skb_beacon) { 3233 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3234 return -ENOMEM; 3235 } 3236 3237 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 3238 if (noa_len && 3239 (noa_len <= skb_tailroom(skb_beacon) || 3240 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3241 skb_put_data(skb_beacon, noa_data, noa_len); 3242 } 3243 3244 hdr = (struct ieee80211_hdr *)skb_beacon; 3245 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3246 3247 bcn_total_len = len + skb_beacon->len; 3248 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3249 if (!skb) { 3250 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3251 dev_kfree_skb_any(skb_beacon); 3252 return -ENOMEM; 3253 } 3254 skb_put(skb, len); 3255 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3256 3257 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3258 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3259 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3260 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3261 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3262 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3263 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3264 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3265 3266 
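/* the raw beacon frame is appended right after the fixed bcn_upd fields of the same H2C */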
skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3267 dev_kfree_skb_any(skb_beacon); 3268 3269 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3270 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3271 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3272 bcn_total_len); 3273 3274 ret = rtw89_h2c_tx(rtwdev, skb, false); 3275 if (ret) { 3276 rtw89_err(rtwdev, "failed to send h2c\n"); 3277 dev_kfree_skb_any(skb); 3278 return ret; 3279 } 3280 3281 return 0; 3282 } 3283 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3284 3285 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3286 struct rtw89_vif *rtwvif) 3287 { 3288 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 3289 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3290 struct rtw89_h2c_bcn_upd_be *h2c; 3291 struct sk_buff *skb_beacon; 3292 struct ieee80211_hdr *hdr; 3293 u32 len = sizeof(*h2c); 3294 struct sk_buff *skb; 3295 int bcn_total_len; 3296 u16 beacon_rate; 3297 u16 tim_offset; 3298 void *noa_data; 3299 u8 noa_len; 3300 int ret; 3301 3302 if (vif->p2p) 3303 beacon_rate = RTW89_HW_RATE_OFDM6; 3304 else if (chan->band_type == RTW89_BAND_2G) 3305 beacon_rate = RTW89_HW_RATE_CCK1; 3306 else 3307 beacon_rate = RTW89_HW_RATE_OFDM6; 3308 3309 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3310 NULL, 0); 3311 if (!skb_beacon) { 3312 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3313 return -ENOMEM; 3314 } 3315 3316 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 3317 if (noa_len && 3318 (noa_len <= skb_tailroom(skb_beacon) || 3319 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3320 skb_put_data(skb_beacon, noa_data, noa_len); 3321 } 3322 3323 hdr = (struct ieee80211_hdr *)skb_beacon; 3324 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3325 3326 bcn_total_len = len + skb_beacon->len; 3327 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3328 if (!skb) { 3329 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3330 dev_kfree_skb_any(skb_beacon); 3331 return -ENOMEM; 3332 } 3333 skb_put(skb, len); 3334 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3335 3336 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3337 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3338 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3339 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3340 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3341 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3342 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3343 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3344 3345 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3346 dev_kfree_skb_any(skb_beacon); 3347 3348 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3349 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3350 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3351 bcn_total_len); 3352 3353 ret = rtw89_h2c_tx(rtwdev, skb, false); 3354 if (ret) { 3355 rtw89_err(rtwdev, "failed to send h2c\n"); 3356 goto fail; 3357 } 3358 3359 return 0; 3360 3361 fail: 3362 dev_kfree_skb_any(skb); 3363 3364 return ret; 3365 } 3366 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3367 3368 #define H2C_ROLE_MAINTAIN_LEN 4 3369 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3370 struct rtw89_vif *rtwvif, 3371 struct rtw89_sta *rtwsta, 3372 enum rtw89_upd_mode upd_mode) 3373 { 3374 struct sk_buff *skb; 3375 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 3376 u8 self_role; 3377 int ret; 3378 3379 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3380 if (rtwsta) 3381 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3382 else 3383 self_role = rtwvif->self_role; 3384 } else { 3385 self_role = rtwvif->self_role; 3386 } 3387 3388 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3389 if (!skb) { 3390 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3391 return -ENOMEM; 3392 } 3393 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3394 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3395 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3396 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3397 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 3398 3399 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3400 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3401 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3402 H2C_ROLE_MAINTAIN_LEN); 3403 3404 ret = rtw89_h2c_tx(rtwdev, skb, false); 3405 if (ret) { 3406 rtw89_err(rtwdev, "failed to send h2c\n"); 3407 goto fail; 3408 } 3409 3410 return 0; 3411 fail: 3412 dev_kfree_skb_any(skb); 3413 3414 return ret; 3415 } 3416 3417 static enum rtw89_fw_sta_type 3418 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3419 struct rtw89_sta *rtwsta) 3420 { 3421 struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta); 3422 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3423 3424 if (!sta) 3425 goto by_vif; 3426 3427 if (sta->deflink.eht_cap.has_eht) 3428 return RTW89_FW_BE_STA; 3429 else if (sta->deflink.he_cap.has_he) 3430 return RTW89_FW_AX_STA; 3431 else 3432 return RTW89_FW_N_AC_STA; 3433 3434 by_vif: 3435 if (vif->bss_conf.eht_support) 3436 return RTW89_FW_BE_STA; 3437 else if (vif->bss_conf.he_support) 3438 return RTW89_FW_AX_STA; 3439 else 3440 return RTW89_FW_N_AC_STA; 3441 } 3442 3443 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3444 struct rtw89_sta *rtwsta, bool dis_conn) 3445 { 3446 struct sk_buff *skb; 3447 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 3448 u8 self_role = rtwvif->self_role; 3449 enum rtw89_fw_sta_type sta_type; 3450 u8 net_type = rtwvif->net_type; 3451 struct rtw89_h2c_join_v1 *h2c_v1; 3452 struct rtw89_h2c_join *h2c; 3453 u32 len = sizeof(*h2c); 3454 bool format_v1 = false; 3455 int ret; 3456 3457 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3458 len = sizeof(*h2c_v1); 3459 format_v1 = true; 3460 } 3461 3462 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 3463 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3464 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3465 } 3466 3467 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3468 if (!skb) { 3469 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3470 return -ENOMEM; 3471 } 3472 skb_put(skb, len); 3473 h2c = (struct rtw89_h2c_join *)skb->data; 3474 3475 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3476 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3477 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3478 le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3479 le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3480 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3481 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3482 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3483 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3484 le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3485 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3486 le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3487 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3488 3489 if (!format_v1) 3490 goto done; 3491 3492 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3493 3494 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta); 3495 3496 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3497 h2c_v1->w2 = 0; 3498 3499 done: 3500 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3501 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3502 H2C_FUNC_MAC_JOININFO, 0, 1, 3503 len); 3504 3505 ret = rtw89_h2c_tx(rtwdev, skb, false); 3506 if (ret) { 3507 rtw89_err(rtwdev, "failed to send h2c\n"); 3508 goto fail; 3509 } 3510 3511 return 0; 3512 fail: 3513 dev_kfree_skb_any(skb); 3514 3515 return ret; 3516 } 3517 3518 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3519 { 3520 struct rtw89_h2c_notify_dbcc *h2c; 3521 u32 len = sizeof(*h2c); 3522 struct sk_buff *skb; 3523 int ret; 3524 3525 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3526 if (!skb) { 3527 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3528 return -ENOMEM; 3529 } 3530 skb_put(skb, len); 3531 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3532 3533 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3534 3535 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3536 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3537 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3538 len); 3539 3540 ret = rtw89_h2c_tx(rtwdev, skb, false); 3541 if (ret) { 3542 rtw89_err(rtwdev, "failed to send h2c\n"); 3543 goto fail; 3544 } 3545 3546 return 0; 3547 fail: 3548 dev_kfree_skb_any(skb); 3549 3550 return ret; 3551 } 3552 3553 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3554 bool pause) 3555 { 3556 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3557 struct rtw89_fw_macid_pause_grp *h2c; 3558 __le32 set = cpu_to_le32(BIT(sh)); 3559 u8 h2c_macid_pause_id; 3560 struct sk_buff *skb; 3561 u32 len; 3562 int ret; 3563 3564 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3565 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3566 len = sizeof(*h2c_new); 3567 } else { 3568 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3569 len = sizeof(*h2c); 3570 } 3571 3572 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3573 if (!skb) { 3574 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3575 return -ENOMEM; 3576 } 3577 skb_put(skb, len); 3578 3579 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 3580 h2c_new = (struct 
rtw89_fw_macid_pause_sleep_grp *)skb->data; 3581 3582 h2c_new->n[0].pause_mask_grp[grp] = set; 3583 h2c_new->n[0].sleep_mask_grp[grp] = set; 3584 if (pause) { 3585 h2c_new->n[0].pause_grp[grp] = set; 3586 h2c_new->n[0].sleep_grp[grp] = set; 3587 } 3588 } else { 3589 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3590 3591 h2c->mask_grp[grp] = set; 3592 if (pause) 3593 h2c->pause_grp[grp] = set; 3594 } 3595 3596 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3597 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3598 h2c_macid_pause_id, 1, 0, 3599 len); 3600 3601 ret = rtw89_h2c_tx(rtwdev, skb, false); 3602 if (ret) { 3603 rtw89_err(rtwdev, "failed to send h2c\n"); 3604 goto fail; 3605 } 3606 3607 return 0; 3608 fail: 3609 dev_kfree_skb_any(skb); 3610 3611 return ret; 3612 } 3613 3614 #define H2C_EDCA_LEN 12 3615 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3616 u8 ac, u32 val) 3617 { 3618 struct sk_buff *skb; 3619 int ret; 3620 3621 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3622 if (!skb) { 3623 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3624 return -ENOMEM; 3625 } 3626 skb_put(skb, H2C_EDCA_LEN); 3627 RTW89_SET_EDCA_SEL(skb->data, 0); 3628 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 3629 RTW89_SET_EDCA_WMM(skb->data, 0); 3630 RTW89_SET_EDCA_AC(skb->data, ac); 3631 RTW89_SET_EDCA_PARAM(skb->data, val); 3632 3633 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3634 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3635 H2C_FUNC_USR_EDCA, 0, 1, 3636 H2C_EDCA_LEN); 3637 3638 ret = rtw89_h2c_tx(rtwdev, skb, false); 3639 if (ret) { 3640 rtw89_err(rtwdev, "failed to send h2c\n"); 3641 goto fail; 3642 } 3643 3644 return 0; 3645 fail: 3646 dev_kfree_skb_any(skb); 3647 3648 return ret; 3649 } 3650 3651 #define H2C_TSF32_TOGL_LEN 4 3652 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3653 bool en) 3654 { 3655 struct sk_buff *skb; 3656 u16 early_us = en ? 
2000 : 0; 3657 u8 *cmd; 3658 int ret; 3659 3660 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3661 if (!skb) { 3662 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3663 return -ENOMEM; 3664 } 3665 skb_put(skb, H2C_TSF32_TOGL_LEN); 3666 cmd = skb->data; 3667 3668 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 3669 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3670 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 3671 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3672 3673 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3674 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3675 H2C_FUNC_TSF32_TOGL, 0, 0, 3676 H2C_TSF32_TOGL_LEN); 3677 3678 ret = rtw89_h2c_tx(rtwdev, skb, false); 3679 if (ret) { 3680 rtw89_err(rtwdev, "failed to send h2c\n"); 3681 goto fail; 3682 } 3683 3684 return 0; 3685 fail: 3686 dev_kfree_skb_any(skb); 3687 3688 return ret; 3689 } 3690 3691 #define H2C_OFLD_CFG_LEN 8 3692 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3693 { 3694 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3695 struct sk_buff *skb; 3696 int ret; 3697 3698 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3699 if (!skb) { 3700 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3701 return -ENOMEM; 3702 } 3703 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3704 3705 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3706 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3707 H2C_FUNC_OFLD_CFG, 0, 1, 3708 H2C_OFLD_CFG_LEN); 3709 3710 ret = rtw89_h2c_tx(rtwdev, skb, false); 3711 if (ret) { 3712 rtw89_err(rtwdev, "failed to send h2c\n"); 3713 goto fail; 3714 } 3715 3716 return 0; 3717 fail: 3718 dev_kfree_skb_any(skb); 3719 3720 return ret; 3721 } 3722 3723 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 3724 struct ieee80211_vif *vif, 3725 bool connect) 3726 { 3727 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3728 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 3729 s32 thold = RTW89_DEFAULT_CQM_THOLD; 3730 u32 hyst = RTW89_DEFAULT_CQM_HYST; 3731 struct rtw89_h2c_bcnfltr *h2c; 3732 u32 len = sizeof(*h2c); 3733 struct sk_buff *skb; 3734 int ret; 3735 3736 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3737 return -EINVAL; 3738 3739 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3740 return -EINVAL; 3741 3742 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3743 if (!skb) { 3744 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 3745 return -ENOMEM; 3746 } 3747 3748 skb_put(skb, len); 3749 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 3750 3751 if (bss_conf->cqm_rssi_hyst) 3752 hyst = bss_conf->cqm_rssi_hyst; 3753 if (bss_conf->cqm_rssi_thold) 3754 thold = bss_conf->cqm_rssi_thold; 3755 3756 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 3757 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 3758 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 3759 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 3760 RTW89_H2C_BCNFLTR_W0_MODE) | 3761 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 3762 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 3763 le32_encode_bits(thold + MAX_RSSI, 3764 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 3765 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 3766 3767 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3768 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3769 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 3770 3771 ret = rtw89_h2c_tx(rtwdev, skb, false); 3772 if (ret) { 3773 rtw89_err(rtwdev, "failed to send h2c\n"); 3774 goto fail; 3775 } 3776 3777 return 0; 3778 fail: 3779 dev_kfree_skb_any(skb); 3780 3781 return ret; 3782 } 3783 3784 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 3785 struct rtw89_rx_phy_ppdu *phy_ppdu) 3786 { 3787 struct rtw89_h2c_ofld_rssi *h2c; 3788 u32 len = sizeof(*h2c); 3789 struct sk_buff *skb; 3790 s8 rssi; 3791 int ret; 3792 3793 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3794 return -EINVAL; 3795 3796 if (!phy_ppdu) 3797 return -EINVAL; 3798 3799 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3800 if (!skb) { 3801 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 3802 return -ENOMEM; 3803 } 3804 3805 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 3806 skb_put(skb, len); 3807 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 3808 3809 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 3810 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 3811 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 3812 3813 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3814 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3815 H2C_FUNC_OFLD_RSSI, 0, 1, len); 3816 3817 ret = rtw89_h2c_tx(rtwdev, skb, false); 3818 if (ret) { 3819 rtw89_err(rtwdev, "failed to send h2c\n"); 3820 goto fail; 3821 } 3822 3823 return 0; 3824 fail: 3825 dev_kfree_skb_any(skb); 3826 3827 return ret; 3828 } 3829 3830 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3831 { 3832 struct rtw89_traffic_stats *stats = &rtwvif->stats; 3833 struct rtw89_h2c_ofld *h2c; 3834 u32 len = sizeof(*h2c); 3835 struct sk_buff *skb; 3836 int ret; 3837 3838 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3839 return -EINVAL; 3840 3841 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3842 if (!skb) { 3843 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 3844 return -ENOMEM; 3845 } 3846 3847 skb_put(skb, len); 3848 h2c = (struct rtw89_h2c_ofld 
*)skb->data; 3849 3850 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 3851 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 3852 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 3853 3854 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3855 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3856 H2C_FUNC_OFLD_TP, 0, 1, len); 3857 3858 ret = rtw89_h2c_tx(rtwdev, skb, false); 3859 if (ret) { 3860 rtw89_err(rtwdev, "failed to send h2c\n"); 3861 goto fail; 3862 } 3863 3864 return 0; 3865 fail: 3866 dev_kfree_skb_any(skb); 3867 3868 return ret; 3869 } 3870 3871 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 3872 { 3873 const struct rtw89_chip_info *chip = rtwdev->chip; 3874 struct rtw89_h2c_ra_v1 *h2c_v1; 3875 struct rtw89_h2c_ra *h2c; 3876 u32 len = sizeof(*h2c); 3877 bool format_v1 = false; 3878 struct sk_buff *skb; 3879 int ret; 3880 3881 if (chip->chip_gen == RTW89_CHIP_BE) { 3882 len = sizeof(*h2c_v1); 3883 format_v1 = true; 3884 } 3885 3886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3887 if (!skb) { 3888 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3889 return -ENOMEM; 3890 } 3891 skb_put(skb, len); 3892 h2c = (struct rtw89_h2c_ra *)skb->data; 3893 rtw89_debug(rtwdev, RTW89_DBG_RA, 3894 "ra cmd msk: %llx ", ra->ra_mask); 3895 3896 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 3897 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 3898 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 3899 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 3900 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 3901 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 3902 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 3903 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 3904 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 3905 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 3906 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 3907 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 3908 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 3909 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 3910 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 3911 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 3912 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 3913 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 3914 3915 if (!format_v1) 3916 goto csi; 3917 3918 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 3919 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 3920 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 3921 3922 csi: 3923 if (!csi) 3924 goto done; 3925 3926 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 3927 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 3928 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 3929 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 3930 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 3931 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 3932 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 3933 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 3934 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 3935 3936 done: 3937 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3938 
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 3939 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 3940 len); 3941 3942 ret = rtw89_h2c_tx(rtwdev, skb, false); 3943 if (ret) { 3944 rtw89_err(rtwdev, "failed to send h2c\n"); 3945 goto fail; 3946 } 3947 3948 return 0; 3949 fail: 3950 dev_kfree_skb_any(skb); 3951 3952 return ret; 3953 } 3954 3955 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 3956 { 3957 struct rtw89_btc *btc = &rtwdev->btc; 3958 struct rtw89_btc_dm *dm = &btc->dm; 3959 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 3960 struct rtw89_btc_module *module = &init_info->module; 3961 struct rtw89_btc_ant_info *ant = &module->ant; 3962 struct rtw89_h2c_cxinit *h2c; 3963 u32 len = sizeof(*h2c); 3964 struct sk_buff *skb; 3965 int ret; 3966 3967 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3968 if (!skb) { 3969 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 3970 return -ENOMEM; 3971 } 3972 skb_put(skb, len); 3973 h2c = (struct rtw89_h2c_cxinit *)skb->data; 3974 3975 h2c->hdr.type = type; 3976 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 3977 3978 h2c->ant_type = ant->type; 3979 h2c->ant_num = ant->num; 3980 h2c->ant_iso = ant->isolation; 3981 h2c->ant_info = 3982 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 3983 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 3984 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 3985 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 3986 3987 h2c->mod_rfe = module->rfe_type; 3988 h2c->mod_cv = module->cv; 3989 h2c->mod_info = 3990 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 3991 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 3992 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 3993 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 3994 h2c->mod_adie_kt = module->kt_ver_adie; 3995 h2c->wl_gch = init_info->wl_guard_ch; 3996 3997 h2c->info = 3998 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 3999 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4000 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4001 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4002 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4003 4004 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4005 H2C_CAT_OUTSRC, BTFC_SET, 4006 SET_DRV_INFO, 0, 0, 4007 len); 4008 4009 ret = rtw89_h2c_tx(rtwdev, skb, false); 4010 if (ret) { 4011 rtw89_err(rtwdev, "failed to send h2c\n"); 4012 goto fail; 4013 } 4014 4015 return 0; 4016 fail: 4017 dev_kfree_skb_any(skb); 4018 4019 return ret; 4020 } 4021 4022 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4023 { 4024 struct rtw89_btc *btc = &rtwdev->btc; 4025 struct rtw89_btc_dm *dm = &btc->dm; 4026 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4027 struct rtw89_h2c_cxinit_v7 *h2c; 4028 u32 len = sizeof(*h2c); 4029 struct sk_buff *skb; 4030 int ret; 4031 4032 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4033 if (!skb) { 4034 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4035 return -ENOMEM; 4036 } 4037 skb_put(skb, len); 4038 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4039 4040 h2c->hdr.type = type; 4041 h2c->hdr.ver = btc->ver->fcxinit; 4042 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4043 h2c->init = *init_info; 4044 4045 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4046 H2C_CAT_OUTSRC, 
BTFC_SET, 4047 SET_DRV_INFO, 0, 0, 4048 len); 4049 4050 ret = rtw89_h2c_tx(rtwdev, skb, false); 4051 if (ret) { 4052 rtw89_err(rtwdev, "failed to send h2c\n"); 4053 goto fail; 4054 } 4055 4056 return 0; 4057 fail: 4058 dev_kfree_skb_any(skb); 4059 4060 return ret; 4061 } 4062 4063 #define PORT_DATA_OFFSET 4 4064 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4065 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4066 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4067 4068 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4069 { 4070 struct rtw89_btc *btc = &rtwdev->btc; 4071 const struct rtw89_btc_ver *ver = btc->ver; 4072 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4073 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4074 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4075 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4076 struct sk_buff *skb; 4077 u32 len; 4078 u8 offset = 0; 4079 u8 *cmd; 4080 int ret; 4081 int i; 4082 4083 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4084 4085 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4086 if (!skb) { 4087 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4088 return -ENOMEM; 4089 } 4090 skb_put(skb, len); 4091 cmd = skb->data; 4092 4093 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4094 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4095 4096 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4097 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4098 4099 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4100 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4101 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4102 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4103 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4104 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4105 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4106 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4107 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4108 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4109 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4110 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4111 4112 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4113 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4114 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4115 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4116 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4117 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4118 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4119 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4120 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4121 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4122 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4123 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4124 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4125 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4126 } 4127 4128 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4129 H2C_CAT_OUTSRC, BTFC_SET, 4130 SET_DRV_INFO, 0, 0, 4131 len); 4132 4133 ret = rtw89_h2c_tx(rtwdev, skb, false); 4134 if (ret) { 4135 rtw89_err(rtwdev, "failed to send h2c\n"); 4136 goto fail; 4137 } 4138 4139 return 0; 4140 fail: 4141 dev_kfree_skb_any(skb); 4142 4143 
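	/*
	 * The fail path here follows the convention used by every H2C
	 * builder in this file: rtw89_h2c_tx() is not expected to free
	 * the skb on error, so it is released above and the error code
	 * is returned unchanged.
	 */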
return ret; 4144 } 4145 4146 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4147 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4148 4149 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4150 { 4151 struct rtw89_btc *btc = &rtwdev->btc; 4152 const struct rtw89_btc_ver *ver = btc->ver; 4153 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4154 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4155 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4156 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4157 struct sk_buff *skb; 4158 u32 len; 4159 u8 *cmd, offset; 4160 int ret; 4161 int i; 4162 4163 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4164 4165 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4166 if (!skb) { 4167 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4168 return -ENOMEM; 4169 } 4170 skb_put(skb, len); 4171 cmd = skb->data; 4172 4173 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4174 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4175 4176 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4177 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4178 4179 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4180 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4181 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4182 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4183 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4184 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4185 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4186 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4187 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4188 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4189 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4190 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4191 4192 offset = PORT_DATA_OFFSET; 4193 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4194 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4195 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4196 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4197 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4198 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4199 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4200 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4201 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4202 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4203 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4204 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4205 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4206 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4207 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4208 } 4209 4210 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4211 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4212 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4213 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4214 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4215 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4216 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, 
offset); 4217 4218 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4219 H2C_CAT_OUTSRC, BTFC_SET, 4220 SET_DRV_INFO, 0, 0, 4221 len); 4222 4223 ret = rtw89_h2c_tx(rtwdev, skb, false); 4224 if (ret) { 4225 rtw89_err(rtwdev, "failed to send h2c\n"); 4226 goto fail; 4227 } 4228 4229 return 0; 4230 fail: 4231 dev_kfree_skb_any(skb); 4232 4233 return ret; 4234 } 4235 4236 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4237 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4238 4239 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4240 { 4241 struct rtw89_btc *btc = &rtwdev->btc; 4242 const struct rtw89_btc_ver *ver = btc->ver; 4243 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4244 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4245 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4246 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4247 struct sk_buff *skb; 4248 u32 len; 4249 u8 *cmd, offset; 4250 int ret; 4251 int i; 4252 4253 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4254 4255 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4256 if (!skb) { 4257 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4258 return -ENOMEM; 4259 } 4260 skb_put(skb, len); 4261 cmd = skb->data; 4262 4263 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4264 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4265 4266 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4267 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4268 4269 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4270 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4271 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4272 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4273 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4274 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4275 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4276 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4277 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4278 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4279 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4280 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4281 4282 offset = PORT_DATA_OFFSET; 4283 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4284 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4285 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4286 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4287 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4288 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4289 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4290 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4291 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4292 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4293 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4294 } 4295 4296 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4297 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4298 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4299 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4300 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4301 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, 
offset); 4302 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4303 4304 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4305 H2C_CAT_OUTSRC, BTFC_SET, 4306 SET_DRV_INFO, 0, 0, 4307 len); 4308 4309 ret = rtw89_h2c_tx(rtwdev, skb, false); 4310 if (ret) { 4311 rtw89_err(rtwdev, "failed to send h2c\n"); 4312 goto fail; 4313 } 4314 4315 return 0; 4316 fail: 4317 dev_kfree_skb_any(skb); 4318 4319 return ret; 4320 } 4321 4322 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4323 { 4324 struct rtw89_btc *btc = &rtwdev->btc; 4325 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4326 struct rtw89_h2c_cxrole_v8 *h2c; 4327 u32 len = sizeof(*h2c); 4328 struct sk_buff *skb; 4329 int ret; 4330 4331 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4332 if (!skb) { 4333 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4334 return -ENOMEM; 4335 } 4336 skb_put(skb, len); 4337 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4338 4339 h2c->hdr.type = type; 4340 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4341 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4342 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4343 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4344 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4345 4346 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4347 H2C_CAT_OUTSRC, BTFC_SET, 4348 SET_DRV_INFO, 0, 0, 4349 len); 4350 4351 ret = rtw89_h2c_tx(rtwdev, skb, false); 4352 if (ret) { 4353 rtw89_err(rtwdev, "failed to send h2c\n"); 4354 goto fail; 4355 } 4356 4357 return 0; 4358 fail: 4359 dev_kfree_skb_any(skb); 4360 4361 return ret; 4362 } 4363 4364 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4365 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4366 { 4367 struct rtw89_btc *btc = &rtwdev->btc; 4368 const struct rtw89_btc_ver *ver = btc->ver; 4369 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4370 struct sk_buff *skb; 4371 u8 *cmd; 4372 int ret; 4373 4374 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4375 if (!skb) { 4376 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4377 return -ENOMEM; 4378 } 4379 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4380 cmd = skb->data; 4381 4382 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4383 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4384 4385 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4386 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4387 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4388 if (ver->fcxctrl == 0) 4389 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4390 4391 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4392 H2C_CAT_OUTSRC, BTFC_SET, 4393 SET_DRV_INFO, 0, 0, 4394 H2C_LEN_CXDRVINFO_CTRL); 4395 4396 ret = rtw89_h2c_tx(rtwdev, skb, false); 4397 if (ret) { 4398 rtw89_err(rtwdev, "failed to send h2c\n"); 4399 goto fail; 4400 } 4401 4402 return 0; 4403 fail: 4404 dev_kfree_skb_any(skb); 4405 4406 return ret; 4407 } 4408 4409 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4410 { 4411 struct rtw89_btc *btc = &rtwdev->btc; 4412 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4413 struct rtw89_h2c_cxctrl_v7 *h2c; 4414 u32 len = sizeof(*h2c); 4415 struct sk_buff *skb; 4416 int ret; 4417 4418 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4419 if (!skb) { 4420 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4421 return -ENOMEM; 4422 } 4423 skb_put(skb, len); 4424 h2c = 
(struct rtw89_h2c_cxctrl_v7 *)skb->data; 4425 4426 h2c->hdr.type = type; 4427 h2c->hdr.ver = btc->ver->fcxctrl; 4428 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4429 h2c->ctrl = *ctrl; 4430 4431 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4432 H2C_CAT_OUTSRC, BTFC_SET, 4433 SET_DRV_INFO, 0, 0, len); 4434 4435 ret = rtw89_h2c_tx(rtwdev, skb, false); 4436 if (ret) { 4437 rtw89_err(rtwdev, "failed to send h2c\n"); 4438 goto fail; 4439 } 4440 4441 return 0; 4442 fail: 4443 dev_kfree_skb_any(skb); 4444 4445 return ret; 4446 } 4447 4448 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4449 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4450 { 4451 struct rtw89_btc *btc = &rtwdev->btc; 4452 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4453 struct sk_buff *skb; 4454 u8 *cmd; 4455 int ret; 4456 4457 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4458 if (!skb) { 4459 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4460 return -ENOMEM; 4461 } 4462 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4463 cmd = skb->data; 4464 4465 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4466 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4467 4468 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4469 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4470 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4471 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4472 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4473 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4474 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4475 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4476 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4477 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4478 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4479 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4480 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4481 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4482 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4483 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4484 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4485 4486 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4487 H2C_CAT_OUTSRC, BTFC_SET, 4488 SET_DRV_INFO, 0, 0, 4489 H2C_LEN_CXDRVINFO_TRX); 4490 4491 ret = rtw89_h2c_tx(rtwdev, skb, false); 4492 if (ret) { 4493 rtw89_err(rtwdev, "failed to send h2c\n"); 4494 goto fail; 4495 } 4496 4497 return 0; 4498 fail: 4499 dev_kfree_skb_any(skb); 4500 4501 return ret; 4502 } 4503 4504 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4505 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4506 { 4507 struct rtw89_btc *btc = &rtwdev->btc; 4508 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4509 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4510 struct sk_buff *skb; 4511 u8 *cmd; 4512 int ret; 4513 4514 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4515 if (!skb) { 4516 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4517 return -ENOMEM; 4518 } 4519 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4520 cmd = skb->data; 4521 4522 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4523 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4524 4525 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4526 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4527 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4528 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4529 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4530 4531 
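	/*
	 * Like the other cxdrv_* driver-info commands, the RFK state is
	 * carried in an OUTSRC/BTFC_SET/SET_DRV_INFO H2C. The two zero
	 * flags passed to rtw89_h2c_pkt_set_hdr() below correspond to
	 * the rack/dack arguments (presumably the receive/done firmware
	 * acknowledgements), which none of these info commands request.
	 */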
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4532 H2C_CAT_OUTSRC, BTFC_SET, 4533 SET_DRV_INFO, 0, 0, 4534 H2C_LEN_CXDRVINFO_RFK); 4535 4536 ret = rtw89_h2c_tx(rtwdev, skb, false); 4537 if (ret) { 4538 rtw89_err(rtwdev, "failed to send h2c\n"); 4539 goto fail; 4540 } 4541 4542 return 0; 4543 fail: 4544 dev_kfree_skb_any(skb); 4545 4546 return ret; 4547 } 4548 4549 #define H2C_LEN_PKT_OFLD 4 4550 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4551 { 4552 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4553 struct sk_buff *skb; 4554 unsigned int cond; 4555 u8 *cmd; 4556 int ret; 4557 4558 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4559 if (!skb) { 4560 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4561 return -ENOMEM; 4562 } 4563 skb_put(skb, H2C_LEN_PKT_OFLD); 4564 cmd = skb->data; 4565 4566 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4567 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4568 4569 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4570 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4571 H2C_FUNC_PACKET_OFLD, 1, 1, 4572 H2C_LEN_PKT_OFLD); 4573 4574 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4575 4576 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4577 if (ret < 0) { 4578 rtw89_debug(rtwdev, RTW89_DBG_FW, 4579 "failed to del pkt ofld: id %d, ret %d\n", 4580 id, ret); 4581 return ret; 4582 } 4583 4584 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4585 return 0; 4586 } 4587 4588 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4589 struct sk_buff *skb_ofld) 4590 { 4591 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4592 struct sk_buff *skb; 4593 unsigned int cond; 4594 u8 *cmd; 4595 u8 alloc_id; 4596 int ret; 4597 4598 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4599 RTW89_MAX_PKT_OFLD_NUM); 4600 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4601 return -ENOSPC; 4602 4603 *id = alloc_id; 4604 4605 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4606 if (!skb) { 4607 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4608 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4609 return -ENOMEM; 4610 } 4611 skb_put(skb, H2C_LEN_PKT_OFLD); 4612 cmd = skb->data; 4613 4614 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4615 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4616 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4617 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4618 4619 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4620 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4621 H2C_FUNC_PACKET_OFLD, 1, 1, 4622 H2C_LEN_PKT_OFLD + skb_ofld->len); 4623 4624 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4625 4626 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4627 if (ret < 0) { 4628 rtw89_debug(rtwdev, RTW89_DBG_FW, 4629 "failed to add pkt ofld: id %d, ret %d\n", 4630 alloc_id, ret); 4631 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4632 return ret; 4633 } 4634 4635 return 0; 4636 } 4637 4638 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4639 struct list_head *chan_list) 4640 { 4641 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4642 struct rtw89_h2c_chinfo_elem *elem; 4643 struct rtw89_mac_chinfo *ch_info; 4644 struct rtw89_h2c_chinfo *h2c; 4645 struct sk_buff *skb; 4646 unsigned int cond; 4647 int skb_len; 4648 int ret; 4649 4650 static_assert(sizeof(*elem) == 
RTW89_MAC_CHINFO_SIZE); 4651 4652 skb_len = struct_size(h2c, elem, ch_num); 4653 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4654 if (!skb) { 4655 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4656 return -ENOMEM; 4657 } 4658 skb_put(skb, sizeof(*h2c)); 4659 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4660 4661 h2c->ch_num = ch_num; 4662 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4663 4664 list_for_each_entry(ch_info, chan_list, list) { 4665 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 4666 4667 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 4668 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 4669 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 4670 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 4671 4672 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 4673 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 4674 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 4675 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 4676 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 4677 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 4678 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 4679 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 4680 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 4681 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 4682 4683 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 4684 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 4685 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 4686 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 4687 4688 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 4689 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 4690 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 4691 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 4692 } 4693 4694 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4695 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4696 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4697 4698 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4699 4700 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4701 if (ret) { 4702 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4703 return ret; 4704 } 4705 4706 return 0; 4707 } 4708 4709 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 4710 struct list_head *chan_list) 4711 { 4712 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4713 struct rtw89_h2c_chinfo_elem_be *elem; 4714 struct rtw89_mac_chinfo_be *ch_info; 4715 struct rtw89_h2c_chinfo *h2c; 4716 struct sk_buff *skb; 4717 unsigned int cond; 4718 int skb_len; 4719 int ret; 4720 4721 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4722 4723 skb_len = struct_size(h2c, elem, ch_num); 4724 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4725 if (!skb) { 4726 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4727 return -ENOMEM; 4728 } 4729 4730 skb_put(skb, sizeof(*h2c)); 4731 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4732 4733 h2c->ch_num = ch_num; 4734 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4735 h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 4736 4737 
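	/*
	 * Each channel in chan_list is serialized into one fixed-size
	 * rtw89_h2c_chinfo_elem_be appended behind the rtw89_h2c_chinfo
	 * header built above. A probe_id of 0xff marks a channel with no
	 * probe request template bound to it, which is why W1_PROBE is
	 * only set for other ids.
	 */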
list_for_each_entry(ch_info, chan_list, list) { 4738 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 4739 4740 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 4741 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4742 le32_encode_bits(ch_info->central_ch, 4743 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 4744 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 4745 4746 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 4747 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 4748 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 4749 le32_encode_bits(ch_info->pause_data, 4750 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 4751 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 4752 le32_encode_bits(ch_info->rand_seq_num, 4753 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 4754 le32_encode_bits(ch_info->notify_action, 4755 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 4756 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 4757 RTW89_H2C_CHINFO_BE_W1_PROBE) | 4758 le32_encode_bits(ch_info->leave_crit, 4759 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 4760 le32_encode_bits(ch_info->chkpt_timer, 4761 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 4762 4763 elem->w2 = le32_encode_bits(ch_info->leave_time, 4764 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 4765 le32_encode_bits(ch_info->leave_th, 4766 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 4767 le32_encode_bits(ch_info->tx_pkt_ctrl, 4768 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 4769 4770 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 4771 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 4772 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 4773 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 4774 4775 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 4776 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 4777 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 4778 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 4779 4780 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 4781 le32_encode_bits(ch_info->fw_probe0_ssids, 4782 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 4783 4784 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 4785 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 4786 le32_encode_bits(ch_info->fw_probe0_bssids, 4787 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 4788 } 4789 4790 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4791 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4792 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4793 4794 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4795 4796 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4797 if (ret) { 4798 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4799 return ret; 4800 } 4801 4802 return 0; 4803 } 4804 4805 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 4806 struct rtw89_scan_option *option, 4807 struct rtw89_vif *rtwvif) 4808 { 4809 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4810 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4811 struct rtw89_h2c_scanofld *h2c; 4812 u32 len = sizeof(*h2c); 4813 struct sk_buff *skb; 4814 unsigned int cond; 4815 int ret; 4816 4817 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4818 if (!skb) { 4819 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4820 return -ENOMEM; 4821 } 4822 skb_put(skb, len); 
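	/*
	 * W0 below identifies the requesting vif (mac_id, port, band) and
	 * whether scan offload is being enabled; W1 selects an immediate,
	 * one-shot scan with an end notification. When target_ch_mode is
	 * set, the operating channel parameters are added as well,
	 * presumably so firmware knows which channel to fall back to
	 * between scan periods.
	 */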
4823 h2c = (struct rtw89_h2c_scanofld *)skb->data; 4824 4825 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 4826 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 4827 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 4828 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 4829 4830 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 4831 le32_encode_bits(option->target_ch_mode, 4832 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 4833 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 4834 RTW89_H2C_SCANOFLD_W1_START_MODE) | 4835 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 4836 4837 if (option->target_ch_mode) { 4838 h2c->w1 |= le32_encode_bits(op->band_width, 4839 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 4840 le32_encode_bits(op->primary_channel, 4841 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 4842 le32_encode_bits(op->channel, 4843 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 4844 h2c->w0 |= le32_encode_bits(op->band_type, 4845 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 4846 } 4847 4848 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4849 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4850 H2C_FUNC_SCANOFLD, 1, 1, 4851 len); 4852 4853 if (option->enable) 4854 cond = RTW89_SCANOFLD_WAIT_COND_START; 4855 else 4856 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 4857 4858 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4859 if (ret) { 4860 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 4861 return ret; 4862 } 4863 4864 return 0; 4865 } 4866 4867 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 4868 struct rtw89_scan_option *option) 4869 { 4870 struct ieee80211_supported_band *sband; 4871 struct ieee80211_channel *chan; 4872 u8 i, idx; 4873 4874 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 4875 if (!sband) { 4876 option->prohib_chan = U64_MAX; 4877 return; 4878 } 4879 4880 for (i = 0; i < sband->n_channels; i++) { 4881 chan = &sband->channels[i]; 4882 if (chan->flags & IEEE80211_CHAN_DISABLED) { 4883 idx = (chan->hw_value - 1) / 4; 4884 option->prohib_chan |= BIT(idx); 4885 } 4886 } 4887 } 4888 4889 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 4890 struct rtw89_scan_option *option, 4891 struct rtw89_vif *rtwvif) 4892 { 4893 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4894 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4895 struct cfg80211_scan_request *req = rtwvif->scan_req; 4896 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 4897 struct rtw89_chan *op = &scan_info->op_chan; 4898 struct rtw89_h2c_scanofld_be_opch *opch; 4899 struct rtw89_pktofld_info *pkt_info; 4900 struct rtw89_h2c_scanofld_be *h2c; 4901 struct sk_buff *skb; 4902 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 4903 u8 opch_size = sizeof(*opch) * option->num_opch; 4904 u8 probe_id[NUM_NL80211_BANDS]; 4905 unsigned int cond; 4906 void *ptr; 4907 int ret; 4908 u32 len; 4909 u8 i; 4910 4911 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 4912 4913 len = sizeof(*h2c) + macc_role_size + opch_size; 4914 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4915 if (!skb) { 4916 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4917 return -ENOMEM; 4918 } 4919 4920 skb_put(skb, len); 4921 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 4922 ptr = skb->data; 4923 4924 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 4925 4926 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 4927 if (pkt_info->wildcard_6ghz) { 4928 /* 
Provide wildcard as template */ 4929 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 4930 break; 4931 } 4932 } 4933 4934 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 4935 le32_encode_bits(option->scan_mode, 4936 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 4937 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 4938 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 4939 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 4940 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 4941 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 4942 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 4943 4944 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 4945 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 4946 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 4947 4948 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 4949 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 4950 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 4951 4952 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 4953 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 4954 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 4955 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 4956 4957 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 4958 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 4959 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 4960 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 4961 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 4962 4963 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 4964 4965 h2c->w6 = le32_encode_bits(option->prohib_chan, 4966 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 4967 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 4968 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 4969 if (req->no_cck) { 4970 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 4971 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 4972 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 4973 le32_encode_bits(RTW89_HW_RATE_OFDM6, 4974 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 4975 le32_encode_bits(RTW89_HW_RATE_OFDM6, 4976 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 4977 } 4978 ptr += sizeof(*h2c); 4979 4980 for (i = 0; i < option->num_macc_role; i++) { 4981 macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i]; 4982 macc_role->w0 = 4983 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 4984 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 4985 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 4986 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 4987 ptr += sizeof(*macc_role); 4988 } 4989 4990 for (i = 0; i < option->num_opch; i++) { 4991 opch = ptr; 4992 opch->w0 = le32_encode_bits(rtwvif->mac_id, 4993 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 4994 le32_encode_bits(option->band, 4995 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 4996 le32_encode_bits(rtwvif->port, 4997 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 4998 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 4999 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5000 le32_encode_bits(true, 5001 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5002 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5003 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5004 5005 opch->w1 = 
le32_encode_bits(RTW89_CHANNEL_TIME, 5006 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 5007 le32_encode_bits(op->band_type, 5008 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5009 le32_encode_bits(op->band_width, 5010 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5011 le32_encode_bits(0x3, 5012 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5013 le32_encode_bits(op->primary_channel, 5014 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5015 le32_encode_bits(op->channel, 5016 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5017 5018 opch->w2 = le32_encode_bits(0, 5019 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5020 le32_encode_bits(0, 5021 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5022 le32_encode_bits(2, 5023 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5024 5025 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5026 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5027 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5028 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5029 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5030 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5031 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5032 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5033 ptr += sizeof(*opch); 5034 } 5035 5036 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5037 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5038 H2C_FUNC_SCANOFLD_BE, 1, 1, 5039 len); 5040 5041 if (option->enable) 5042 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5043 else 5044 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5045 5046 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5047 if (ret) { 5048 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5049 return ret; 5050 } 5051 5052 return 0; 5053 } 5054 5055 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5056 struct rtw89_fw_h2c_rf_reg_info *info, 5057 u16 len, u8 page) 5058 { 5059 struct sk_buff *skb; 5060 u8 class = info->rf_path == RF_PATH_A ? 
5061 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5062 int ret; 5063 5064 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5065 if (!skb) { 5066 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5067 return -ENOMEM; 5068 } 5069 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5070 5071 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5072 H2C_CAT_OUTSRC, class, page, 0, 0, 5073 len); 5074 5075 ret = rtw89_h2c_tx(rtwdev, skb, false); 5076 if (ret) { 5077 rtw89_err(rtwdev, "failed to send h2c\n"); 5078 goto fail; 5079 } 5080 5081 return 0; 5082 fail: 5083 dev_kfree_skb_any(skb); 5084 5085 return ret; 5086 } 5087 5088 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5089 { 5090 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5091 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5092 struct sk_buff *skb; 5093 int ret; 5094 u8 idx; 5095 5096 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5097 if (!skb) { 5098 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5099 return -ENOMEM; 5100 } 5101 skb_put(skb, sizeof(*mccch)); 5102 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5103 5104 idx = rfk_mcc->table_idx; 5105 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5106 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5107 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5108 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5109 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5110 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5111 5112 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5113 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5114 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5115 sizeof(*mccch)); 5116 5117 ret = rtw89_h2c_tx(rtwdev, skb, false); 5118 if (ret) { 5119 rtw89_err(rtwdev, "failed to send h2c\n"); 5120 goto fail; 5121 } 5122 5123 return 0; 5124 fail: 5125 dev_kfree_skb_any(skb); 5126 5127 return ret; 5128 } 5129 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5130 5131 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5132 enum rtw89_phy_idx phy_idx) 5133 { 5134 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5135 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5136 u8 tbl_sel = rfk_mcc->table_idx; 5137 u32 len = sizeof(*h2c); 5138 struct sk_buff *skb; 5139 u8 tbl, path; 5140 u32 val32; 5141 int ret; 5142 5143 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5144 if (!skb) { 5145 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5146 return -ENOMEM; 5147 } 5148 skb_put(skb, len); 5149 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5150 5151 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5152 5153 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5154 5155 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5156 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5157 h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]); 5158 h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]); 5159 } 5160 } 5161 5162 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5163 h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5164 h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5165 } 5166 5167 h2c->phy_idx = cpu_to_le32(phy_idx); 5168 h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5169 h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); 5170 h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5171 5172 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5173 h2c->ktbl_sel0 = cpu_to_le32(val32); 5174 val32 = 
rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5175 h2c->ktbl_sel1 = cpu_to_le32(val32); 5176 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5177 h2c->rfmod0 = cpu_to_le32(val32); 5178 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5179 h2c->rfmod1 = cpu_to_le32(val32); 5180 5181 if (rtw89_is_mlo_1_1(rtwdev)) 5182 h2c->mlo_1_1 = cpu_to_le32(1); 5183 5184 h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5185 5186 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5187 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5188 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5189 len); 5190 5191 ret = rtw89_h2c_tx(rtwdev, skb, false); 5192 if (ret) { 5193 rtw89_err(rtwdev, "failed to send h2c\n"); 5194 goto fail; 5195 } 5196 5197 return 0; 5198 fail: 5199 dev_kfree_skb_any(skb); 5200 5201 return ret; 5202 } 5203 5204 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5205 enum rtw89_tssi_mode tssi_mode) 5206 { 5207 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5208 RTW89_SUB_ENTITY_0); 5209 struct rtw89_hal *hal = &rtwdev->hal; 5210 struct rtw89_h2c_rf_tssi *h2c; 5211 u32 len = sizeof(*h2c); 5212 struct sk_buff *skb; 5213 int ret; 5214 5215 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5216 if (!skb) { 5217 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5218 return -ENOMEM; 5219 } 5220 skb_put(skb, len); 5221 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5222 5223 h2c->len = cpu_to_le16(len); 5224 h2c->phy = phy_idx; 5225 h2c->ch = chan->channel; 5226 h2c->bw = chan->band_width; 5227 h2c->band = chan->band_type; 5228 h2c->hwtx_en = true; 5229 h2c->cv = hal->cv; 5230 h2c->tssi_mode = tssi_mode; 5231 5232 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5233 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5234 5235 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5236 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5237 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5238 5239 ret = rtw89_h2c_tx(rtwdev, skb, false); 5240 if (ret) { 5241 rtw89_err(rtwdev, "failed to send h2c\n"); 5242 goto fail; 5243 } 5244 5245 return 0; 5246 fail: 5247 dev_kfree_skb_any(skb); 5248 5249 return ret; 5250 } 5251 5252 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5253 { 5254 struct rtw89_h2c_rf_iqk *h2c; 5255 u32 len = sizeof(*h2c); 5256 struct sk_buff *skb; 5257 int ret; 5258 5259 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5260 if (!skb) { 5261 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5262 return -ENOMEM; 5263 } 5264 skb_put(skb, len); 5265 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5266 5267 h2c->phy_idx = cpu_to_le32(phy_idx); 5268 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5269 5270 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5271 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5272 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5273 5274 ret = rtw89_h2c_tx(rtwdev, skb, false); 5275 if (ret) { 5276 rtw89_err(rtwdev, "failed to send h2c\n"); 5277 goto fail; 5278 } 5279 5280 return 0; 5281 fail: 5282 dev_kfree_skb_any(skb); 5283 5284 return ret; 5285 } 5286 5287 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5288 { 5289 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5290 RTW89_SUB_ENTITY_0); 5291 struct rtw89_h2c_rf_dpk *h2c; 5292 u32 len = sizeof(*h2c); 5293 struct sk_buff *skb; 5294 int ret; 5295 5296 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5297 if (!skb) { 5298 rtw89_err(rtwdev, "failed to alloc skb 
for h2c RF DPK\n"); 5299 return -ENOMEM; 5300 } 5301 skb_put(skb, len); 5302 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5303 5304 h2c->len = len; 5305 h2c->phy = phy_idx; 5306 h2c->dpk_enable = true; 5307 h2c->kpath = RF_AB; 5308 h2c->cur_band = chan->band_type; 5309 h2c->cur_bw = chan->band_width; 5310 h2c->cur_ch = chan->channel; 5311 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5312 5313 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5314 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5315 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5316 5317 ret = rtw89_h2c_tx(rtwdev, skb, false); 5318 if (ret) { 5319 rtw89_err(rtwdev, "failed to send h2c\n"); 5320 goto fail; 5321 } 5322 5323 return 0; 5324 fail: 5325 dev_kfree_skb_any(skb); 5326 5327 return ret; 5328 } 5329 5330 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5331 { 5332 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5333 RTW89_SUB_ENTITY_0); 5334 struct rtw89_hal *hal = &rtwdev->hal; 5335 struct rtw89_h2c_rf_txgapk *h2c; 5336 u32 len = sizeof(*h2c); 5337 struct sk_buff *skb; 5338 int ret; 5339 5340 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5341 if (!skb) { 5342 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5343 return -ENOMEM; 5344 } 5345 skb_put(skb, len); 5346 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5347 5348 h2c->len = len; 5349 h2c->ktype = 2; 5350 h2c->phy = phy_idx; 5351 h2c->kpath = RF_AB; 5352 h2c->band = chan->band_type; 5353 h2c->bw = chan->band_width; 5354 h2c->ch = chan->channel; 5355 h2c->cv = hal->cv; 5356 5357 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5358 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5359 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5360 5361 ret = rtw89_h2c_tx(rtwdev, skb, false); 5362 if (ret) { 5363 rtw89_err(rtwdev, "failed to send h2c\n"); 5364 goto fail; 5365 } 5366 5367 return 0; 5368 fail: 5369 dev_kfree_skb_any(skb); 5370 5371 return ret; 5372 } 5373 5374 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5375 { 5376 struct rtw89_h2c_rf_dack *h2c; 5377 u32 len = sizeof(*h2c); 5378 struct sk_buff *skb; 5379 int ret; 5380 5381 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5382 if (!skb) { 5383 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5384 return -ENOMEM; 5385 } 5386 skb_put(skb, len); 5387 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5388 5389 h2c->len = cpu_to_le32(len); 5390 h2c->phy = cpu_to_le32(phy_idx); 5391 h2c->type = cpu_to_le32(0); 5392 5393 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5394 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5395 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5396 5397 ret = rtw89_h2c_tx(rtwdev, skb, false); 5398 if (ret) { 5399 rtw89_err(rtwdev, "failed to send h2c\n"); 5400 goto fail; 5401 } 5402 5403 return 0; 5404 fail: 5405 dev_kfree_skb_any(skb); 5406 5407 return ret; 5408 } 5409 5410 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5411 { 5412 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5413 RTW89_SUB_ENTITY_0); 5414 struct rtw89_h2c_rf_rxdck *h2c; 5415 u32 len = sizeof(*h2c); 5416 struct sk_buff *skb; 5417 int ret; 5418 5419 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5420 if (!skb) { 5421 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5422 return -ENOMEM; 5423 } 5424 skb_put(skb, len); 5425 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5426 5427 h2c->len = len; 5428 h2c->phy = phy_idx; 5429 h2c->is_afe = false; 5430 h2c->kpath = RF_AB; 5431 h2c->cur_band 
= chan->band_type; 5432 h2c->cur_bw = chan->band_width; 5433 h2c->cur_ch = chan->channel; 5434 h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5435 5436 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5437 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5438 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5439 5440 ret = rtw89_h2c_tx(rtwdev, skb, false); 5441 if (ret) { 5442 rtw89_err(rtwdev, "failed to send h2c\n"); 5443 goto fail; 5444 } 5445 5446 return 0; 5447 fail: 5448 dev_kfree_skb_any(skb); 5449 5450 return ret; 5451 } 5452 5453 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5454 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5455 bool rack, bool dack) 5456 { 5457 struct sk_buff *skb; 5458 int ret; 5459 5460 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5461 if (!skb) { 5462 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5463 return -ENOMEM; 5464 } 5465 skb_put_data(skb, buf, len); 5466 5467 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5468 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5469 len); 5470 5471 ret = rtw89_h2c_tx(rtwdev, skb, false); 5472 if (ret) { 5473 rtw89_err(rtwdev, "failed to send h2c\n"); 5474 goto fail; 5475 } 5476 5477 return 0; 5478 fail: 5479 dev_kfree_skb_any(skb); 5480 5481 return ret; 5482 } 5483 5484 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5485 { 5486 struct sk_buff *skb; 5487 int ret; 5488 5489 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5490 if (!skb) { 5491 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5492 return -ENOMEM; 5493 } 5494 skb_put_data(skb, buf, len); 5495 5496 ret = rtw89_h2c_tx(rtwdev, skb, false); 5497 if (ret) { 5498 rtw89_err(rtwdev, "failed to send h2c\n"); 5499 goto fail; 5500 } 5501 5502 return 0; 5503 fail: 5504 dev_kfree_skb_any(skb); 5505 5506 return ret; 5507 } 5508 5509 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5510 { 5511 struct rtw89_early_h2c *early_h2c; 5512 5513 lockdep_assert_held(&rtwdev->mutex); 5514 5515 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5516 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5517 } 5518 } 5519 5520 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5521 { 5522 struct rtw89_early_h2c *early_h2c, *tmp; 5523 5524 mutex_lock(&rtwdev->mutex); 5525 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5526 list_del(&early_h2c->list); 5527 kfree(early_h2c->h2c); 5528 kfree(early_h2c); 5529 } 5530 mutex_unlock(&rtwdev->mutex); 5531 } 5532 5533 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5534 { 5535 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5536 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5537 5538 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5539 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5540 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5541 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 5542 } 5543 5544 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 5545 struct sk_buff *c2h) 5546 { 5547 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5548 u8 category = attr->category; 5549 u8 class = attr->class; 5550 u8 func = attr->func; 5551 5552 switch (category) { 5553 default: 5554 return false; 5555 case RTW89_C2H_CAT_MAC: 5556 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 5557 case RTW89_C2H_CAT_OUTSRC: 5558 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 5559 } 5560 } 5561 
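/*
 * C2H (chip-to-host) event delivery: rtw89_fw_c2h_irqsafe() is called
 * from the RX path, which may run in atomic context. Events whose
 * handlers are safe there (as reported by rtw89_fw_c2h_chk_atomic()
 * above) are dispatched immediately and freed; everything else is
 * queued on c2h_queue and handled later from rtw89_fw_c2h_work()
 * under rtwdev->mutex.
 */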
5562 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 5563 { 5564 rtw89_fw_c2h_parse_attr(c2h); 5565 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 5566 goto enqueue; 5567 5568 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 5569 dev_kfree_skb_any(c2h); 5570 return; 5571 5572 enqueue: 5573 skb_queue_tail(&rtwdev->c2h_queue, c2h); 5574 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 5575 } 5576 5577 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 5578 struct sk_buff *skb) 5579 { 5580 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 5581 u8 category = attr->category; 5582 u8 class = attr->class; 5583 u8 func = attr->func; 5584 u16 len = attr->len; 5585 bool dump = true; 5586 5587 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5588 return; 5589 5590 switch (category) { 5591 case RTW89_C2H_CAT_TEST: 5592 break; 5593 case RTW89_C2H_CAT_MAC: 5594 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 5595 if (class == RTW89_MAC_C2H_CLASS_INFO && 5596 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 5597 dump = false; 5598 break; 5599 case RTW89_C2H_CAT_OUTSRC: 5600 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 5601 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 5602 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 5603 else 5604 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 5605 break; 5606 } 5607 5608 if (dump) 5609 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 5610 } 5611 5612 void rtw89_fw_c2h_work(struct work_struct *work) 5613 { 5614 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5615 c2h_work); 5616 struct sk_buff *skb, *tmp; 5617 5618 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 5619 skb_unlink(skb, &rtwdev->c2h_queue); 5620 mutex_lock(&rtwdev->mutex); 5621 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 5622 mutex_unlock(&rtwdev->mutex); 5623 dev_kfree_skb_any(skb); 5624 } 5625 } 5626 5627 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 5628 struct rtw89_mac_h2c_info *info) 5629 { 5630 const struct rtw89_chip_info *chip = rtwdev->chip; 5631 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5632 const u32 *h2c_reg = chip->h2c_regs; 5633 u8 i, val, len; 5634 int ret; 5635 5636 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 5637 rtwdev, chip->h2c_ctrl_reg); 5638 if (ret) { 5639 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 5640 return ret; 5641 } 5642 5643 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 5644 sizeof(info->u.h2creg[0])); 5645 5646 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 5647 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 5648 5649 for (i = 0; i < RTW89_H2CREG_MAX; i++) 5650 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 5651 5652 fw_info->h2c_counter++; 5653 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 5654 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 5655 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 5656 5657 return 0; 5658 } 5659 5660 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 5661 struct rtw89_mac_c2h_info *info) 5662 { 5663 const struct rtw89_chip_info *chip = rtwdev->chip; 5664 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5665 const u32 *c2h_reg = chip->c2h_regs; 5666 u32 ret; 5667 u8 i, val; 5668 5669 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 5670 5671 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 5672 RTW89_C2H_TIMEOUT, false, rtwdev, 5673 chip->c2h_ctrl_reg); 5674 if (ret) { 5675 rtw89_warn(rtwdev, "c2h reg timeout\n"); 5676 return 
ret; 5677 } 5678 5679 for (i = 0; i < RTW89_C2HREG_MAX; i++) 5680 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 5681 5682 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 5683 5684 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 5685 info->content_len = 5686 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 5687 RTW89_C2HREG_HDR_LEN; 5688 5689 fw_info->c2h_counter++; 5690 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 5691 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 5692 5693 return 0; 5694 } 5695 5696 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 5697 struct rtw89_mac_h2c_info *h2c_info, 5698 struct rtw89_mac_c2h_info *c2h_info) 5699 { 5700 u32 ret; 5701 5702 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 5703 lockdep_assert_held(&rtwdev->mutex); 5704 5705 if (!h2c_info && !c2h_info) 5706 return -EINVAL; 5707 5708 if (!h2c_info) 5709 goto recv_c2h; 5710 5711 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 5712 if (ret) 5713 return ret; 5714 5715 recv_c2h: 5716 if (!c2h_info) 5717 return 0; 5718 5719 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 5720 if (ret) 5721 return ret; 5722 5723 return 0; 5724 } 5725 5726 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 5727 { 5728 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 5729 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 5730 return; 5731 } 5732 5733 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 5734 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 5735 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 5736 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 5737 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 5738 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 5739 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 5740 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 5741 5742 rtw89_fw_prog_cnt_dump(rtwdev); 5743 } 5744 5745 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 5746 { 5747 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5748 struct rtw89_pktofld_info *info, *tmp; 5749 u8 idx; 5750 5751 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 5752 if (!(rtwdev->chip->support_bands & BIT(idx))) 5753 continue; 5754 5755 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 5756 if (test_bit(info->id, rtwdev->pkt_offload)) 5757 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 5758 list_del(&info->list); 5759 kfree(info); 5760 } 5761 } 5762 } 5763 5764 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 5765 struct rtw89_vif *rtwvif, 5766 struct rtw89_pktofld_info *info, 5767 enum nl80211_band band, u8 ssid_idx) 5768 { 5769 struct cfg80211_scan_request *req = rtwvif->scan_req; 5770 5771 if (band != NL80211_BAND_6GHZ) 5772 return false; 5773 5774 if (req->ssids[ssid_idx].ssid_len) { 5775 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 5776 req->ssids[ssid_idx].ssid_len); 5777 info->ssid_len = req->ssids[ssid_idx].ssid_len; 5778 return false; 5779 } else { 5780 info->wildcard_6ghz = true; 5781 return true; 5782 } 5783 } 5784 5785 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 5786 struct rtw89_vif *rtwvif, 5787 struct sk_buff *skb, u8 ssid_idx) 5788 { 5789 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5790 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5791 struct rtw89_pktofld_info *info; 5792 struct sk_buff *new; 5793 int ret = 0; 5794 u8 band; 5795 5796 for (band = NL80211_BAND_2GHZ; band < 
NUM_NL80211_BANDS; band++) { 5797 if (!(rtwdev->chip->support_bands & BIT(band))) 5798 continue; 5799 5800 new = skb_copy(skb, GFP_KERNEL); 5801 if (!new) { 5802 ret = -ENOMEM; 5803 goto out; 5804 } 5805 skb_put_data(new, ies->ies[band], ies->len[band]); 5806 skb_put_data(new, ies->common_ies, ies->common_ie_len); 5807 5808 info = kzalloc(sizeof(*info), GFP_KERNEL); 5809 if (!info) { 5810 ret = -ENOMEM; 5811 kfree_skb(new); 5812 goto out; 5813 } 5814 5815 rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 5816 ssid_idx); 5817 5818 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 5819 if (ret) { 5820 kfree_skb(new); 5821 kfree(info); 5822 goto out; 5823 } 5824 5825 list_add_tail(&info->list, &scan_info->pkt_list[band]); 5826 kfree_skb(new); 5827 } 5828 out: 5829 return ret; 5830 } 5831 5832 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 5833 struct rtw89_vif *rtwvif) 5834 { 5835 struct cfg80211_scan_request *req = rtwvif->scan_req; 5836 struct sk_buff *skb; 5837 u8 num = req->n_ssids, i; 5838 int ret; 5839 5840 for (i = 0; i < num; i++) { 5841 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5842 req->ssids[i].ssid, 5843 req->ssids[i].ssid_len, 5844 req->ie_len); 5845 if (!skb) 5846 return -ENOMEM; 5847 5848 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 5849 kfree_skb(skb); 5850 5851 if (ret) 5852 return ret; 5853 } 5854 5855 return 0; 5856 } 5857 5858 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 5859 struct cfg80211_scan_request *req, 5860 struct rtw89_mac_chinfo *ch_info) 5861 { 5862 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5863 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5864 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 5865 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5866 struct cfg80211_scan_6ghz_params *params; 5867 struct rtw89_pktofld_info *info, *tmp; 5868 struct ieee80211_hdr *hdr; 5869 struct sk_buff *skb; 5870 bool found; 5871 int ret = 0; 5872 u8 i; 5873 5874 if (!req->n_6ghz_params) 5875 return 0; 5876 5877 for (i = 0; i < req->n_6ghz_params; i++) { 5878 params = &req->scan_6ghz_params[i]; 5879 5880 if (req->channels[params->channel_idx]->hw_value != 5881 ch_info->pri_ch) 5882 continue; 5883 5884 found = false; 5885 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 5886 if (ether_addr_equal(tmp->bssid, params->bssid)) { 5887 found = true; 5888 break; 5889 } 5890 } 5891 if (found) 5892 continue; 5893 5894 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5895 NULL, 0, req->ie_len); if (!skb) { ret = -ENOMEM; goto out; } 5896 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 5897 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 5898 hdr = (struct ieee80211_hdr *)skb->data; 5899 ether_addr_copy(hdr->addr3, params->bssid); 5900 5901 info = kzalloc(sizeof(*info), GFP_KERNEL); 5902 if (!info) { 5903 ret = -ENOMEM; 5904 kfree_skb(skb); 5905 goto out; 5906 } 5907 5908 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 5909 if (ret) { 5910 kfree_skb(skb); 5911 kfree(info); 5912 goto out; 5913 } 5914 5915 ether_addr_copy(info->bssid, params->bssid); 5916 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 5917 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 5918 5919 ch_info->tx_pkt = true; 5920 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 5921 5922 kfree_skb(skb); 5923 } 5924 5925 out: 5926 return ret; 5927 } 5928 5929 static void rtw89_hw_scan_add_chan(struct rtw89_dev
*rtwdev, int chan_type, 5930 int ssid_num, 5931 struct rtw89_mac_chinfo *ch_info) 5932 { 5933 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5934 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5935 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 5936 struct cfg80211_scan_request *req = rtwvif->scan_req; 5937 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 5938 struct rtw89_pktofld_info *info; 5939 u8 band, probe_count = 0; 5940 int ret; 5941 5942 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 5943 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 5944 ch_info->bw = RTW89_SCAN_WIDTH; 5945 ch_info->tx_pkt = true; 5946 ch_info->cfg_tx_pwr = false; 5947 ch_info->tx_pwr_idx = 0; 5948 ch_info->tx_null = false; 5949 ch_info->pause_data = false; 5950 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 5951 5952 if (ch_info->ch_band == RTW89_BAND_6G) { 5953 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 5954 !ch_info->is_psc) { 5955 ch_info->tx_pkt = false; 5956 if (!req->duration_mandatory) 5957 ch_info->period -= RTW89_DWELL_TIME_6G; 5958 } 5959 } 5960 5961 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 5962 if (ret) 5963 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 5964 5965 if (ssid_num) { 5966 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 5967 5968 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 5969 if (info->channel_6ghz && 5970 ch_info->pri_ch != info->channel_6ghz) 5971 continue; 5972 else if (info->channel_6ghz && probe_count != 0) 5973 ch_info->period += RTW89_CHANNEL_TIME_6G; 5974 5975 if (info->wildcard_6ghz) 5976 continue; 5977 5978 ch_info->pkt_id[probe_count++] = info->id; 5979 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 5980 break; 5981 } 5982 ch_info->num_pkt = probe_count; 5983 } 5984 5985 switch (chan_type) { 5986 case RTW89_CHAN_OPERATE: 5987 ch_info->central_ch = op->channel; 5988 ch_info->pri_ch = op->primary_channel; 5989 ch_info->ch_band = op->band_type; 5990 ch_info->bw = op->band_width; 5991 ch_info->tx_null = true; 5992 ch_info->num_pkt = 0; 5993 break; 5994 case RTW89_CHAN_DFS: 5995 if (ch_info->ch_band != RTW89_BAND_6G) 5996 ch_info->period = max_t(u8, ch_info->period, 5997 RTW89_DFS_CHAN_TIME); 5998 ch_info->dwell_time = RTW89_DWELL_TIME; 5999 break; 6000 case RTW89_CHAN_ACTIVE: 6001 break; 6002 default: 6003 rtw89_err(rtwdev, "Channel type out of bound\n"); 6004 } 6005 } 6006 6007 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6008 int ssid_num, 6009 struct rtw89_mac_chinfo_be *ch_info) 6010 { 6011 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6012 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 6013 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6014 struct cfg80211_scan_request *req = rtwvif->scan_req; 6015 struct rtw89_pktofld_info *info; 6016 u8 band, probe_count = 0, i; 6017 6018 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6019 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6020 ch_info->bw = RTW89_SCAN_WIDTH; 6021 ch_info->tx_null = false; 6022 ch_info->pause_data = false; 6023 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6024 6025 if (ssid_num) { 6026 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6027 6028 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6029 if (info->channel_6ghz && 6030 ch_info->pri_ch != info->channel_6ghz) 6031 continue; 6032 6033 if (info->wildcard_6ghz) 6034 continue; 6035 6036 ch_info->pkt_id[probe_count++] = info->id; 6037 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 
6038 break; 6039 } 6040 } 6041 6042 if (ch_info->ch_band == RTW89_BAND_6G) { 6043 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6044 !ch_info->is_psc) { 6045 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6046 if (!req->duration_mandatory) 6047 ch_info->period -= RTW89_DWELL_TIME_6G; 6048 } 6049 } 6050 6051 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6052 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6053 6054 switch (chan_type) { 6055 case RTW89_CHAN_DFS: 6056 if (ch_info->ch_band != RTW89_BAND_6G) 6057 ch_info->period = 6058 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6059 ch_info->dwell_time = RTW89_DWELL_TIME; 6060 break; 6061 case RTW89_CHAN_ACTIVE: 6062 break; 6063 default: 6064 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6065 break; 6066 } 6067 } 6068 6069 int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 6070 struct rtw89_vif *rtwvif, bool connected) 6071 { 6072 struct cfg80211_scan_request *req = rtwvif->scan_req; 6073 struct rtw89_mac_chinfo *ch_info, *tmp; 6074 struct ieee80211_channel *channel; 6075 struct list_head chan_list; 6076 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6077 int list_len, off_chan_time = 0; 6078 enum rtw89_chan_type type; 6079 int ret = 0; 6080 u32 idx; 6081 6082 INIT_LIST_HEAD(&chan_list); 6083 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6084 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6085 idx++, list_len++) { 6086 channel = req->channels[idx]; 6087 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6088 if (!ch_info) { 6089 ret = -ENOMEM; 6090 goto out; 6091 } 6092 6093 if (req->duration) 6094 ch_info->period = req->duration; 6095 else if (channel->band == NL80211_BAND_6GHZ) 6096 ch_info->period = RTW89_CHANNEL_TIME_6G + 6097 RTW89_DWELL_TIME_6G; 6098 else 6099 ch_info->period = RTW89_CHANNEL_TIME; 6100 6101 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6102 ch_info->central_ch = channel->hw_value; 6103 ch_info->pri_ch = channel->hw_value; 6104 ch_info->rand_seq_num = random_seq; 6105 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6106 6107 if (channel->flags & 6108 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6109 type = RTW89_CHAN_DFS; 6110 else 6111 type = RTW89_CHAN_ACTIVE; 6112 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6113 6114 if (connected && 6115 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6116 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6117 if (!tmp) { 6118 ret = -ENOMEM; 6119 kfree(ch_info); 6120 goto out; 6121 } 6122 6123 type = RTW89_CHAN_OPERATE; 6124 tmp->period = req->duration_mandatory ? 
6125 req->duration : RTW89_CHANNEL_TIME; 6126 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6127 list_add_tail(&tmp->list, &chan_list); 6128 off_chan_time = 0; 6129 list_len++; 6130 } 6131 list_add_tail(&ch_info->list, &chan_list); 6132 off_chan_time += ch_info->period; 6133 } 6134 rtwdev->scan_info.last_chan_idx = idx; 6135 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6136 6137 out: 6138 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6139 list_del(&ch_info->list); 6140 kfree(ch_info); 6141 } 6142 6143 return ret; 6144 } 6145 6146 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6147 struct rtw89_vif *rtwvif, bool connected) 6148 { 6149 struct cfg80211_scan_request *req = rtwvif->scan_req; 6150 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6151 struct ieee80211_channel *channel; 6152 struct list_head chan_list; 6153 enum rtw89_chan_type type; 6154 int list_len, ret; 6155 bool random_seq; 6156 u32 idx; 6157 6158 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6159 INIT_LIST_HEAD(&chan_list); 6160 6161 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6162 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6163 idx++, list_len++) { 6164 channel = req->channels[idx]; 6165 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6166 if (!ch_info) { 6167 ret = -ENOMEM; 6168 goto out; 6169 } 6170 6171 if (req->duration) 6172 ch_info->period = req->duration; 6173 else if (channel->band == NL80211_BAND_6GHZ) 6174 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6175 else 6176 ch_info->period = RTW89_CHANNEL_TIME; 6177 6178 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6179 ch_info->central_ch = channel->hw_value; 6180 ch_info->pri_ch = channel->hw_value; 6181 ch_info->rand_seq_num = random_seq; 6182 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6183 6184 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6185 type = RTW89_CHAN_DFS; 6186 else 6187 type = RTW89_CHAN_ACTIVE; 6188 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6189 6190 list_add_tail(&ch_info->list, &chan_list); 6191 } 6192 6193 rtwdev->scan_info.last_chan_idx = idx; 6194 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 6195 6196 out: 6197 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6198 list_del(&ch_info->list); 6199 kfree(ch_info); 6200 } 6201 6202 return ret; 6203 } 6204 6205 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6206 struct rtw89_vif *rtwvif, bool connected) 6207 { 6208 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6209 int ret; 6210 6211 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 6212 if (ret) { 6213 rtw89_err(rtwdev, "Update probe request failed\n"); 6214 goto out; 6215 } 6216 ret = mac->add_chan_list(rtwdev, rtwvif, connected); 6217 out: 6218 return ret; 6219 } 6220 6221 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6222 struct ieee80211_scan_request *scan_req) 6223 { 6224 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6225 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6226 struct cfg80211_scan_request *req = &scan_req->req; 6227 u32 rx_fltr = rtwdev->hal.rx_fltr; 6228 u8 mac_addr[ETH_ALEN]; 6229 6230 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 6231 rtwdev->scan_info.scanning_vif = vif; 6232 rtwdev->scan_info.last_chan_idx = 0; 6233 rtwdev->scan_info.abort = false; 6234 rtwvif->scan_ies = &scan_req->ies; 6235 rtwvif->scan_req = req; 
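/* From this point the scan owns the radio: data queues are stopped, per-port
 * RX sync is disabled, and the probe request source address is chosen. With
 * NL80211_SCAN_FLAG_RANDOM_ADDR, the address is derived from the request's
 * addr/mask pair (mask-covered bits taken from req->mac_addr, the remaining
 * bits randomized). A rough sketch, assuming the usual etherdevice.h helper
 * semantics, with rand[] standing for freshly generated random bytes:
 *
 *	for (i = 0; i < ETH_ALEN; i++)
 *		mac_addr[i] = (req->mac_addr[i] & req->mac_addr_mask[i]) |
 *			      (rand[i] & ~req->mac_addr_mask[i]);
 *
 * Otherwise the vif address is used unchanged.
 */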
6236 ieee80211_stop_queues(rtwdev->hw); 6237 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false); 6238 6239 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6240 get_random_mask_addr(mac_addr, req->mac_addr, 6241 req->mac_addr_mask); 6242 else 6243 ether_addr_copy(mac_addr, vif->addr); 6244 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 6245 6246 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6247 rx_fltr &= ~B_AX_A_BC; 6248 rx_fltr &= ~B_AX_A_A1_MATCH; 6249 rtw89_write32_mask(rtwdev, 6250 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6251 B_AX_RX_FLTR_CFG_MASK, 6252 rx_fltr); 6253 6254 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6255 } 6256 6257 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6258 bool aborted) 6259 { 6260 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6261 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6262 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 6263 struct cfg80211_scan_info info = { 6264 .aborted = aborted, 6265 }; 6266 6267 if (!vif) 6268 return; 6269 6270 rtw89_write32_mask(rtwdev, 6271 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6272 B_AX_RX_FLTR_CFG_MASK, 6273 rtwdev->hal.rx_fltr); 6274 6275 rtw89_core_scan_complete(rtwdev, vif, true); 6276 ieee80211_scan_completed(rtwdev->hw, &info); 6277 ieee80211_wake_queues(rtwdev->hw); 6278 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true); 6279 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6280 6281 rtw89_release_pkt_list(rtwdev); 6282 rtwvif->scan_req = NULL; 6283 rtwvif->scan_ies = NULL; 6284 scan_info->last_chan_idx = 0; 6285 scan_info->scanning_vif = NULL; 6286 scan_info->abort = false; 6287 6288 rtw89_chanctx_proceed(rtwdev); 6289 } 6290 6291 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 6292 { 6293 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6294 int ret; 6295 6296 scan_info->abort = true; 6297 6298 ret = rtw89_hw_scan_offload(rtwdev, vif, false); 6299 if (ret) 6300 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 6301 6302 /* Indicate ieee80211_scan_completed() before returning, which is safe 6303 * because scan abort command always waits for completion of 6304 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 6305 * work properly. 6306 */ 6307 rtw89_hw_scan_complete(rtwdev, vif, true); 6308 } 6309 6310 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 6311 { 6312 struct rtw89_vif *rtwvif; 6313 6314 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 6315 /* This variable implies connected or during attempt to connect */ 6316 if (!is_zero_ether_addr(rtwvif->bssid)) 6317 return true; 6318 } 6319 6320 return false; 6321 } 6322 6323 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6324 bool enable) 6325 { 6326 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6327 struct rtw89_scan_option opt = {0}; 6328 struct rtw89_vif *rtwvif; 6329 bool connected; 6330 int ret = 0; 6331 6332 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 6333 if (!rtwvif) 6334 return -EINVAL; 6335 6336 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 6337 opt.enable = enable; 6338 opt.target_ch_mode = connected; 6339 if (enable) { 6340 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 6341 if (ret) 6342 goto out; 6343 } 6344 6345 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 6346 opt.operation = enable ? 
RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 6347 opt.scan_mode = RTW89_SCAN_MODE_SA; 6348 opt.band = RTW89_PHY_0; 6349 opt.num_macc_role = 0; 6350 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 6351 opt.num_opch = connected ? 1 : 0; 6352 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 6353 } 6354 6355 ret = mac->scan_offload(rtwdev, &opt, rtwvif); 6356 out: 6357 return ret; 6358 } 6359 6360 #define H2C_FW_CPU_EXCEPTION_LEN 4 6361 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 6362 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 6363 { 6364 struct sk_buff *skb; 6365 int ret; 6366 6367 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 6368 if (!skb) { 6369 rtw89_err(rtwdev, 6370 "failed to alloc skb for fw cpu exception\n"); 6371 return -ENOMEM; 6372 } 6373 6374 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 6375 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 6376 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 6377 6378 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6379 H2C_CAT_TEST, 6380 H2C_CL_FW_STATUS_TEST, 6381 H2C_FUNC_CPU_EXCEPTION, 0, 0, 6382 H2C_FW_CPU_EXCEPTION_LEN); 6383 6384 ret = rtw89_h2c_tx(rtwdev, skb, false); 6385 if (ret) { 6386 rtw89_err(rtwdev, "failed to send h2c\n"); 6387 goto fail; 6388 } 6389 6390 return 0; 6391 6392 fail: 6393 dev_kfree_skb_any(skb); 6394 return ret; 6395 } 6396 6397 #define H2C_PKT_DROP_LEN 24 6398 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 6399 const struct rtw89_pkt_drop_params *params) 6400 { 6401 struct sk_buff *skb; 6402 int ret; 6403 6404 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 6405 if (!skb) { 6406 rtw89_err(rtwdev, 6407 "failed to alloc skb for packet drop\n"); 6408 return -ENOMEM; 6409 } 6410 6411 switch (params->sel) { 6412 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 6413 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 6414 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 6415 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 6416 case RTW89_PKT_DROP_SEL_BAND_ONCE: 6417 break; 6418 default: 6419 rtw89_debug(rtwdev, RTW89_DBG_FW, 6420 "H2C of pkt drop might not fully support sel: %d yet\n", 6421 params->sel); 6422 break; 6423 } 6424 6425 skb_put(skb, H2C_PKT_DROP_LEN); 6426 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 6427 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 6428 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 6429 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 6430 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 6431 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 6432 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 6433 params->macid_band_sel[0]); 6434 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 6435 params->macid_band_sel[1]); 6436 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 6437 params->macid_band_sel[2]); 6438 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 6439 params->macid_band_sel[3]); 6440 6441 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6442 H2C_CAT_MAC, 6443 H2C_CL_MAC_FW_OFLD, 6444 H2C_FUNC_PKT_DROP, 0, 0, 6445 H2C_PKT_DROP_LEN); 6446 6447 ret = rtw89_h2c_tx(rtwdev, skb, false); 6448 if (ret) { 6449 rtw89_err(rtwdev, "failed to send h2c\n"); 6450 goto fail; 6451 } 6452 6453 return 0; 6454 6455 fail: 6456 dev_kfree_skb_any(skb); 6457 return ret; 6458 } 6459 6460 #define H2C_KEEP_ALIVE_LEN 4 6461 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6462 bool enable) 6463 { 6464 struct sk_buff *skb; 6465 u8 pkt_id = 0; 6466 int ret; 6467 6468 if (enable) { 6469 ret = 
rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6470 RTW89_PKT_OFLD_TYPE_NULL_DATA, 6471 &pkt_id); 6472 if (ret) 6473 return -EPERM; 6474 } 6475 6476 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 6477 if (!skb) { 6478 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6479 return -ENOMEM; 6480 } 6481 6482 skb_put(skb, H2C_KEEP_ALIVE_LEN); 6483 6484 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 6485 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 6486 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 6487 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 6488 6489 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6490 H2C_CAT_MAC, 6491 H2C_CL_MAC_WOW, 6492 H2C_FUNC_KEEP_ALIVE, 0, 1, 6493 H2C_KEEP_ALIVE_LEN); 6494 6495 ret = rtw89_h2c_tx(rtwdev, skb, false); 6496 if (ret) { 6497 rtw89_err(rtwdev, "failed to send h2c\n"); 6498 goto fail; 6499 } 6500 6501 return 0; 6502 6503 fail: 6504 dev_kfree_skb_any(skb); 6505 6506 return ret; 6507 } 6508 6509 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6510 bool enable) 6511 { 6512 struct rtw89_h2c_arp_offload *h2c; 6513 u32 len = sizeof(*h2c); 6514 struct sk_buff *skb; 6515 u8 pkt_id = 0; 6516 int ret; 6517 6518 if (enable) { 6519 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6520 RTW89_PKT_OFLD_TYPE_ARP_RSP, 6521 &pkt_id); 6522 if (ret) 6523 return ret; 6524 } 6525 6526 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6527 if (!skb) { 6528 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 6529 return -ENOMEM; 6530 } 6531 6532 skb_put(skb, len); 6533 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 6534 6535 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 6536 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 6537 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 6538 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 6539 6540 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6541 H2C_CAT_MAC, 6542 H2C_CL_MAC_WOW, 6543 H2C_FUNC_ARP_OFLD, 0, 1, 6544 len); 6545 6546 ret = rtw89_h2c_tx(rtwdev, skb, false); 6547 if (ret) { 6548 rtw89_err(rtwdev, "failed to send h2c\n"); 6549 goto fail; 6550 } 6551 6552 return 0; 6553 6554 fail: 6555 dev_kfree_skb_any(skb); 6556 6557 return ret; 6558 } 6559 6560 #define H2C_DISCONNECT_DETECT_LEN 8 6561 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 6562 struct rtw89_vif *rtwvif, bool enable) 6563 { 6564 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6565 struct sk_buff *skb; 6566 u8 macid = rtwvif->mac_id; 6567 int ret; 6568 6569 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 6570 if (!skb) { 6571 rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n"); 6572 return -ENOMEM; 6573 } 6574 6575 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 6576 6577 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 6578 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 6579 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 6580 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 6581 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 6582 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 6583 } 6584 6585 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6586 H2C_CAT_MAC, 6587 H2C_CL_MAC_WOW, 6588 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 6589 H2C_DISCONNECT_DETECT_LEN); 6590 6591 ret = rtw89_h2c_tx(rtwdev, skb, false); 6592 if (ret) { 6593 rtw89_err(rtwdev, "failed to send h2c\n"); 6594 goto fail; 6595 } 6596 6597 return 0; 6598 6599
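/* On success rtw89_h2c_tx() has handed the skb to the HCI TX path; only the
 * failure path below still owns the buffer and must free it.
 */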
fail: 6600 dev_kfree_skb_any(skb); 6601 6602 return ret; 6603 } 6604 6605 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6606 bool enable) 6607 { 6608 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6609 struct rtw89_h2c_wow_global *h2c; 6610 u8 macid = rtwvif->mac_id; 6611 u32 len = sizeof(*h2c); 6612 struct sk_buff *skb; 6613 int ret; 6614 6615 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6616 if (!skb) { 6617 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 6618 return -ENOMEM; 6619 } 6620 6621 skb_put(skb, len); 6622 h2c = (struct rtw89_h2c_wow_global *)skb->data; 6623 6624 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 6625 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 6626 le32_encode_bits(rtw_wow->ptk_alg, 6627 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 6628 le32_encode_bits(rtw_wow->gtk_alg, 6629 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 6630 h2c->key_info = rtw_wow->key_info; 6631 6632 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6633 H2C_CAT_MAC, 6634 H2C_CL_MAC_WOW, 6635 H2C_FUNC_WOW_GLOBAL, 0, 1, 6636 len); 6637 6638 ret = rtw89_h2c_tx(rtwdev, skb, false); 6639 if (ret) { 6640 rtw89_err(rtwdev, "failed to send h2c\n"); 6641 goto fail; 6642 } 6643 6644 return 0; 6645 6646 fail: 6647 dev_kfree_skb_any(skb); 6648 6649 return ret; 6650 } 6651 6652 #define H2C_WAKEUP_CTRL_LEN 4 6653 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 6654 struct rtw89_vif *rtwvif, 6655 bool enable) 6656 { 6657 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6658 struct sk_buff *skb; 6659 u8 macid = rtwvif->mac_id; 6660 int ret; 6661 6662 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 6663 if (!skb) { 6664 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 6665 return -ENOMEM; 6666 } 6667 6668 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 6669 6670 if (rtw_wow->pattern_cnt) 6671 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 6672 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 6673 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 6674 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 6675 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 6676 6677 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 6678 6679 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6680 H2C_CAT_MAC, 6681 H2C_CL_MAC_WOW, 6682 H2C_FUNC_WAKEUP_CTRL, 0, 1, 6683 H2C_WAKEUP_CTRL_LEN); 6684 6685 ret = rtw89_h2c_tx(rtwdev, skb, false); 6686 if (ret) { 6687 rtw89_err(rtwdev, "failed to send h2c\n"); 6688 goto fail; 6689 } 6690 6691 return 0; 6692 6693 fail: 6694 dev_kfree_skb_any(skb); 6695 6696 return ret; 6697 } 6698 6699 #define H2C_WOW_CAM_UPD_LEN 24 6700 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 6701 struct rtw89_wow_cam_info *cam_info) 6702 { 6703 struct sk_buff *skb; 6704 int ret; 6705 6706 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 6707 if (!skb) { 6708 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n"); 6709 return -ENOMEM; 6710 } 6711 6712 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 6713 6714 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 6715 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 6716 if (cam_info->valid) { 6717 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 6718 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 6719 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 6720 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 6721
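/* The four WKFM words form the wake-pattern byte mask: each set bit selects
 * one packet byte that firmware is expected to fold into the CRC compared
 * against the value programmed below. Both mask and CRC arrive precomputed
 * in cam_info from the WoWLAN pattern setup; this command only mirrors them
 * into the wake CAM entry.
 */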
RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 6722 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 6723 cam_info->negative_pattern_match); 6724 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 6725 cam_info->skip_mac_hdr); 6726 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 6727 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 6728 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 6729 } 6730 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 6731 6732 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6733 H2C_CAT_MAC, 6734 H2C_CL_MAC_WOW, 6735 H2C_FUNC_WOW_CAM_UPD, 0, 1, 6736 H2C_WOW_CAM_UPD_LEN); 6737 6738 ret = rtw89_h2c_tx(rtwdev, skb, false); 6739 if (ret) { 6740 rtw89_err(rtwdev, "failed to send h2c\n"); 6741 goto fail; 6742 } 6743 6744 return 0; 6745 fail: 6746 dev_kfree_skb_any(skb); 6747 6748 return ret; 6749 } 6750 6751 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 6752 struct rtw89_vif *rtwvif, 6753 bool enable) 6754 { 6755 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6756 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 6757 struct rtw89_h2c_wow_gtk_ofld *h2c; 6758 u8 macid = rtwvif->mac_id; 6759 u32 len = sizeof(*h2c); 6760 u8 pkt_id_sa_query = 0; 6761 struct sk_buff *skb; 6762 u8 pkt_id_eapol = 0; 6763 int ret; 6764 6765 if (!rtw_wow->gtk_alg) 6766 return 0; 6767 6768 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6769 if (!skb) { 6770 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 6771 return -ENOMEM; 6772 } 6773 6774 skb_put(skb, len); 6775 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 6776 6777 if (!enable) 6778 goto hdr; 6779 6780 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6781 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 6782 &pkt_id_eapol); 6783 if (ret) 6784 goto fail; 6785 6786 if (gtk_info->igtk_keyid) { 6787 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6788 RTW89_PKT_OFLD_TYPE_SA_QUERY, 6789 &pkt_id_sa_query); 6790 if (ret) 6791 goto fail; 6792 } 6793 6794 /* TKIP is not supported yet */ 6795 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 6796 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 6797 le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0, 6798 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 6799 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 6800 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 6801 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 6802 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 6803 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 6804 h2c->gtk_info = rtw_wow->gtk_info; 6805 6806 hdr: 6807 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6808 H2C_CAT_MAC, 6809 H2C_CL_MAC_WOW, 6810 H2C_FUNC_GTK_OFLD, 0, 1, 6811 len); 6812 6813 ret = rtw89_h2c_tx(rtwdev, skb, false); 6814 if (ret) { 6815 rtw89_err(rtwdev, "failed to send h2c\n"); 6816 goto fail; 6817 } 6818 return 0; 6819 6820 fail: 6821 dev_kfree_skb_any(skb); 6822 6823 return ret; 6824 } 6825 6826 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 6827 { 6828 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 6829 struct rtw89_h2c_wow_aoac *h2c; 6830 u32 len = sizeof(*h2c); 6831 struct sk_buff *skb; 6832 unsigned int cond; 6833 6834 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6835 if (!skb) { 6836 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 6837 return -ENOMEM; 6838 } 6839 6840 skb_put(skb, len); 6841 6842 /* This H2C only notifies firmware to generate an AOAC report C2H; 6843 * it carries no parameters.
6844 */ 6845 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6846 H2C_CAT_MAC, 6847 H2C_CL_MAC_WOW, 6848 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 6849 len); 6850 6851 cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); 6852 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6853 } 6854 6855 /* Return < 0, if failures happen during waiting for the condition. 6856 * Return 0, when waiting for the condition succeeds. 6857 * Return > 0, if the wait is considered unreachable due to driver/FW design, 6858 * where 1 means during SER. 6859 */ 6860 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 6861 struct rtw89_wait_info *wait, unsigned int cond) 6862 { 6863 int ret; 6864 6865 ret = rtw89_h2c_tx(rtwdev, skb, false); 6866 if (ret) { 6867 rtw89_err(rtwdev, "failed to send h2c\n"); 6868 dev_kfree_skb_any(skb); 6869 return -EBUSY; 6870 } 6871 6872 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 6873 return 1; 6874 6875 return rtw89_wait_for_cond(wait, cond); 6876 } 6877 6878 #define H2C_ADD_MCC_LEN 16 6879 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 6880 const struct rtw89_fw_mcc_add_req *p) 6881 { 6882 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6883 struct sk_buff *skb; 6884 unsigned int cond; 6885 6886 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 6887 if (!skb) { 6888 rtw89_err(rtwdev, 6889 "failed to alloc skb for add mcc\n"); 6890 return -ENOMEM; 6891 } 6892 6893 skb_put(skb, H2C_ADD_MCC_LEN); 6894 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 6895 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 6896 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 6897 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 6898 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 6899 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 6900 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 6901 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 6902 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 6903 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 6904 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 6905 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 6906 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 6907 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 6908 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 6909 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 6910 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 6911 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 6912 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 6913 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 6914 6915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6916 H2C_CAT_MAC, 6917 H2C_CL_MCC, 6918 H2C_FUNC_ADD_MCC, 0, 0, 6919 H2C_ADD_MCC_LEN); 6920 6921 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 6922 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6923 } 6924 6925 #define H2C_START_MCC_LEN 12 6926 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 6927 const struct rtw89_fw_mcc_start_req *p) 6928 { 6929 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6930 struct sk_buff *skb; 6931 unsigned int cond; 6932 6933 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 6934 if (!skb) { 6935 rtw89_err(rtwdev, 6936 "failed to alloc skb for start mcc\n"); 6937 return 
-ENOMEM; 6938 } 6939 6940 skb_put(skb, H2C_START_MCC_LEN); 6941 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 6942 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 6943 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 6944 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 6945 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 6946 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 6947 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 6948 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 6949 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 6950 6951 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6952 H2C_CAT_MAC, 6953 H2C_CL_MCC, 6954 H2C_FUNC_START_MCC, 0, 0, 6955 H2C_START_MCC_LEN); 6956 6957 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 6958 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6959 } 6960 6961 #define H2C_STOP_MCC_LEN 4 6962 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 6963 bool prev_groups) 6964 { 6965 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6966 struct sk_buff *skb; 6967 unsigned int cond; 6968 6969 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 6970 if (!skb) { 6971 rtw89_err(rtwdev, 6972 "failed to alloc skb for stop mcc\n"); 6973 return -ENOMEM; 6974 } 6975 6976 skb_put(skb, H2C_STOP_MCC_LEN); 6977 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 6978 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 6979 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 6980 6981 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6982 H2C_CAT_MAC, 6983 H2C_CL_MCC, 6984 H2C_FUNC_STOP_MCC, 0, 0, 6985 H2C_STOP_MCC_LEN); 6986 6987 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 6988 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6989 } 6990 6991 #define H2C_DEL_MCC_GROUP_LEN 4 6992 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 6993 bool prev_groups) 6994 { 6995 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6996 struct sk_buff *skb; 6997 unsigned int cond; 6998 6999 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7000 if (!skb) { 7001 rtw89_err(rtwdev, 7002 "failed to alloc skb for del mcc group\n"); 7003 return -ENOMEM; 7004 } 7005 7006 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7007 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7008 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7009 7010 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7011 H2C_CAT_MAC, 7012 H2C_CL_MCC, 7013 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7014 H2C_DEL_MCC_GROUP_LEN); 7015 7016 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7017 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7018 } 7019 7020 #define H2C_RESET_MCC_GROUP_LEN 4 7021 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7022 { 7023 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7024 struct sk_buff *skb; 7025 unsigned int cond; 7026 7027 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7028 if (!skb) { 7029 rtw89_err(rtwdev, 7030 "failed to alloc skb for reset mcc group\n"); 7031 return -ENOMEM; 7032 } 7033 7034 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7035 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7036 7037 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7038 H2C_CAT_MAC, 7039 H2C_CL_MCC, 7040 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7041 H2C_RESET_MCC_GROUP_LEN); 7042 7043 cond = 
RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7044 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7045 } 7046 7047 #define H2C_MCC_REQ_TSF_LEN 4 7048 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7049 const struct rtw89_fw_mcc_tsf_req *req, 7050 struct rtw89_mac_mcc_tsf_rpt *rpt) 7051 { 7052 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7053 struct rtw89_mac_mcc_tsf_rpt *tmp; 7054 struct sk_buff *skb; 7055 unsigned int cond; 7056 int ret; 7057 7058 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 7059 if (!skb) { 7060 rtw89_err(rtwdev, 7061 "failed to alloc skb for mcc req tsf\n"); 7062 return -ENOMEM; 7063 } 7064 7065 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7066 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7067 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7068 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7069 7070 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7071 H2C_CAT_MAC, 7072 H2C_CL_MCC, 7073 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7074 H2C_MCC_REQ_TSF_LEN); 7075 7076 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7077 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7078 if (ret) 7079 return ret; 7080 7081 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7082 *rpt = *tmp; 7083 7084 return 0; 7085 } 7086 7087 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7088 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7089 u8 *bitmap) 7090 { 7091 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7092 struct sk_buff *skb; 7093 unsigned int cond; 7094 u8 map_len; 7095 u8 h2c_len; 7096 7097 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7098 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7099 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7100 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7101 if (!skb) { 7102 rtw89_err(rtwdev, 7103 "failed to alloc skb for mcc macid bitmap\n"); 7104 return -ENOMEM; 7105 } 7106 7107 skb_put(skb, h2c_len); 7108 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7109 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7110 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7111 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7112 7113 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7114 H2C_CAT_MAC, 7115 H2C_CL_MCC, 7116 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7117 h2c_len); 7118 7119 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7120 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7121 } 7122 7123 #define H2C_MCC_SYNC_LEN 4 7124 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7125 u8 target, u8 offset) 7126 { 7127 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7128 struct sk_buff *skb; 7129 unsigned int cond; 7130 7131 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7132 if (!skb) { 7133 rtw89_err(rtwdev, 7134 "failed to alloc skb for mcc sync\n"); 7135 return -ENOMEM; 7136 } 7137 7138 skb_put(skb, H2C_MCC_SYNC_LEN); 7139 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7140 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7141 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7142 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7143 7144 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7145 H2C_CAT_MAC, 7146 H2C_CL_MCC, 7147 H2C_FUNC_MCC_SYNC, 0, 0, 7148 H2C_MCC_SYNC_LEN); 7149 7150 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 7151 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 
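/* The MCC commands above share one ack scheme: rtw89_h2c_tx_and_wait()
 * blocks on a condition keyed by (group, function) until the firmware's MCC
 * C2H report completes it. A sketch of the completing side, assuming the
 * C2H handlers in mac.c fill a struct rtw89_completion_data named data:
 *
 *	cond = RTW89_MCC_WAIT_COND(group, func);
 *	rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
 */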
7152 } 7153 7154 #define H2C_MCC_SET_DURATION_LEN 20 7155 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7156 const struct rtw89_fw_mcc_duration *p) 7157 { 7158 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7159 struct sk_buff *skb; 7160 unsigned int cond; 7161 7162 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7163 if (!skb) { 7164 rtw89_err(rtwdev, 7165 "failed to alloc skb for mcc set duration\n"); 7166 return -ENOMEM; 7167 } 7168 7169 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 7170 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 7171 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 7172 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 7173 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 7174 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 7175 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 7176 p->start_tsf_low); 7177 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 7178 p->start_tsf_high); 7179 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 7180 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 7181 7182 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7183 H2C_CAT_MAC, 7184 H2C_CL_MCC, 7185 H2C_FUNC_MCC_SET_DURATION, 0, 0, 7186 H2C_MCC_SET_DURATION_LEN); 7187 7188 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 7189 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7190 } 7191 7192 static 7193 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 7194 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 7195 struct rtw89_h2c_mrc_add_slot *slot_h2c) 7196 { 7197 bool fill_h2c = !!slot_h2c; 7198 unsigned int i; 7199 7200 if (!fill_h2c) 7201 goto calc_len; 7202 7203 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 7204 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 7205 le32_encode_bits(slot_arg->courtesy_en, 7206 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 7207 le32_encode_bits(slot_arg->role_num, 7208 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 7209 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 7210 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 7211 le32_encode_bits(slot_arg->courtesy_target, 7212 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 7213 7214 for (i = 0; i < slot_arg->role_num; i++) { 7215 slot_h2c->roles[i].w0 = 7216 le32_encode_bits(slot_arg->roles[i].macid, 7217 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 7218 le32_encode_bits(slot_arg->roles[i].role_type, 7219 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 7220 le32_encode_bits(slot_arg->roles[i].is_master, 7221 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 7222 le32_encode_bits(slot_arg->roles[i].en_tx_null, 7223 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 7224 le32_encode_bits(false, 7225 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 7226 le32_encode_bits(false, 7227 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 7228 slot_h2c->roles[i].w1 = 7229 le32_encode_bits(slot_arg->roles[i].central_ch, 7230 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 7231 le32_encode_bits(slot_arg->roles[i].primary_ch, 7232 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 7233 le32_encode_bits(slot_arg->roles[i].bw, 7234 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 7235 le32_encode_bits(slot_arg->roles[i].band, 7236 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 7237 le32_encode_bits(slot_arg->roles[i].null_early, 7238 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 7239 le32_encode_bits(false, 7240 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 7241 le32_encode_bits(true, 7242 
RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 7243 slot_h2c->roles[i].macid_main_bitmap = 7244 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 7245 slot_h2c->roles[i].macid_paired_bitmap = 7246 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 7247 } 7248 7249 calc_len: 7250 return struct_size(slot_h2c, roles, slot_arg->role_num); 7251 } 7252 7253 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 7254 const struct rtw89_fw_mrc_add_arg *arg) 7255 { 7256 struct rtw89_h2c_mrc_add *h2c_head; 7257 struct sk_buff *skb; 7258 unsigned int i; 7259 void *tmp; 7260 u32 len; 7261 int ret; 7262 7263 len = sizeof(*h2c_head); 7264 for (i = 0; i < arg->slot_num; i++) 7265 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 7266 7267 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7268 if (!skb) { 7269 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 7270 return -ENOMEM; 7271 } 7272 7273 skb_put(skb, len); 7274 tmp = skb->data; 7275 7276 h2c_head = tmp; 7277 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 7278 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 7279 le32_encode_bits(arg->sch_type, 7280 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 7281 le32_encode_bits(arg->slot_num, 7282 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 7283 le32_encode_bits(arg->btc_in_sch, 7284 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 7285 7286 tmp += sizeof(*h2c_head); 7287 for (i = 0; i < arg->slot_num; i++) 7288 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 7289 7290 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7291 H2C_CAT_MAC, 7292 H2C_CL_MRC, 7293 H2C_FUNC_ADD_MRC, 0, 0, 7294 len); 7295 7296 ret = rtw89_h2c_tx(rtwdev, skb, false); 7297 if (ret) { 7298 rtw89_err(rtwdev, "failed to send h2c\n"); 7299 dev_kfree_skb_any(skb); 7300 return -EBUSY; 7301 } 7302 7303 return 0; 7304 } 7305 7306 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 7307 const struct rtw89_fw_mrc_start_arg *arg) 7308 { 7309 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7310 struct rtw89_h2c_mrc_start *h2c; 7311 u32 len = sizeof(*h2c); 7312 struct sk_buff *skb; 7313 unsigned int cond; 7314 7315 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7316 if (!skb) { 7317 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 7318 return -ENOMEM; 7319 } 7320 7321 skb_put(skb, len); 7322 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 7323 7324 h2c->w0 = le32_encode_bits(arg->sch_idx, 7325 RTW89_H2C_MRC_START_W0_SCH_IDX) | 7326 le32_encode_bits(arg->old_sch_idx, 7327 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 7328 le32_encode_bits(arg->action, 7329 RTW89_H2C_MRC_START_W0_ACTION); 7330 7331 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 7332 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 7333 7334 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7335 H2C_CAT_MAC, 7336 H2C_CL_MRC, 7337 H2C_FUNC_START_MRC, 0, 0, 7338 len); 7339 7340 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 7341 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7342 } 7343 7344 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx) 7345 { 7346 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7347 struct rtw89_h2c_mrc_del *h2c; 7348 u32 len = sizeof(*h2c); 7349 struct sk_buff *skb; 7350 unsigned int cond; 7351 7352 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7353 if (!skb) { 7354 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 7355 return -ENOMEM; 7356 } 7357 7358 skb_put(skb, len); 7359 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 7360 7361 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX); 7362 7363 
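/* Deleting an MRC schedule only identifies it by sch_idx; firmware is
 * expected to tear down every slot previously added for that schedule. As
 * with the other MRC requests, completion is signalled through the wait
 * condition built below from (sch_idx, H2C_FUNC_DEL_MRC).
 */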
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7364 H2C_CAT_MAC, 7365 H2C_CL_MRC, 7366 H2C_FUNC_DEL_MRC, 0, 0, 7367 len); 7368 7369 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 7370 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7371 } 7372 7373 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 7374 const struct rtw89_fw_mrc_req_tsf_arg *arg, 7375 struct rtw89_mac_mrc_tsf_rpt *rpt) 7376 { 7377 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7378 struct rtw89_h2c_mrc_req_tsf *h2c; 7379 struct rtw89_mac_mrc_tsf_rpt *tmp; 7380 struct sk_buff *skb; 7381 unsigned int i; 7382 u32 len; 7383 int ret; 7384 7385 len = struct_size(h2c, infos, arg->num); 7386 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7387 if (!skb) { 7388 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 7389 return -ENOMEM; 7390 } 7391 7392 skb_put(skb, len); 7393 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 7394 7395 h2c->req_tsf_num = arg->num; 7396 for (i = 0; i < arg->num; i++) 7397 h2c->infos[i] = 7398 u8_encode_bits(arg->infos[i].band, 7399 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 7400 u8_encode_bits(arg->infos[i].port, 7401 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 7402 7403 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7404 H2C_CAT_MAC, 7405 H2C_CL_MRC, 7406 H2C_FUNC_MRC_REQ_TSF, 0, 0, 7407 len); 7408 7409 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 7410 if (ret) 7411 return ret; 7412 7413 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 7414 *rpt = *tmp; 7415 7416 return 0; 7417 } 7418 7419 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 7420 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 7421 { 7422 struct rtw89_h2c_mrc_upd_bitmap *h2c; 7423 u32 len = sizeof(*h2c); 7424 struct sk_buff *skb; 7425 int ret; 7426 7427 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7428 if (!skb) { 7429 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 7430 return -ENOMEM; 7431 } 7432 7433 skb_put(skb, len); 7434 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 7435 7436 h2c->w0 = le32_encode_bits(arg->sch_idx, 7437 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 7438 le32_encode_bits(arg->action, 7439 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 7440 le32_encode_bits(arg->macid, 7441 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 7442 h2c->w1 = le32_encode_bits(arg->client_macid, 7443 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 7444 7445 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7446 H2C_CAT_MAC, 7447 H2C_CL_MRC, 7448 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 7449 len); 7450 7451 ret = rtw89_h2c_tx(rtwdev, skb, false); 7452 if (ret) { 7453 rtw89_err(rtwdev, "failed to send h2c\n"); 7454 dev_kfree_skb_any(skb); 7455 return -EBUSY; 7456 } 7457 7458 return 0; 7459 } 7460 7461 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 7462 const struct rtw89_fw_mrc_sync_arg *arg) 7463 { 7464 struct rtw89_h2c_mrc_sync *h2c; 7465 u32 len = sizeof(*h2c); 7466 struct sk_buff *skb; 7467 int ret; 7468 7469 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7470 if (!skb) { 7471 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 7472 return -ENOMEM; 7473 } 7474 7475 skb_put(skb, len); 7476 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 7477 7478 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 7479 le32_encode_bits(arg->src.port, 7480 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 7481 le32_encode_bits(arg->src.band, 7482 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 7483 le32_encode_bits(arg->dest.port, 7484 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 7485 le32_encode_bits(arg->dest.band, 
int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
			  const struct rtw89_fw_mrc_sync_arg *arg)
{
	struct rtw89_h2c_mrc_sync *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_sync *)skb->data;

	h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
		  le32_encode_bits(arg->src.port,
				   RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
		  le32_encode_bits(arg->src.band,
				   RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
		  le32_encode_bits(arg->dest.port,
				   RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
		  le32_encode_bits(arg->dest.band,
				   RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
	h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_SYNC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
{
	struct rtw89_h2c_mrc_upd_duration *h2c;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, slots, arg->slot_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
		  le32_encode_bits(arg->slot_num,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
		  le32_encode_bits(false,
				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);

	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	for (i = 0; i < arg->slot_num; i++) {
		h2c->slots[i] =
			le32_encode_bits(arg->slots[i].slot_idx,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
			le32_encode_bits(arg->slots[i].duration,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
	static const u8 zeros[U8_MAX] = {};

	return memcmp(ext_ptr, zeros, ext_len) == 0;
}

#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)	\
({							\
	u8 __var_sz = sizeof(*(e));			\
	bool __accept;					\
	if (__var_sz >= (ent_sz))			\
		__accept = true;			\
	else						\
		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
						     (ent_sz) - __var_sz);\
	__accept;					\
})

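/*
 * Added note (not from the original sources): the TX power tables below
 * appear to be parsed from a firmware file element, so conf->ent_sz (the
 * entry size the file was built with) may exceed the struct size this
 * driver knows about. __fw_txpwr_entry_acceptable() acts as a forward
 * compatibility guard: an oversized entry is accepted only when the
 * trailing, unknown bytes are all zero; e.g. with sizeof(*e) == 8 and
 * ent_sz == 12, the last 4 bytes must be zero or the entry is skipped.
 * The byrate entries that follow additionally pack up to four 8-bit
 * power values into the little-endian "data" word, which
 * rtw89_fw_load_txpwr_byrate() peels off one byte at a time starting at
 * rate index "shf".
 */
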
static bool
fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
		return false;

	switch (e->rs) {
	case RTW89_RS_CCK:
		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
			return false;
		break;
	case RTW89_RS_OFDM:
		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
			return false;
		break;
	case RTW89_RS_MCS:
		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
		    e->nss >= RTW89_NSS_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_HEDCM:
		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
		    e->nss >= RTW89_NSS_HEDCM_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_OFFSET:
		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

static
void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_conf *conf = tbl->data;
	struct rtw89_fw_txpwr_byrate_entry entry = {};
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	const void *cursor;
	u32 data;
	s8 *byr;
	int i;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
			continue;

		byr_head = &rtwdev->byr[entry.band][entry.bw];
		data = le32_to_cpu(entry.data);
		desc.ofdma = entry.ofdma;
		desc.nss = entry.nss;
		desc.rs = entry.rs;

		for (i = 0; i < entry.len; i++, data >>= 8) {
			desc.idx = entry.shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}

static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_2G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_5G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

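/*
 * Added note (not from the original sources): the band-specific limit
 * loaders above (2 GHz / 5 GHz) and below (6 GHz) all follow the same
 * pattern: walk the firmware-provided entries with
 * rtw89_for_each_in_txpwr_conf(), drop anything whose indices fall
 * outside the driver's table bounds, and store entry.v at
 * [bw][nt][rs][bf][regd][ch_idx]. The 6 GHz variants below carry one
 * extra index, reg_6ghz_power, so limits can be kept per 6 GHz
 * regulatory power category.
 */
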
static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_6G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

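/*
 * Added note (not from the original sources): the *_lmt_ru_* tables
 * appear to be the resource-unit (OFDMA RU) counterparts of the limits
 * above. They are indexed by [ru][nt][regd][ch_idx] only; the
 * rate-section and beamforming dimensions do not apply here, and the
 * 6 GHz variant again carries the extra reg_6ghz_power index.
 */
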
static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

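/*
 * Added commentary (not from the original sources): this helper decides
 * which RFE parameter tables to use. When no firmware-provided rfe_data
 * exists it simply returns the caller's defaults; otherwise it starts
 * from a copy of @init and, for every table section that
 * rtw89_txpwr_conf_valid() reports as present, loads that table and
 * repoints the corresponding rule at the firmware-provided copy. A
 * hypothetical caller during chip setup might look like the following
 * (member names and call site are illustrative assumptions only):
 *
 *	const struct rtw89_rfe_parms *dflt = chip_default_rfe_parms;
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, dflt);
 */
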
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	return parms;
}