1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include <linux/if_arp.h> 6 #include "cam.h" 7 #include "chan.h" 8 #include "coex.h" 9 #include "debug.h" 10 #include "fw.h" 11 #include "mac.h" 12 #include "phy.h" 13 #include "ps.h" 14 #include "reg.h" 15 #include "util.h" 16 #include "wow.h" 17 18 struct rtw89_eapol_2_of_2 { 19 u8 gtkbody[14]; 20 u8 key_des_ver; 21 u8 rsvd[92]; 22 } __packed; 23 24 struct rtw89_sa_query { 25 u8 category; 26 u8 action; 27 } __packed; 28 29 struct rtw89_arp_rsp { 30 u8 llc_hdr[sizeof(rfc1042_header)]; 31 __be16 llc_type; 32 struct arphdr arp_hdr; 33 u8 sender_hw[ETH_ALEN]; 34 __be32 sender_ip; 35 u8 target_hw[ETH_ALEN]; 36 __be32 target_ip; 37 } __packed; 38 39 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 40 41 union rtw89_fw_element_arg { 42 size_t offset; 43 enum rtw89_rf_path rf_path; 44 enum rtw89_fw_type fw_type; 45 }; 46 47 struct rtw89_fw_element_handler { 48 int (*fn)(struct rtw89_dev *rtwdev, 49 const struct rtw89_fw_element_hdr *elm, 50 const union rtw89_fw_element_arg arg); 51 const union rtw89_fw_element_arg arg; 52 const char *name; 53 }; 54 55 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 56 struct sk_buff *skb); 57 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 58 struct rtw89_wait_info *wait, unsigned int cond); 59 60 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 61 bool header) 62 { 63 struct sk_buff *skb; 64 u32 header_len = 0; 65 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 66 67 if (header) 68 header_len = H2C_HEADER_LEN; 69 70 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 71 if (!skb) 72 return NULL; 73 skb_reserve(skb, header_len + h2c_desc_size); 74 memset(skb->data, 0, len); 75 76 return skb; 77 } 78 79 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 80 { 81 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 82 } 83 84 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 85 { 86 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 87 } 88 89 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 90 { 91 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 92 u8 val; 93 int ret; 94 95 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 96 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 97 1, FWDL_WAIT_CNT, false, rtwdev, type); 98 if (ret) { 99 switch (val) { 100 case RTW89_FWDL_CHECKSUM_FAIL: 101 rtw89_err(rtwdev, "fw checksum fail\n"); 102 return -EINVAL; 103 104 case RTW89_FWDL_SECURITY_FAIL: 105 rtw89_err(rtwdev, "fw security fail\n"); 106 return -EINVAL; 107 108 case RTW89_FWDL_CV_NOT_MATCH: 109 rtw89_err(rtwdev, "fw cv not match\n"); 110 return -EINVAL; 111 112 default: 113 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 114 return -EBUSY; 115 } 116 } 117 118 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 119 120 return 0; 121 } 122 123 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 124 struct rtw89_fw_bin_info *info) 125 { 126 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 127 struct rtw89_fw_hdr_section_info *section_info; 128 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 129 const struct rtw89_fw_hdr_section *section; 130 const u8 *fw_end = fw + len; 131 const u8 *bin; 132 u32 base_hdr_len; 133 u32 mssc_len = 0; 134 u32 i; 135 136 if (!info) 137 return -EINVAL; 138 139 info->section_num = 
le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 140 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 141 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 142 143 if (info->dynamic_hdr_en) { 144 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 145 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 146 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 147 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 148 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 149 return -EINVAL; 150 } 151 } else { 152 info->hdr_len = base_hdr_len; 153 info->dynamic_hdr_len = 0; 154 } 155 156 bin = fw + info->hdr_len; 157 158 /* jump to section header */ 159 section_info = info->section_info; 160 for (i = 0; i < info->section_num; i++) { 161 section = &fw_hdr->sections[i]; 162 section_info->type = 163 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 164 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 165 section_info->mssc = 166 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 167 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN; 168 } else { 169 section_info->mssc = 0; 170 } 171 172 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 173 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 174 section_info->len += FWDL_SECTION_CHKSUM_LEN; 175 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 176 section_info->dladdr = 177 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 178 section_info->addr = bin; 179 bin += section_info->len; 180 section_info++; 181 } 182 183 if (fw_end != bin + mssc_len) { 184 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 185 return -EINVAL; 186 } 187 188 return 0; 189 } 190 191 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 192 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 193 u32 rmp_tbl_size, u32 *key_idx) 194 { 195 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 196 u32 sel_byte_idx; 197 u32 mss_sel_idx; 198 u8 sel_bit_idx; 199 int i; 200 201 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 202 if (!mss_hdr->defen) 203 return -ENOENT; 204 205 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 206 sec->mss_key_num; 207 } else { 208 if (mss_hdr->defen) 209 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 210 else 211 mss_sel_idx = 0; 212 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 213 le16_to_cpu(mss_hdr->msscust_max) + 214 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 215 sec->mss_key_num; 216 } 217 218 sel_byte_idx = mss_sel_idx >> 3; 219 sel_bit_idx = mss_sel_idx & 0x7; 220 221 if (sel_byte_idx >= rmp_tbl_size) 222 return -EFAULT; 223 224 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 225 return -ENOENT; 226 227 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 228 229 for (i = 0; i < sel_byte_idx; i++) 230 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 231 232 return 0; 233 } 234 235 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 236 struct rtw89_fw_bin_info *info, 237 struct rtw89_fw_hdr_section_info *section_info, 238 const struct rtw89_fw_hdr_section_v1 *section, 239 const void *content, 240 u32 *mssc_len) 241 { 242 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 243 const union rtw89_fw_section_mssc_content *section_content = content; 244 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 245 u32 rmp_tbl_size; 246 u32 key_sign_len; 247 u32 real_key_idx; 248 
u32 sb_sel_ver; 249 int ret; 250 251 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 252 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 253 return -ENOENT; 254 } 255 256 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 257 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 258 le16_to_cpu(mss_hdr->msscust_max) * 259 mss_hdr->mssdev_max) >> 3; 260 if (mss_hdr->defen) 261 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 262 } else { 263 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 264 mss_hdr->rmpfmt); 265 return -EINVAL; 266 } 267 268 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 269 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 270 rmp_tbl_size, (int)sizeof(*mss_hdr), 271 le32_to_cpu(mss_hdr->key_raw_offset)); 272 return -EINVAL; 273 } 274 275 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 276 if (!key_sign_len) 277 key_sign_len = 512; 278 279 if (info->dsp_checksum) 280 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 281 282 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 283 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 284 285 if (!sec->secure_boot) 286 goto out; 287 288 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 289 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 290 goto ignore; 291 292 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 293 if (ret) 294 goto ignore; 295 296 section_info->key_addr = content + section_info->len + 297 le32_to_cpu(mss_hdr->key_raw_offset) + 298 key_sign_len * real_key_idx; 299 section_info->key_len = key_sign_len; 300 section_info->key_idx = real_key_idx; 301 302 out: 303 if (info->secure_section_exist) { 304 section_info->ignore = true; 305 return 0; 306 } 307 308 info->secure_section_exist = true; 309 310 return 0; 311 312 ignore: 313 section_info->ignore = true; 314 315 return 0; 316 } 317 318 static int __parse_security_section(struct rtw89_dev *rtwdev, 319 struct rtw89_fw_bin_info *info, 320 struct rtw89_fw_hdr_section_info *section_info, 321 const struct rtw89_fw_hdr_section_v1 *section, 322 const void *content, 323 u32 *mssc_len) 324 { 325 int ret; 326 327 section_info->mssc = 328 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 329 330 if (section_info->mssc == FORMATTED_MSSC) { 331 ret = __parse_formatted_mssc(rtwdev, info, section_info, 332 section, content, mssc_len); 333 if (ret) 334 return -EINVAL; 335 } else { 336 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 337 if (info->dsp_checksum) 338 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 339 340 info->secure_section_exist = true; 341 } 342 343 return 0; 344 } 345 346 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 347 struct rtw89_fw_bin_info *info) 348 { 349 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 350 struct rtw89_fw_hdr_section_info *section_info; 351 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 352 const struct rtw89_fw_hdr_section_v1 *section; 353 const u8 *fw_end = fw + len; 354 const u8 *bin; 355 u32 base_hdr_len; 356 u32 mssc_len; 357 int ret; 358 u32 i; 359 360 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 361 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 362 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 363 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 364 365 if (info->dynamic_hdr_en) { 366 info->hdr_len = 
le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 367 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 368 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 369 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 370 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 371 return -EINVAL; 372 } 373 } else { 374 info->hdr_len = base_hdr_len; 375 info->dynamic_hdr_len = 0; 376 } 377 378 bin = fw + info->hdr_len; 379 380 /* jump to section header */ 381 section_info = info->section_info; 382 for (i = 0; i < info->section_num; i++) { 383 section = &fw_hdr->sections[i]; 384 385 section_info->type = 386 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 387 section_info->len = 388 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 389 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 390 section_info->len += FWDL_SECTION_CHKSUM_LEN; 391 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 392 section_info->dladdr = 393 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 394 section_info->addr = bin; 395 396 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 397 ret = __parse_security_section(rtwdev, info, section_info, 398 section, bin, &mssc_len); 399 if (ret) 400 return ret; 401 } else { 402 section_info->mssc = 0; 403 mssc_len = 0; 404 } 405 406 rtw89_debug(rtwdev, RTW89_DBG_FW, 407 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 408 i, section_info->type, section_info->len, 409 section_info->mssc, mssc_len, bin - fw); 410 rtw89_debug(rtwdev, RTW89_DBG_FW, 411 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 412 section_info->ignore, section_info->key_addr, 413 section_info->key_addr ? 414 section_info->key_addr - section_info->addr : 0, 415 section_info->key_len, section_info->key_idx); 416 417 bin += section_info->len + mssc_len; 418 section_info++; 419 } 420 421 if (fw_end != bin) { 422 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 423 return -EINVAL; 424 } 425 426 if (!info->secure_section_exist) 427 rtw89_warn(rtwdev, "no firmware secure section\n"); 428 429 return 0; 430 } 431 432 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 433 const struct rtw89_fw_suit *fw_suit, 434 struct rtw89_fw_bin_info *info) 435 { 436 const u8 *fw = fw_suit->data; 437 u32 len = fw_suit->size; 438 439 if (!fw || !len) { 440 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 441 return -ENOENT; 442 } 443 444 switch (fw_suit->hdr_ver) { 445 case 0: 446 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 447 case 1: 448 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 449 default: 450 return -ENOENT; 451 } 452 } 453 454 static 455 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 456 struct rtw89_fw_suit *fw_suit, bool nowarn) 457 { 458 struct rtw89_fw_info *fw_info = &rtwdev->fw; 459 const struct firmware *firmware = fw_info->req.firmware; 460 const u8 *mfw = firmware->data; 461 u32 mfw_len = firmware->size; 462 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 463 const struct rtw89_mfw_info *mfw_info = NULL, *tmp; 464 int i; 465 466 if (mfw_hdr->sig != RTW89_MFW_SIG) { 467 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 468 /* legacy firmware support normal type only */ 469 if (type != RTW89_FW_NORMAL) 470 return -EINVAL; 471 fw_suit->data = mfw; 472 fw_suit->size = mfw_len; 473 return 0; 474 } 475 476 for (i = 0; i < mfw_hdr->fw_nr; i++) { 477 tmp = &mfw_hdr->info[i]; 478 if (tmp->type != type) 479 
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* WiFi firmware entries in the firmware file are not sorted by
		 * version, so walk them all and take the closest version that
		 * is equal to or lower than the chip's CV.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch
(fw_suit->hdr_ver) { 576 case 0: 577 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 578 break; 579 case 1: 580 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 581 break; 582 default: 583 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 584 fw_suit->hdr_ver); 585 return -ENOENT; 586 } 587 588 rtw89_info(rtwdev, 589 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 590 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 591 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 592 593 return 0; 594 } 595 596 static 597 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 598 bool nowarn) 599 { 600 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 601 int ret; 602 603 ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 604 if (ret) 605 return ret; 606 607 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 608 } 609 610 static 611 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 612 const struct rtw89_fw_element_hdr *elm, 613 const union rtw89_fw_element_arg arg) 614 { 615 enum rtw89_fw_type type = arg.fw_type; 616 struct rtw89_hal *hal = &rtwdev->hal; 617 struct rtw89_fw_suit *fw_suit; 618 619 /* Version of BB MCU is in decreasing order in firmware file, so take 620 * first equal or less version, which is equal or less but closest version. 621 */ 622 if (hal->cv < elm->u.bbmcu.cv) 623 return 1; /* ignore this element */ 624 625 fw_suit = rtw89_fw_suit_get(rtwdev, type); 626 if (fw_suit->data) 627 return 1; /* ignore this element (a firmware is taken already) */ 628 629 fw_suit->data = elm->u.bbmcu.contents; 630 fw_suit->size = le32_to_cpu(elm->size); 631 632 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 633 } 634 635 #define __DEF_FW_FEAT_COND(__cond, __op) \ 636 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 637 { \ 638 return suit_ver_code __op comp_ver_code; \ 639 } 640 641 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 642 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 643 __DEF_FW_FEAT_COND(lt, <); /* less than */ 644 645 struct __fw_feat_cfg { 646 enum rtw89_core_chip_id chip_id; 647 enum rtw89_fw_feature feature; 648 u32 ver_code; 649 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 650 }; 651 652 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 653 { \ 654 .chip_id = _chip, \ 655 .feature = RTW89_FW_FEATURE_ ## _feat, \ 656 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 657 .cond = __fw_feat_cond_ ## _cond, \ 658 } 659 660 static const struct __fw_feat_cfg fw_feat_tbl[] = { 661 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 662 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 663 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 664 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 665 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 666 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 667 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 668 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 669 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 670 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 671 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 672 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 673 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG), 674 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE), 675 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER), 676 __CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD), 677 
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the WoWLAN firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log-format firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

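/* Convert a BB/RF register element from the firmware file into a
 * struct rtw89_phy_table kept in rtwdev->fw.elm_info.
 */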
static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple elements match, the last one wins */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matches exactly, accept the default entry */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

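/* Build the TX power tracking (delta swing) tables from a firmware-file
 * element. The element's bitmap must at least cover every band the chip
 * supports, otherwise the element is rejected.
 */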
static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore it */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
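	/* The remaining TXPWR_* elements below all reuse
	 * rtw89_fw_recognize_txpwr_from_elm(); only the rtw89_rfe_data
	 * offset passed through the handler argument differs.
	 */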
[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 1011 rtw89_fw_recognize_txpwr_from_elm, 1012 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 1013 }, 1014 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 1015 rtw89_fw_recognize_txpwr_from_elm, 1016 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 1017 }, 1018 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 1019 rtw89_fw_recognize_txpwr_from_elm, 1020 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 1021 }, 1022 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 1023 rtw89_fw_recognize_txpwr_from_elm, 1024 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 1025 }, 1026 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 1027 rtw89_fw_recognize_txpwr_from_elm, 1028 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 1029 }, 1030 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 1031 rtw89_fw_recognize_txpwr_from_elm, 1032 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 1033 }, 1034 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 1035 rtw89_fw_recognize_txpwr_from_elm, 1036 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 1037 }, 1038 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 1039 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 1040 }, 1041 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 1042 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 1043 }, 1044 }; 1045 1046 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 1047 { 1048 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1049 const struct firmware *firmware = fw_info->req.firmware; 1050 const struct rtw89_chip_info *chip = rtwdev->chip; 1051 u32 unrecognized_elements = chip->needed_fw_elms; 1052 const struct rtw89_fw_element_handler *handler; 1053 const struct rtw89_fw_element_hdr *hdr; 1054 u32 elm_size; 1055 u32 elem_id; 1056 u32 offset; 1057 int ret; 1058 1059 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM); 1060 1061 offset = rtw89_mfw_get_size(rtwdev); 1062 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1063 if (offset == 0) 1064 return -EINVAL; 1065 1066 while (offset + sizeof(*hdr) < firmware->size) { 1067 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1068 1069 elm_size = le32_to_cpu(hdr->size); 1070 if (offset + elm_size >= firmware->size) { 1071 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1072 break; 1073 } 1074 1075 elem_id = le32_to_cpu(hdr->id); 1076 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1077 goto next; 1078 1079 handler = &__fw_element_handlers[elem_id]; 1080 if (!handler->fn) 1081 goto next; 1082 1083 ret = handler->fn(rtwdev, hdr, handler->arg); 1084 if (ret == 1) /* ignore this element */ 1085 goto next; 1086 if (ret) 1087 return ret; 1088 1089 if (handler->name) 1090 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1091 handler->name, hdr->ver); 1092 1093 unrecognized_elements &= ~BIT(elem_id); 1094 next: 1095 offset += sizeof(*hdr) + elm_size; 1096 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1097 } 1098 1099 if (unrecognized_elements) { 1100 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1101 unrecognized_elements); 1102 return -ENOENT; 1103 } 1104 1105 return 0; 1106 } 1107 1108 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1109 u8 type, u8 cat, u8 class, u8 func, 1110 bool rack, bool dack, u32 len) 1111 { 1112 struct fwcmd_hdr *hdr; 1113 1114 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1115 1116 if (!(rtwdev->fw.h2c_seq % 4)) 1117 rack = true; 1118 hdr->hdr0 
= cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1119 FIELD_PREP(H2C_HDR_CAT, cat) | 1120 FIELD_PREP(H2C_HDR_CLASS, class) | 1121 FIELD_PREP(H2C_HDR_FUNC, func) | 1122 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1123 1124 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1125 len + H2C_HEADER_LEN) | 1126 (rack ? H2C_HDR_REC_ACK : 0) | 1127 (dack ? H2C_HDR_DONE_ACK : 0)); 1128 1129 rtwdev->fw.h2c_seq++; 1130 } 1131 1132 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1133 struct sk_buff *skb, 1134 u8 type, u8 cat, u8 class, u8 func, 1135 u32 len) 1136 { 1137 struct fwcmd_hdr *hdr; 1138 1139 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1140 1141 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1142 FIELD_PREP(H2C_HDR_CAT, cat) | 1143 FIELD_PREP(H2C_HDR_CLASS, class) | 1144 FIELD_PREP(H2C_HDR_FUNC, func) | 1145 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1146 1147 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1148 len + H2C_HEADER_LEN)); 1149 } 1150 1151 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1152 struct rtw89_fw_bin_info *info, 1153 struct rtw89_fw_hdr *fw_hdr) 1154 { 1155 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1156 FW_HDR_W7_PART_SIZE); 1157 1158 return 0; 1159 } 1160 1161 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1162 struct rtw89_fw_bin_info *info, 1163 struct rtw89_fw_hdr_v1 *fw_hdr) 1164 { 1165 struct rtw89_fw_hdr_section_info *section_info; 1166 struct rtw89_fw_hdr_section_v1 *section; 1167 u8 dst_sec_idx = 0; 1168 u8 sec_idx; 1169 1170 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1171 FW_HDR_V1_W7_PART_SIZE); 1172 1173 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1174 section_info = &info->section_info[sec_idx]; 1175 section = &fw_hdr->sections[sec_idx]; 1176 1177 if (section_info->ignore) 1178 continue; 1179 1180 if (dst_sec_idx != sec_idx) 1181 fw_hdr->sections[dst_sec_idx] = *section; 1182 1183 dst_sec_idx++; 1184 } 1185 1186 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1187 1188 return (info->section_num - dst_sec_idx) * sizeof(*section); 1189 } 1190 1191 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1192 const struct rtw89_fw_suit *fw_suit, 1193 struct rtw89_fw_bin_info *info) 1194 { 1195 u32 len = info->hdr_len - info->dynamic_hdr_len; 1196 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1197 const u8 *fw = fw_suit->data; 1198 struct rtw89_fw_hdr *fw_hdr; 1199 struct sk_buff *skb; 1200 u32 truncated; 1201 u32 ret = 0; 1202 1203 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1204 if (!skb) { 1205 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1206 return -ENOMEM; 1207 } 1208 1209 skb_put_data(skb, fw, len); 1210 1211 switch (fw_suit->hdr_ver) { 1212 case 0: 1213 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1214 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1215 break; 1216 case 1: 1217 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1218 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1219 break; 1220 default: 1221 ret = -EOPNOTSUPP; 1222 goto fail; 1223 } 1224 1225 if (truncated) { 1226 len -= truncated; 1227 skb_trim(skb, len); 1228 } 1229 1230 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1231 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1232 H2C_FUNC_MAC_FWHDR_DL, len); 1233 1234 ret = rtw89_h2c_tx(rtwdev, skb, false); 1235 if (ret) { 1236 rtw89_err(rtwdev, "failed to send h2c\n"); 1237 ret = -1; 1238 goto fail; 1239 } 1240 1241 return 0; 
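	/* Error path: free the locally allocated H2C skb before returning. */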
1242 fail: 1243 dev_kfree_skb_any(skb); 1244 1245 return ret; 1246 } 1247 1248 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1249 const struct rtw89_fw_suit *fw_suit, 1250 struct rtw89_fw_bin_info *info) 1251 { 1252 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1253 int ret; 1254 1255 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info); 1256 if (ret) { 1257 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1258 return ret; 1259 } 1260 1261 ret = mac->fwdl_check_path_ready(rtwdev, false); 1262 if (ret) { 1263 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1264 return ret; 1265 } 1266 1267 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1268 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1269 1270 return 0; 1271 } 1272 1273 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1274 struct rtw89_fw_hdr_section_info *info) 1275 { 1276 struct sk_buff *skb; 1277 const u8 *section = info->addr; 1278 u32 residue_len = info->len; 1279 bool copy_key = false; 1280 u32 pkt_len; 1281 int ret; 1282 1283 if (info->ignore) 1284 return 0; 1285 1286 if (info->key_addr && info->key_len) { 1287 if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1288 rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n", 1289 info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len); 1290 else 1291 copy_key = true; 1292 } 1293 1294 while (residue_len) { 1295 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1296 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1297 else 1298 pkt_len = residue_len; 1299 1300 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1301 if (!skb) { 1302 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1303 return -ENOMEM; 1304 } 1305 skb_put_data(skb, section, pkt_len); 1306 1307 if (copy_key) 1308 memcpy(skb->data + pkt_len - info->key_len, 1309 info->key_addr, info->key_len); 1310 1311 ret = rtw89_h2c_tx(rtwdev, skb, true); 1312 if (ret) { 1313 rtw89_err(rtwdev, "failed to send h2c\n"); 1314 ret = -1; 1315 goto fail; 1316 } 1317 1318 section += pkt_len; 1319 residue_len -= pkt_len; 1320 } 1321 1322 return 0; 1323 fail: 1324 dev_kfree_skb_any(skb); 1325 1326 return ret; 1327 } 1328 1329 static enum rtw89_fwdl_check_type 1330 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1331 const struct rtw89_fw_suit *fw_suit) 1332 { 1333 switch (fw_suit->type) { 1334 case RTW89_FW_BBMCU0: 1335 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1336 case RTW89_FW_BBMCU1: 1337 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1338 default: 1339 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1340 } 1341 } 1342 1343 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1344 const struct rtw89_fw_suit *fw_suit, 1345 struct rtw89_fw_bin_info *info) 1346 { 1347 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1348 const struct rtw89_chip_info *chip = rtwdev->chip; 1349 enum rtw89_fwdl_check_type chk_type; 1350 u8 section_num = info->section_num; 1351 int ret; 1352 1353 while (section_num--) { 1354 ret = __rtw89_fw_download_main(rtwdev, section_info); 1355 if (ret) 1356 return ret; 1357 section_info++; 1358 } 1359 1360 if (chip->chip_gen == RTW89_CHIP_AX) 1361 return 0; 1362 1363 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1364 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1365 if (ret) { 1366 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1367 fw_suit->type); 1368 return ret; 1369 } 1370 1371 return 0; 1372 } 1373 1374 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1375 { 1376 enum rtw89_chip_gen chip_gen = 
rtwdev->chip->chip_gen; 1377 u32 addr = R_AX_DBG_PORT_SEL; 1378 u32 val32; 1379 u16 index; 1380 1381 if (chip_gen == RTW89_CHIP_BE) { 1382 addr = R_BE_WLCPU_PORT_PC; 1383 goto dump; 1384 } 1385 1386 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1387 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1388 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1389 rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1390 1391 dump: 1392 for (index = 0; index < 15; index++) { 1393 val32 = rtw89_read32(rtwdev, addr); 1394 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1395 fsleep(10); 1396 } 1397 } 1398 1399 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1400 { 1401 u32 val32; 1402 1403 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1404 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1405 1406 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1407 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1408 1409 rtw89_fw_prog_cnt_dump(rtwdev); 1410 } 1411 1412 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1413 struct rtw89_fw_suit *fw_suit) 1414 { 1415 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1416 struct rtw89_fw_bin_info info = {}; 1417 int ret; 1418 1419 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1420 if (ret) { 1421 rtw89_err(rtwdev, "parse fw header fail\n"); 1422 return ret; 1423 } 1424 1425 if (rtwdev->chip->chip_id == RTL8922A && 1426 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1427 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1428 1429 ret = mac->fwdl_check_path_ready(rtwdev, true); 1430 if (ret) { 1431 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1432 return ret; 1433 } 1434 1435 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1436 if (ret) 1437 return ret; 1438 1439 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1440 if (ret) 1441 return ret; 1442 1443 return 0; 1444 } 1445 1446 static 1447 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1448 bool include_bb) 1449 { 1450 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1451 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1452 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1453 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1454 int ret; 1455 int i; 1456 1457 mac->disable_cpu(rtwdev); 1458 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1459 if (ret) 1460 return ret; 1461 1462 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1463 if (ret) 1464 goto fwdl_err; 1465 1466 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1467 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1468 1469 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1470 if (ret) 1471 goto fwdl_err; 1472 } 1473 1474 fw_info->h2c_seq = 0; 1475 fw_info->rec_seq = 0; 1476 fw_info->h2c_counter = 0; 1477 fw_info->c2h_counter = 0; 1478 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1479 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1480 1481 mdelay(5); 1482 1483 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1484 if (ret) { 1485 rtw89_warn(rtwdev, "download firmware fail\n"); 1486 goto fwdl_err; 1487 } 1488 1489 return ret; 1490 1491 fwdl_err: 1492 rtw89_fw_dl_fail_dump(rtwdev); 1493 return ret; 1494 } 1495 1496 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1497 bool include_bb) 1498 { 1499 int retry; 1500 int ret; 1501 1502 for (retry = 0; retry < 5; retry++) { 1503 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1504 if (!ret) 1505 return 0; 
1506 } 1507 1508 return ret; 1509 } 1510 1511 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1512 { 1513 struct rtw89_fw_info *fw = &rtwdev->fw; 1514 1515 wait_for_completion(&fw->req.completion); 1516 if (!fw->req.firmware) 1517 return -EINVAL; 1518 1519 return 0; 1520 } 1521 1522 static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev, 1523 struct rtw89_fw_req_info *req, 1524 const char *fw_name, bool nowarn) 1525 { 1526 int ret; 1527 1528 if (req->firmware) { 1529 rtw89_debug(rtwdev, RTW89_DBG_FW, 1530 "full firmware has been early requested\n"); 1531 complete_all(&req->completion); 1532 return 0; 1533 } 1534 1535 if (nowarn) 1536 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1537 else 1538 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1539 1540 complete_all(&req->completion); 1541 1542 return ret; 1543 } 1544 1545 void rtw89_load_firmware_work(struct work_struct *work) 1546 { 1547 struct rtw89_dev *rtwdev = 1548 container_of(work, struct rtw89_dev, load_firmware_work); 1549 const struct rtw89_chip_info *chip = rtwdev->chip; 1550 char fw_name[64]; 1551 1552 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1553 chip->fw_basename, rtwdev->fw.fw_format); 1554 1555 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1556 } 1557 1558 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1559 { 1560 if (!tbl) 1561 return; 1562 1563 kfree(tbl->regs); 1564 kfree(tbl); 1565 } 1566 1567 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1568 { 1569 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1570 int i; 1571 1572 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1573 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1574 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1575 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1576 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1577 1578 kfree(elm_info->txpwr_trk); 1579 kfree(elm_info->rfk_log_fmt); 1580 } 1581 1582 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1583 { 1584 struct rtw89_fw_info *fw = &rtwdev->fw; 1585 1586 cancel_work_sync(&rtwdev->load_firmware_work); 1587 1588 if (fw->req.firmware) { 1589 release_firmware(fw->req.firmware); 1590 1591 /* assign NULL back in case rtw89_free_ieee80211_hw() 1592 * try to release the same one again. 
1593 */ 1594 fw->req.firmware = NULL; 1595 } 1596 1597 kfree(fw->log.fmts); 1598 rtw89_unload_firmware_elements(rtwdev); 1599 } 1600 1601 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1602 { 1603 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1604 u32 i; 1605 1606 if (fmt_id > fw_log->last_fmt_id) 1607 return 0; 1608 1609 for (i = 0; i < fw_log->fmt_count; i++) { 1610 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1611 return i; 1612 } 1613 return 0; 1614 } 1615 1616 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1617 { 1618 struct rtw89_fw_log *log = &rtwdev->fw.log; 1619 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1620 struct rtw89_fw_suit *suit = &log->suit; 1621 const void *fmts_ptr, *fmts_end_ptr; 1622 u32 fmt_count; 1623 int i; 1624 1625 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1626 fmt_count = le32_to_cpu(suit_hdr->count); 1627 log->fmt_ids = suit_hdr->ids; 1628 fmts_ptr = &suit_hdr->ids[fmt_count]; 1629 fmts_end_ptr = suit->data + suit->size; 1630 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1631 if (!log->fmts) 1632 return -ENOMEM; 1633 1634 for (i = 0; i < fmt_count; i++) { 1635 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1636 if (!fmts_ptr) 1637 break; 1638 1639 (*log->fmts)[i] = fmts_ptr; 1640 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1641 log->fmt_count++; 1642 fmts_ptr += strlen(fmts_ptr); 1643 } 1644 1645 return 0; 1646 } 1647 1648 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1649 { 1650 struct rtw89_fw_log *log = &rtwdev->fw.log; 1651 struct rtw89_fw_suit *suit = &log->suit; 1652 1653 if (!suit || !suit->data) { 1654 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1655 return -EINVAL; 1656 } 1657 if (log->fmts) 1658 return 0; 1659 1660 return rtw89_fw_log_create_fmts_dict(rtwdev); 1661 } 1662 1663 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1664 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1665 u32 fmt_idx, u8 para_int, bool raw_data) 1666 { 1667 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1668 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1669 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1670 int i; 1671 1672 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1673 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1674 log_fmt->argc); 1675 return; 1676 } 1677 1678 if (para_int) 1679 for (i = 0 ; i < log_fmt->argc; i++) 1680 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1681 1682 if (raw_data) { 1683 if (para_int) 1684 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1685 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1686 para_int, log_fmt->argc, (int)sizeof(args), args); 1687 else 1688 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1689 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1690 para_int, log_fmt->argc, log_fmt->u.raw); 1691 } else { 1692 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1693 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1694 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1695 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1696 args[0xf]); 1697 } 1698 1699 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1700 } 1701 1702 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1703 { 1704 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1705 u8 para_int; 1706 u32 fmt_idx; 1707 1708 if (len < RTW89_C2H_HEADER_LEN) { 1709 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1710 return; 1711 } 1712 1713 buf += RTW89_C2H_HEADER_LEN; 1714 len -= 
RTW89_C2H_HEADER_LEN; 1715 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1716 1717 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1718 goto plain_log; 1719 1720 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1721 goto plain_log; 1722 1723 if (!rtwdev->fw.log.fmts) 1724 return; 1725 1726 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1727 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1728 1729 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1730 rtw89_info(rtwdev, "C2H log: %s%s", 1731 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1732 else if (fmt_idx != 0 && para_int) 1733 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1734 else 1735 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1736 return; 1737 1738 plain_log: 1739 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1740 1741 } 1742 1743 #define H2C_CAM_LEN 60 1744 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1745 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 1746 { 1747 struct sk_buff *skb; 1748 int ret; 1749 1750 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1751 if (!skb) { 1752 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1753 return -ENOMEM; 1754 } 1755 skb_put(skb, H2C_CAM_LEN); 1756 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 1757 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 1758 1759 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1760 H2C_CAT_MAC, 1761 H2C_CL_MAC_ADDR_CAM_UPDATE, 1762 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1763 H2C_CAM_LEN); 1764 1765 ret = rtw89_h2c_tx(rtwdev, skb, false); 1766 if (ret) { 1767 rtw89_err(rtwdev, "failed to send h2c\n"); 1768 goto fail; 1769 } 1770 1771 return 0; 1772 fail: 1773 dev_kfree_skb_any(skb); 1774 1775 return ret; 1776 } 1777 1778 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1779 struct rtw89_vif *rtwvif, 1780 struct rtw89_sta *rtwsta) 1781 { 1782 struct rtw89_h2c_dctlinfo_ud_v1 *h2c; 1783 u32 len = sizeof(*h2c); 1784 struct sk_buff *skb; 1785 int ret; 1786 1787 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1788 if (!skb) { 1789 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1790 return -ENOMEM; 1791 } 1792 skb_put(skb, len); 1793 h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data; 1794 1795 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c); 1796 1797 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1798 H2C_CAT_MAC, 1799 H2C_CL_MAC_FR_EXCHG, 1800 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1801 len); 1802 1803 ret = rtw89_h2c_tx(rtwdev, skb, false); 1804 if (ret) { 1805 rtw89_err(rtwdev, "failed to send h2c\n"); 1806 goto fail; 1807 } 1808 1809 return 0; 1810 fail: 1811 dev_kfree_skb_any(skb); 1812 1813 return ret; 1814 } 1815 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1816 1817 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1818 struct rtw89_vif *rtwvif, 1819 struct rtw89_sta *rtwsta) 1820 { 1821 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1822 u32 len = sizeof(*h2c); 1823 struct sk_buff *skb; 1824 int ret; 1825 1826 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1827 if (!skb) { 1828 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1829 return -ENOMEM; 1830 } 1831 skb_put(skb, len); 1832 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1833 1834 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c); 1835 1836 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1837 
H2C_CAT_MAC, 1838 H2C_CL_MAC_FR_EXCHG, 1839 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1840 len); 1841 1842 ret = rtw89_h2c_tx(rtwdev, skb, false); 1843 if (ret) { 1844 rtw89_err(rtwdev, "failed to send h2c\n"); 1845 goto fail; 1846 } 1847 1848 return 0; 1849 fail: 1850 dev_kfree_skb_any(skb); 1851 1852 return ret; 1853 } 1854 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1855 1856 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1857 struct rtw89_vif *rtwvif, 1858 struct rtw89_sta *rtwsta) 1859 { 1860 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1861 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1862 u32 len = sizeof(*h2c); 1863 struct sk_buff *skb; 1864 int ret; 1865 1866 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1867 if (!skb) { 1868 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 1869 return -ENOMEM; 1870 } 1871 skb_put(skb, len); 1872 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1873 1874 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 1875 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 1876 1877 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 1878 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 1879 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 1880 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 1881 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 1882 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 1883 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 1884 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 1885 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 1886 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 1887 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 1888 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 1889 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 1890 1891 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1892 H2C_CAT_MAC, 1893 H2C_CL_MAC_FR_EXCHG, 1894 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1895 len); 1896 1897 ret = rtw89_h2c_tx(rtwdev, skb, false); 1898 if (ret) { 1899 rtw89_err(rtwdev, "failed to send h2c\n"); 1900 goto fail; 1901 } 1902 1903 return 0; 1904 fail: 1905 dev_kfree_skb_any(skb); 1906 1907 return ret; 1908 } 1909 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 1910 1911 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 1912 bool valid, struct ieee80211_ampdu_params *params) 1913 { 1914 const struct rtw89_chip_info *chip = rtwdev->chip; 1915 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 1916 struct rtw89_h2c_ba_cam *h2c; 1917 u8 macid = rtwsta->mac_id; 1918 u32 len = sizeof(*h2c); 1919 struct sk_buff *skb; 1920 u8 entry_idx; 1921 int ret; 1922 1923 ret = valid ? 1924 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 1925 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 1926 if (ret) { 1927 /* it still works even if we don't have static BA CAM, because 1928 * hardware can create dynamic BA CAM automatically. 1929 */ 1930 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1931 "failed to %s entry tid=%d for h2c ba cam\n", 1932 valid ? 
"alloc" : "free", params->tid); 1933 return 0; 1934 } 1935 1936 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1937 if (!skb) { 1938 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 1939 return -ENOMEM; 1940 } 1941 skb_put(skb, len); 1942 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 1943 1944 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 1945 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 1946 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 1947 else 1948 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 1949 if (!valid) 1950 goto end; 1951 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 1952 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 1953 if (params->buf_size > 64) 1954 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1955 else 1956 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1957 /* If init req is set, hw will set the ssn */ 1958 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 1959 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 1960 1961 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 1962 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 1963 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND); 1964 } 1965 1966 end: 1967 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1968 H2C_CAT_MAC, 1969 H2C_CL_BA_CAM, 1970 H2C_FUNC_MAC_BA_CAM, 0, 1, 1971 len); 1972 1973 ret = rtw89_h2c_tx(rtwdev, skb, false); 1974 if (ret) { 1975 rtw89_err(rtwdev, "failed to send h2c\n"); 1976 goto fail; 1977 } 1978 1979 return 0; 1980 fail: 1981 dev_kfree_skb_any(skb); 1982 1983 return ret; 1984 } 1985 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 1986 1987 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 1988 u8 entry_idx, u8 uid) 1989 { 1990 struct rtw89_h2c_ba_cam *h2c; 1991 u32 len = sizeof(*h2c); 1992 struct sk_buff *skb; 1993 int ret; 1994 1995 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1996 if (!skb) { 1997 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 1998 return -ENOMEM; 1999 } 2000 skb_put(skb, len); 2001 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2002 2003 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2004 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2005 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2006 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2007 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2008 2009 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2010 H2C_CAT_MAC, 2011 H2C_CL_BA_CAM, 2012 H2C_FUNC_MAC_BA_CAM, 0, 1, 2013 len); 2014 2015 ret = rtw89_h2c_tx(rtwdev, skb, false); 2016 if (ret) { 2017 rtw89_err(rtwdev, "failed to send h2c\n"); 2018 goto fail; 2019 } 2020 2021 return 0; 2022 fail: 2023 dev_kfree_skb_any(skb); 2024 2025 return ret; 2026 } 2027 2028 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2029 { 2030 const struct rtw89_chip_info *chip = rtwdev->chip; 2031 u8 entry_idx = chip->bacam_num; 2032 u8 uid = 0; 2033 int i; 2034 2035 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2036 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2037 entry_idx++; 2038 uid++; 2039 } 2040 } 2041 2042 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 2043 bool valid, struct ieee80211_ampdu_params *params) 2044 { 2045 const struct rtw89_chip_info *chip = rtwdev->chip; 2046 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 2047 struct rtw89_h2c_ba_cam_v1 *h2c; 2048 u8 macid = 
rtwsta->mac_id; 2049 u32 len = sizeof(*h2c); 2050 struct sk_buff *skb; 2051 u8 entry_idx; 2052 u8 bmap_size; 2053 int ret; 2054 2055 ret = valid ? 2056 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 2057 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 2058 if (ret) { 2059 /* it still works even if we don't have static BA CAM, because 2060 * hardware can create dynamic BA CAM automatically. 2061 */ 2062 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2063 "failed to %s entry tid=%d for h2c ba cam\n", 2064 valid ? "alloc" : "free", params->tid); 2065 return 0; 2066 } 2067 2068 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2069 if (!skb) { 2070 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2071 return -ENOMEM; 2072 } 2073 skb_put(skb, len); 2074 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2075 2076 if (params->buf_size > 512) 2077 bmap_size = 10; 2078 else if (params->buf_size > 256) 2079 bmap_size = 8; 2080 else if (params->buf_size > 64) 2081 bmap_size = 4; 2082 else 2083 bmap_size = 0; 2084 2085 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2086 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2087 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2088 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2089 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2090 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2091 2092 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2093 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2094 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2095 le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2096 2097 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2098 H2C_CAT_MAC, 2099 H2C_CL_BA_CAM, 2100 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2101 len); 2102 2103 ret = rtw89_h2c_tx(rtwdev, skb, false); 2104 if (ret) { 2105 rtw89_err(rtwdev, "failed to send h2c\n"); 2106 goto fail; 2107 } 2108 2109 return 0; 2110 fail: 2111 dev_kfree_skb_any(skb); 2112 2113 return ret; 2114 } 2115 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2116 2117 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2118 u8 offset, u8 mac_idx) 2119 { 2120 struct rtw89_h2c_ba_cam_init *h2c; 2121 u32 len = sizeof(*h2c); 2122 struct sk_buff *skb; 2123 int ret; 2124 2125 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2126 if (!skb) { 2127 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2128 return -ENOMEM; 2129 } 2130 skb_put(skb, len); 2131 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2132 2133 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2134 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2135 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2136 2137 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2138 H2C_CAT_MAC, 2139 H2C_CL_BA_CAM, 2140 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2141 len); 2142 2143 ret = rtw89_h2c_tx(rtwdev, skb, false); 2144 if (ret) { 2145 rtw89_err(rtwdev, "failed to send h2c\n"); 2146 goto fail; 2147 } 2148 2149 return 0; 2150 fail: 2151 dev_kfree_skb_any(skb); 2152 2153 return ret; 2154 } 2155 2156 #define H2C_LOG_CFG_LEN 12 2157 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2158 { 2159 struct sk_buff *skb; 2160 u32 comp = 0; 2161 int ret; 2162 2163 if (enable) 2164 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2165 
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2166 BIT(RTW89_FW_LOG_COMP_SCAN); 2167 2168 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2169 if (!skb) { 2170 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2171 return -ENOMEM; 2172 } 2173 2174 skb_put(skb, H2C_LOG_CFG_LEN); 2175 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2176 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2177 SET_LOG_CFG_COMP(skb->data, comp); 2178 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2179 2180 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2181 H2C_CAT_MAC, 2182 H2C_CL_FW_INFO, 2183 H2C_FUNC_LOG_CFG, 0, 0, 2184 H2C_LOG_CFG_LEN); 2185 2186 ret = rtw89_h2c_tx(rtwdev, skb, false); 2187 if (ret) { 2188 rtw89_err(rtwdev, "failed to send h2c\n"); 2189 goto fail; 2190 } 2191 2192 return 0; 2193 fail: 2194 dev_kfree_skb_any(skb); 2195 2196 return ret; 2197 } 2198 2199 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2200 struct rtw89_vif *rtwvif) 2201 { 2202 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2203 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2204 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2205 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2206 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2207 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2208 struct rtw89_eapol_2_of_2 *eapol_pkt; 2209 struct ieee80211_hdr_3addr *hdr; 2210 struct sk_buff *skb; 2211 u8 key_des_ver; 2212 2213 if (rtw_wow->ptk_alg == 3) 2214 key_des_ver = 1; 2215 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2216 key_des_ver = 2; 2217 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2218 key_des_ver = 3; 2219 else 2220 key_des_ver = 0; 2221 2222 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2223 if (!skb) 2224 return NULL; 2225 2226 hdr = skb_put_zero(skb, sizeof(*hdr)); 2227 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2228 IEEE80211_FCTL_TODS | 2229 IEEE80211_FCTL_PROTECTED); 2230 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2231 ether_addr_copy(hdr->addr2, vif->addr); 2232 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2233 2234 skb_put_zero(skb, sec_hdr_len); 2235 2236 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2237 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2238 eapol_pkt->key_des_ver = key_des_ver; 2239 2240 return skb; 2241 } 2242 2243 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2244 struct rtw89_vif *rtwvif) 2245 { 2246 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2247 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2248 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2249 struct ieee80211_hdr_3addr *hdr; 2250 struct rtw89_sa_query *sa_query; 2251 struct sk_buff *skb; 2252 2253 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2254 if (!skb) 2255 return NULL; 2256 2257 hdr = skb_put_zero(skb, sizeof(*hdr)); 2258 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2259 IEEE80211_STYPE_ACTION | 2260 IEEE80211_FCTL_PROTECTED); 2261 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2262 ether_addr_copy(hdr->addr2, vif->addr); 2263 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2264 2265 skb_put_zero(skb, sec_hdr_len); 2266 2267 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2268 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2269 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2270 2271 return skb; 2272 } 2273 2274 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2275 struct rtw89_vif 
*rtwvif) 2276 { 2277 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2278 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2279 struct ieee80211_hdr_3addr *hdr; 2280 struct rtw89_arp_rsp *arp_skb; 2281 struct arphdr *arp_hdr; 2282 struct sk_buff *skb; 2283 __le16 fc; 2284 2285 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2286 if (!skb) 2287 return NULL; 2288 2289 hdr = skb_put_zero(skb, sizeof(*hdr)); 2290 2291 if (rtw_wow->ptk_alg) 2292 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2293 IEEE80211_FCTL_PROTECTED); 2294 else 2295 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2296 2297 hdr->frame_control = fc; 2298 ether_addr_copy(hdr->addr1, rtwvif->bssid); 2299 ether_addr_copy(hdr->addr2, rtwvif->mac_addr); 2300 ether_addr_copy(hdr->addr3, rtwvif->bssid); 2301 2302 skb_put_zero(skb, sec_hdr_len); 2303 2304 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2305 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2306 arp_skb->llc_type = htons(ETH_P_ARP); 2307 2308 arp_hdr = &arp_skb->arp_hdr; 2309 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2310 arp_hdr->ar_pro = htons(ETH_P_IP); 2311 arp_hdr->ar_hln = ETH_ALEN; 2312 arp_hdr->ar_pln = 4; 2313 arp_hdr->ar_op = htons(ARPOP_REPLY); 2314 2315 ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr); 2316 arp_skb->sender_ip = rtwvif->ip_addr; 2317 2318 return skb; 2319 } 2320 2321 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2322 struct rtw89_vif *rtwvif, 2323 enum rtw89_fw_pkt_ofld_type type, 2324 u8 *id) 2325 { 2326 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2327 struct rtw89_pktofld_info *info; 2328 struct sk_buff *skb; 2329 int ret; 2330 2331 info = kzalloc(sizeof(*info), GFP_KERNEL); 2332 if (!info) 2333 return -ENOMEM; 2334 2335 switch (type) { 2336 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2337 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2338 break; 2339 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2340 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2341 break; 2342 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2343 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2344 break; 2345 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2346 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2347 break; 2348 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2349 skb = rtw89_eapol_get(rtwdev, rtwvif); 2350 break; 2351 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2352 skb = rtw89_sa_query_get(rtwdev, rtwvif); 2353 break; 2354 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2355 skb = rtw89_arp_response_get(rtwdev, rtwvif); 2356 break; 2357 default: 2358 goto err; 2359 } 2360 2361 if (!skb) 2362 goto err; 2363 2364 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2365 kfree_skb(skb); 2366 2367 if (ret) 2368 goto err; 2369 2370 list_add_tail(&info->list, &rtwvif->general_pkt_list); 2371 *id = info->id; 2372 return 0; 2373 2374 err: 2375 kfree(info); 2376 return -ENOMEM; 2377 } 2378 2379 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2380 struct rtw89_vif *rtwvif, bool notify_fw) 2381 { 2382 struct list_head *pkt_list = &rtwvif->general_pkt_list; 2383 struct rtw89_pktofld_info *info, *tmp; 2384 2385 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2386 if (notify_fw) 2387 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2388 else 2389 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2390 list_del(&info->list); 2391 kfree(info); 2392 } 2393 } 2394 2395 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2396 { 2397 struct rtw89_vif *rtwvif; 2398 2399 
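	/* Walk every active vif and release its offloaded general packets.
	 * When notify_fw is false, only the driver-side pkt_offload bitmap
	 * is cleared (no H2C delete is sent), which presumably is meant for
	 * paths where the firmware state is being torn down anyway.
	 */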
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->chanctx_idx);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_h2c_lps_ch_info *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u32 done;
	int ret;

	if (chip->chip_gen != RTW89_CHIP_BE)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed 
to alloc skb for h2c lps_ch_info\n"); 2514 return -ENOMEM; 2515 } 2516 skb_put(skb, len); 2517 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2518 2519 h2c->info[0].central_ch = chan->channel; 2520 h2c->info[0].pri_ch = chan->primary_channel; 2521 h2c->info[0].band = chan->band_type; 2522 h2c->info[0].bw = chan->band_width; 2523 h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2524 2525 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2526 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2527 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2528 2529 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2530 ret = rtw89_h2c_tx(rtwdev, skb, false); 2531 if (ret) { 2532 rtw89_err(rtwdev, "failed to send h2c\n"); 2533 goto fail; 2534 } 2535 2536 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2537 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2538 if (ret) 2539 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2540 2541 return 0; 2542 fail: 2543 dev_kfree_skb_any(skb); 2544 2545 return ret; 2546 } 2547 2548 #define H2C_P2P_ACT_LEN 20 2549 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2550 struct ieee80211_p2p_noa_desc *desc, 2551 u8 act, u8 noa_id) 2552 { 2553 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2554 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2555 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 2556 struct sk_buff *skb; 2557 u8 *cmd; 2558 int ret; 2559 2560 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2561 if (!skb) { 2562 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2563 return -ENOMEM; 2564 } 2565 skb_put(skb, H2C_P2P_ACT_LEN); 2566 cmd = skb->data; 2567 2568 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 2569 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2570 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2571 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2572 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2573 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2574 if (desc) { 2575 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2576 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2577 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2578 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2579 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2580 } 2581 2582 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2583 H2C_CAT_MAC, H2C_CL_MAC_PS, 2584 H2C_FUNC_P2P_ACT, 0, 0, 2585 H2C_P2P_ACT_LEN); 2586 2587 ret = rtw89_h2c_tx(rtwdev, skb, false); 2588 if (ret) { 2589 rtw89_err(rtwdev, "failed to send h2c\n"); 2590 goto fail; 2591 } 2592 2593 return 0; 2594 fail: 2595 dev_kfree_skb_any(skb); 2596 2597 return ret; 2598 } 2599 2600 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2601 struct sk_buff *skb) 2602 { 2603 const struct rtw89_chip_info *chip = rtwdev->chip; 2604 struct rtw89_hal *hal = &rtwdev->hal; 2605 u8 ntx_path; 2606 u8 map_b; 2607 2608 if (chip->rf_path_num == 1) { 2609 ntx_path = RF_A; 2610 map_b = 0; 2611 } else { 2612 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2613 map_b = hal->antenna_tx == RF_AB ? 
1 : 0;
	}

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif,
				  struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl);

int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct rtw89_sta *rtwsta)
{
	u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2678 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2679 u32 len = sizeof(*h2c); 2680 struct sk_buff *skb; 2681 int ret; 2682 2683 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2684 if (!skb) { 2685 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2686 return -ENOMEM; 2687 } 2688 skb_put(skb, len); 2689 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2690 2691 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2692 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2693 2694 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2695 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2696 2697 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2698 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2699 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2700 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2701 2702 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2703 2704 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2705 2706 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2707 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2708 2709 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2710 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2711 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2712 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2713 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2714 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2715 2716 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2717 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2718 2719 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2720 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2721 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2722 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2723 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2724 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2725 2726 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2727 2728 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2729 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2730 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2731 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2732 2733 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2734 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2735 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2736 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2737 2738 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2739 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2740 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2741 len); 2742 2743 ret = rtw89_h2c_tx(rtwdev, skb, false); 2744 if (ret) { 2745 rtw89_err(rtwdev, "failed to send h2c\n"); 2746 goto fail; 2747 } 2748 2749 return 0; 2750 fail: 2751 dev_kfree_skb_any(skb); 2752 2753 return ret; 2754 } 2755 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2756 2757 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2758 struct ieee80211_sta *sta, u8 *pads) 2759 { 2760 bool ppe_th; 2761 u8 ppe16, ppe8; 2762 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2763 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 2764 u8 ru_bitmap; 2765 u8 n, idx, sh; 2766 u16 ppe; 2767 int i; 2768 2769 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2770 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 2771 if (!ppe_th) { 2772 u8 pad; 2773 2774 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2775 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 2776 2777 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = RTW89_PE_DURATION_16;
		else if (ppe8 != 7)
			pads[i] = RTW89_PE_DURATION_8;
		else
			pads[i] = RTW89_PE_DURATION_0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->chanctx_idx);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta && sta->deflink.he_cap.has_he)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

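	/* Wrap the CMAC control table in the H2C command header. The CCTLINFO
	 * function ID differs per chip generation, so it is taken from
	 * chip->h2c_cctl_func_id rather than a fixed H2C_FUNC_* constant.
	 */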
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2873 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2874 chip->h2c_cctl_func_id, 0, 1, 2875 H2C_CMC_TBL_LEN); 2876 2877 ret = rtw89_h2c_tx(rtwdev, skb, false); 2878 if (ret) { 2879 rtw89_err(rtwdev, "failed to send h2c\n"); 2880 goto fail; 2881 } 2882 2883 return 0; 2884 fail: 2885 dev_kfree_skb_any(skb); 2886 2887 return ret; 2888 } 2889 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 2890 2891 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 2892 struct ieee80211_sta *sta, u8 *pads) 2893 { 2894 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2895 u16 ppe_thres_hdr; 2896 u8 ppe16, ppe8; 2897 u8 n, idx, sh; 2898 u8 ru_bitmap; 2899 bool ppe_th; 2900 u16 ppe; 2901 int i; 2902 2903 ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2904 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 2905 if (!ppe_th) { 2906 u8 pad; 2907 2908 pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2909 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 2910 2911 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 2912 pads[i] = pad; 2913 2914 return; 2915 } 2916 2917 ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres); 2918 ru_bitmap = u16_get_bits(ppe_thres_hdr, 2919 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2920 n = hweight8(ru_bitmap); 2921 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 2922 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2923 2924 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2925 if (!(ru_bitmap & BIT(i))) { 2926 pads[i] = 1; 2927 continue; 2928 } 2929 2930 idx = n >> 3; 2931 sh = n & 7; 2932 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2933 2934 ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx); 2935 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2936 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 2937 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2938 2939 if (ppe16 != 7 && ppe8 == 7) 2940 pads[i] = RTW89_PE_DURATION_16_20; 2941 else if (ppe8 != 7) 2942 pads[i] = RTW89_PE_DURATION_8; 2943 else 2944 pads[i] = RTW89_PE_DURATION_0; 2945 } 2946 } 2947 2948 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2949 struct ieee80211_vif *vif, 2950 struct ieee80211_sta *sta) 2951 { 2952 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2953 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2954 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); 2955 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2956 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2957 u8 pads[RTW89_PPE_BW_NUM]; 2958 u32 len = sizeof(*h2c); 2959 struct sk_buff *skb; 2960 u16 lowest_rate; 2961 int ret; 2962 2963 memset(pads, 0, sizeof(pads)); 2964 if (sta) { 2965 if (sta->deflink.eht_cap.has_eht) 2966 __get_sta_eht_pkt_padding(rtwdev, sta, pads); 2967 else if (sta->deflink.he_cap.has_he) 2968 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2969 } 2970 2971 if (vif->p2p) 2972 lowest_rate = RTW89_HW_RATE_OFDM6; 2973 else if (chan->band_type == RTW89_BAND_2G) 2974 lowest_rate = RTW89_HW_RATE_CCK1; 2975 else 2976 lowest_rate = RTW89_HW_RATE_OFDM6; 2977 2978 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2979 if (!skb) { 2980 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2981 return -ENOMEM; 2982 } 2983 skb_put(skb, len); 2984 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2985 2986 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2987 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2988 2989 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 2990 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 2991 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 2992 CCTLINFO_G7_W0_DISDATAFB); 2993 2994 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2995 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2996 2997 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2998 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2999 3000 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3001 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3002 3003 h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3004 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3005 3006 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3007 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3008 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3009 } 3010 3011 if (vif->bss_conf.eht_support) { 3012 u16 punct = vif->bss_conf.chanreq.oper.punctured; 3013 3014 h2c->w4 |= le32_encode_bits(~punct, 3015 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3016 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3017 } 3018 3019 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3020 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3021 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3022 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3023 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3024 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3025 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3026 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3027 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3028 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3029 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3030 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3031 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3032 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3033 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3034 3035 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3036 CCTLINFO_G7_W6_ULDL); 3037 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3038 3039 if (sta) { 3040 h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he, 3041 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3042 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3043 } 3044 3045 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3046 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3047 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3048 len); 3049 3050 ret = rtw89_h2c_tx(rtwdev, skb, false); 3051 if (ret) { 3052 rtw89_err(rtwdev, "failed to send h2c\n"); 3053 goto fail; 3054 } 3055 3056 return 0; 3057 fail: 3058 dev_kfree_skb_any(skb); 3059 3060 return ret; 3061 } 3062 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3063 3064 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3065 struct ieee80211_vif *vif, 3066 struct ieee80211_sta *sta) 3067 { 3068 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 3069 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3070 u32 len = sizeof(*h2c); 3071 struct sk_buff *skb; 3072 u16 agg_num = 0; 3073 u8 ba_bmap = 0; 3074 int ret; 3075 u8 tid; 3076 3077 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3078 if (!skb) { 3079 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3080 return -ENOMEM; 3081 } 3082 skb_put(skb, len); 3083 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3084 3085 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3086 if (agg_num == 0) 3087 agg_num = rtwsta->ampdu_params[tid].agg_num; 3088 else 3089 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3090 } 3091 3092 if (agg_num <= 0x20) 3093 ba_bmap = 3; 3094 else if (agg_num > 0x20 && agg_num <= 0x40) 3095 ba_bmap = 0; 3096 else if (agg_num > 0x40 && agg_num <= 0x80) 3097 ba_bmap = 1; 3098 else if (agg_num > 0x80 && agg_num <= 0x100) 3099 ba_bmap = 2; 3100 else if (agg_num > 0x100 && agg_num <= 0x200) 3101 ba_bmap = 4; 3102 else if (agg_num > 0x200 && agg_num <= 0x400) 3103 ba_bmap = 5; 3104 3105 h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) | 3106 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3107 3108 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3109 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3110 3111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3112 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3113 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3114 len); 3115 3116 ret = rtw89_h2c_tx(rtwdev, skb, false); 3117 if (ret) { 3118 rtw89_err(rtwdev, "failed to send h2c\n"); 3119 goto fail; 3120 } 3121 3122 return 0; 3123 fail: 3124 dev_kfree_skb_any(skb); 3125 3126 return ret; 3127 } 3128 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3129 3130 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3131 struct rtw89_sta *rtwsta) 3132 { 3133 const struct rtw89_chip_info *chip = rtwdev->chip; 3134 struct sk_buff *skb; 3135 int ret; 3136 3137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3138 if (!skb) { 3139 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3140 return -ENOMEM; 3141 } 3142 skb_put(skb, H2C_CMC_TBL_LEN); 3143 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 3144 SET_CTRL_INFO_OPERATION(skb->data, 1); 3145 if (rtwsta->cctl_tx_time) { 3146 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3147 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 3148 } 3149 if (rtwsta->cctl_tx_retry_limit) { 3150 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3151 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 3152 } 3153 3154 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3155 H2C_CAT_MAC, 
H2C_CL_MAC_FR_EXCHG, 3156 chip->h2c_cctl_func_id, 0, 1, 3157 H2C_CMC_TBL_LEN); 3158 3159 ret = rtw89_h2c_tx(rtwdev, skb, false); 3160 if (ret) { 3161 rtw89_err(rtwdev, "failed to send h2c\n"); 3162 goto fail; 3163 } 3164 3165 return 0; 3166 fail: 3167 dev_kfree_skb_any(skb); 3168 3169 return ret; 3170 } 3171 3172 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 3173 struct rtw89_sta *rtwsta) 3174 { 3175 const struct rtw89_chip_info *chip = rtwdev->chip; 3176 struct sk_buff *skb; 3177 int ret; 3178 3179 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 3180 return 0; 3181 3182 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3183 if (!skb) { 3184 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3185 return -ENOMEM; 3186 } 3187 skb_put(skb, H2C_CMC_TBL_LEN); 3188 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 3189 SET_CTRL_INFO_OPERATION(skb->data, 1); 3190 3191 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 3192 3193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3194 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3195 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 3196 H2C_CMC_TBL_LEN); 3197 3198 ret = rtw89_h2c_tx(rtwdev, skb, false); 3199 if (ret) { 3200 rtw89_err(rtwdev, "failed to send h2c\n"); 3201 goto fail; 3202 } 3203 3204 return 0; 3205 fail: 3206 dev_kfree_skb_any(skb); 3207 3208 return ret; 3209 } 3210 3211 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 3212 struct rtw89_vif *rtwvif) 3213 { 3214 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 3215 rtwvif->chanctx_idx); 3216 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3217 struct rtw89_h2c_bcn_upd *h2c; 3218 struct sk_buff *skb_beacon; 3219 struct ieee80211_hdr *hdr; 3220 u32 len = sizeof(*h2c); 3221 struct sk_buff *skb; 3222 int bcn_total_len; 3223 u16 beacon_rate; 3224 u16 tim_offset; 3225 void *noa_data; 3226 u8 noa_len; 3227 int ret; 3228 3229 if (vif->p2p) 3230 beacon_rate = RTW89_HW_RATE_OFDM6; 3231 else if (chan->band_type == RTW89_BAND_2G) 3232 beacon_rate = RTW89_HW_RATE_CCK1; 3233 else 3234 beacon_rate = RTW89_HW_RATE_OFDM6; 3235 3236 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3237 NULL, 0); 3238 if (!skb_beacon) { 3239 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3240 return -ENOMEM; 3241 } 3242 3243 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 3244 if (noa_len && 3245 (noa_len <= skb_tailroom(skb_beacon) || 3246 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3247 skb_put_data(skb_beacon, noa_data, noa_len); 3248 } 3249 3250 hdr = (struct ieee80211_hdr *)skb_beacon; 3251 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3252 3253 bcn_total_len = len + skb_beacon->len; 3254 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3255 if (!skb) { 3256 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3257 dev_kfree_skb_any(skb_beacon); 3258 return -ENOMEM; 3259 } 3260 skb_put(skb, len); 3261 h2c = (struct rtw89_h2c_bcn_upd *)skb->data; 3262 3263 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) | 3264 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) | 3265 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) | 3266 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST); 3267 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) | 3268 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) | 3269 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) | 3270 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE); 3271 3272 skb_put_data(skb, 
skb_beacon->data, skb_beacon->len); 3273 dev_kfree_skb_any(skb_beacon); 3274 3275 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3276 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3277 H2C_FUNC_MAC_BCN_UPD, 0, 1, 3278 bcn_total_len); 3279 3280 ret = rtw89_h2c_tx(rtwdev, skb, false); 3281 if (ret) { 3282 rtw89_err(rtwdev, "failed to send h2c\n"); 3283 dev_kfree_skb_any(skb); 3284 return ret; 3285 } 3286 3287 return 0; 3288 } 3289 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon); 3290 3291 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev, 3292 struct rtw89_vif *rtwvif) 3293 { 3294 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); 3295 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3296 struct rtw89_h2c_bcn_upd_be *h2c; 3297 struct sk_buff *skb_beacon; 3298 struct ieee80211_hdr *hdr; 3299 u32 len = sizeof(*h2c); 3300 struct sk_buff *skb; 3301 int bcn_total_len; 3302 u16 beacon_rate; 3303 u16 tim_offset; 3304 void *noa_data; 3305 u8 noa_len; 3306 int ret; 3307 3308 if (vif->p2p) 3309 beacon_rate = RTW89_HW_RATE_OFDM6; 3310 else if (chan->band_type == RTW89_BAND_2G) 3311 beacon_rate = RTW89_HW_RATE_CCK1; 3312 else 3313 beacon_rate = RTW89_HW_RATE_OFDM6; 3314 3315 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 3316 NULL, 0); 3317 if (!skb_beacon) { 3318 rtw89_err(rtwdev, "failed to get beacon skb\n"); 3319 return -ENOMEM; 3320 } 3321 3322 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 3323 if (noa_len && 3324 (noa_len <= skb_tailroom(skb_beacon) || 3325 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 3326 skb_put_data(skb_beacon, noa_data, noa_len); 3327 } 3328 3329 hdr = (struct ieee80211_hdr *)skb_beacon; 3330 tim_offset -= ieee80211_hdrlen(hdr->frame_control); 3331 3332 bcn_total_len = len + skb_beacon->len; 3333 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 3334 if (!skb) { 3335 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3336 dev_kfree_skb_any(skb_beacon); 3337 return -ENOMEM; 3338 } 3339 skb_put(skb, len); 3340 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data; 3341 3342 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) | 3343 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) | 3344 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) | 3345 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST); 3346 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) | 3347 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) | 3348 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) | 3349 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE); 3350 3351 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 3352 dev_kfree_skb_any(skb_beacon); 3353 3354 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3355 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3356 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1, 3357 bcn_total_len); 3358 3359 ret = rtw89_h2c_tx(rtwdev, skb, false); 3360 if (ret) { 3361 rtw89_err(rtwdev, "failed to send h2c\n"); 3362 goto fail; 3363 } 3364 3365 return 0; 3366 3367 fail: 3368 dev_kfree_skb_any(skb); 3369 3370 return ret; 3371 } 3372 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3373 3374 #define H2C_ROLE_MAINTAIN_LEN 4 3375 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3376 struct rtw89_vif *rtwvif, 3377 struct rtw89_sta *rtwsta, 3378 enum rtw89_upd_mode upd_mode) 3379 { 3380 struct sk_buff *skb; 3381 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 3382 u8 self_role; 3383 int ret; 3384 3385 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3386 if (rtwsta) 3387 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3388 else 3389 self_role = rtwvif->self_role; 3390 } else { 3391 self_role = rtwvif->self_role; 3392 } 3393 3394 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3395 if (!skb) { 3396 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3397 return -ENOMEM; 3398 } 3399 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3400 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3401 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3402 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3403 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 3404 3405 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3406 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3407 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3408 H2C_ROLE_MAINTAIN_LEN); 3409 3410 ret = rtw89_h2c_tx(rtwdev, skb, false); 3411 if (ret) { 3412 rtw89_err(rtwdev, "failed to send h2c\n"); 3413 goto fail; 3414 } 3415 3416 return 0; 3417 fail: 3418 dev_kfree_skb_any(skb); 3419 3420 return ret; 3421 } 3422 3423 static enum rtw89_fw_sta_type 3424 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3425 struct rtw89_sta *rtwsta) 3426 { 3427 struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta); 3428 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3429 3430 if (!sta) 3431 goto by_vif; 3432 3433 if (sta->deflink.eht_cap.has_eht) 3434 return RTW89_FW_BE_STA; 3435 else if (sta->deflink.he_cap.has_he) 3436 return RTW89_FW_AX_STA; 3437 else 3438 return RTW89_FW_N_AC_STA; 3439 3440 by_vif: 3441 if (vif->bss_conf.eht_support) 3442 return RTW89_FW_BE_STA; 3443 else if (vif->bss_conf.he_support) 3444 return RTW89_FW_AX_STA; 3445 else 3446 return RTW89_FW_N_AC_STA; 3447 } 3448 3449 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3450 struct rtw89_sta *rtwsta, bool dis_conn) 3451 { 3452 struct sk_buff *skb; 3453 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 3454 u8 self_role = rtwvif->self_role; 3455 enum rtw89_fw_sta_type sta_type; 3456 u8 net_type = rtwvif->net_type; 3457 struct rtw89_h2c_join_v1 *h2c_v1; 3458 struct rtw89_h2c_join *h2c; 3459 u32 len = sizeof(*h2c); 3460 bool format_v1 = false; 3461 int ret; 3462 3463 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3464 len = sizeof(*h2c_v1); 3465 format_v1 = true; 3466 } 3467 3468 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 3469 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3470 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3471 } 3472 3473 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3474 if (!skb) { 3475 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3476 return -ENOMEM; 3477 } 3478 skb_put(skb, len); 3479 h2c = (struct rtw89_h2c_join *)skb->data; 3480 3481 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3482 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3483 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3484 le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3485 le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3486 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3487 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3488 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3489 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3490 le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3491 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3492 le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3493 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3494 3495 if (!format_v1) 3496 goto done; 3497 3498 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3499 3500 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta); 3501 3502 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3503 h2c_v1->w2 = 0; 3504 3505 done: 3506 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3507 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3508 H2C_FUNC_MAC_JOININFO, 0, 1, 3509 len); 3510 3511 ret = rtw89_h2c_tx(rtwdev, skb, false); 3512 if (ret) { 3513 rtw89_err(rtwdev, "failed to send h2c\n"); 3514 goto fail; 3515 } 3516 3517 return 0; 3518 fail: 3519 dev_kfree_skb_any(skb); 3520 3521 return ret; 3522 } 3523 3524 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3525 { 3526 struct rtw89_h2c_notify_dbcc *h2c; 3527 u32 len = sizeof(*h2c); 3528 struct sk_buff *skb; 3529 int ret; 3530 3531 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3532 if (!skb) { 3533 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3534 return -ENOMEM; 3535 } 3536 skb_put(skb, len); 3537 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3538 3539 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3540 3541 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3542 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3543 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3544 len); 3545 3546 ret = rtw89_h2c_tx(rtwdev, skb, false); 3547 if (ret) { 3548 rtw89_err(rtwdev, "failed to send h2c\n"); 3549 goto fail; 3550 } 3551 3552 return 0; 3553 fail: 3554 dev_kfree_skb_any(skb); 3555 3556 return ret; 3557 } 3558 3559 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3560 bool pause) 3561 { 3562 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3563 struct rtw89_fw_macid_pause_grp *h2c; 3564 __le32 set = cpu_to_le32(BIT(sh)); 3565 u8 h2c_macid_pause_id; 3566 struct sk_buff *skb; 3567 u32 len; 3568 int ret; 3569 3570 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3571 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3572 len = sizeof(*h2c_new); 3573 } else { 3574 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3575 len = sizeof(*h2c); 3576 } 3577 3578 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3579 if (!skb) { 3580 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3581 return -ENOMEM; 3582 } 3583 skb_put(skb, len); 3584 3585 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 3586 h2c_new = (struct 
rtw89_fw_macid_pause_sleep_grp *)skb->data; 3587 3588 h2c_new->n[0].pause_mask_grp[grp] = set; 3589 h2c_new->n[0].sleep_mask_grp[grp] = set; 3590 if (pause) { 3591 h2c_new->n[0].pause_grp[grp] = set; 3592 h2c_new->n[0].sleep_grp[grp] = set; 3593 } 3594 } else { 3595 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3596 3597 h2c->mask_grp[grp] = set; 3598 if (pause) 3599 h2c->pause_grp[grp] = set; 3600 } 3601 3602 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3603 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3604 h2c_macid_pause_id, 1, 0, 3605 len); 3606 3607 ret = rtw89_h2c_tx(rtwdev, skb, false); 3608 if (ret) { 3609 rtw89_err(rtwdev, "failed to send h2c\n"); 3610 goto fail; 3611 } 3612 3613 return 0; 3614 fail: 3615 dev_kfree_skb_any(skb); 3616 3617 return ret; 3618 } 3619 3620 #define H2C_EDCA_LEN 12 3621 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3622 u8 ac, u32 val) 3623 { 3624 struct sk_buff *skb; 3625 int ret; 3626 3627 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3628 if (!skb) { 3629 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3630 return -ENOMEM; 3631 } 3632 skb_put(skb, H2C_EDCA_LEN); 3633 RTW89_SET_EDCA_SEL(skb->data, 0); 3634 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 3635 RTW89_SET_EDCA_WMM(skb->data, 0); 3636 RTW89_SET_EDCA_AC(skb->data, ac); 3637 RTW89_SET_EDCA_PARAM(skb->data, val); 3638 3639 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3640 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3641 H2C_FUNC_USR_EDCA, 0, 1, 3642 H2C_EDCA_LEN); 3643 3644 ret = rtw89_h2c_tx(rtwdev, skb, false); 3645 if (ret) { 3646 rtw89_err(rtwdev, "failed to send h2c\n"); 3647 goto fail; 3648 } 3649 3650 return 0; 3651 fail: 3652 dev_kfree_skb_any(skb); 3653 3654 return ret; 3655 } 3656 3657 #define H2C_TSF32_TOGL_LEN 4 3658 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3659 bool en) 3660 { 3661 struct sk_buff *skb; 3662 u16 early_us = en ? 
2000 : 0; 3663 u8 *cmd; 3664 int ret; 3665 3666 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3667 if (!skb) { 3668 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3669 return -ENOMEM; 3670 } 3671 skb_put(skb, H2C_TSF32_TOGL_LEN); 3672 cmd = skb->data; 3673 3674 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 3675 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3676 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 3677 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3678 3679 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3680 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3681 H2C_FUNC_TSF32_TOGL, 0, 0, 3682 H2C_TSF32_TOGL_LEN); 3683 3684 ret = rtw89_h2c_tx(rtwdev, skb, false); 3685 if (ret) { 3686 rtw89_err(rtwdev, "failed to send h2c\n"); 3687 goto fail; 3688 } 3689 3690 return 0; 3691 fail: 3692 dev_kfree_skb_any(skb); 3693 3694 return ret; 3695 } 3696 3697 #define H2C_OFLD_CFG_LEN 8 3698 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3699 { 3700 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3701 struct sk_buff *skb; 3702 int ret; 3703 3704 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3705 if (!skb) { 3706 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3707 return -ENOMEM; 3708 } 3709 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3710 3711 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3712 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3713 H2C_FUNC_OFLD_CFG, 0, 1, 3714 H2C_OFLD_CFG_LEN); 3715 3716 ret = rtw89_h2c_tx(rtwdev, skb, false); 3717 if (ret) { 3718 rtw89_err(rtwdev, "failed to send h2c\n"); 3719 goto fail; 3720 } 3721 3722 return 0; 3723 fail: 3724 dev_kfree_skb_any(skb); 3725 3726 return ret; 3727 } 3728 3729 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 3730 struct ieee80211_vif *vif, 3731 bool connect) 3732 { 3733 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3734 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 3735 s32 thold = RTW89_DEFAULT_CQM_THOLD; 3736 u32 hyst = RTW89_DEFAULT_CQM_HYST; 3737 struct rtw89_h2c_bcnfltr *h2c; 3738 u32 len = sizeof(*h2c); 3739 struct sk_buff *skb; 3740 int ret; 3741 3742 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3743 return -EINVAL; 3744 3745 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3746 return -EINVAL; 3747 3748 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3749 if (!skb) { 3750 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 3751 return -ENOMEM; 3752 } 3753 3754 skb_put(skb, len); 3755 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 3756 3757 if (bss_conf->cqm_rssi_hyst) 3758 hyst = bss_conf->cqm_rssi_hyst; 3759 if (bss_conf->cqm_rssi_thold) 3760 thold = bss_conf->cqm_rssi_thold; 3761 3762 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 3763 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 3764 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 3765 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 3766 RTW89_H2C_BCNFLTR_W0_MODE) | 3767 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 3768 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 3769 le32_encode_bits(thold + MAX_RSSI, 3770 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 3771 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 3772 3773 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3774 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3775 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 3776 3777 ret = rtw89_h2c_tx(rtwdev, skb, false); 3778 if (ret) { 3779 rtw89_err(rtwdev, "failed to send h2c\n"); 3780 goto fail; 3781 } 3782 3783 return 0; 3784 fail: 3785 dev_kfree_skb_any(skb); 3786 3787 return ret; 3788 } 3789 3790 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 3791 struct rtw89_rx_phy_ppdu *phy_ppdu) 3792 { 3793 struct rtw89_h2c_ofld_rssi *h2c; 3794 u32 len = sizeof(*h2c); 3795 struct sk_buff *skb; 3796 s8 rssi; 3797 int ret; 3798 3799 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3800 return -EINVAL; 3801 3802 if (!phy_ppdu) 3803 return -EINVAL; 3804 3805 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3806 if (!skb) { 3807 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 3808 return -ENOMEM; 3809 } 3810 3811 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 3812 skb_put(skb, len); 3813 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 3814 3815 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 3816 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 3817 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 3818 3819 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3820 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3821 H2C_FUNC_OFLD_RSSI, 0, 1, len); 3822 3823 ret = rtw89_h2c_tx(rtwdev, skb, false); 3824 if (ret) { 3825 rtw89_err(rtwdev, "failed to send h2c\n"); 3826 goto fail; 3827 } 3828 3829 return 0; 3830 fail: 3831 dev_kfree_skb_any(skb); 3832 3833 return ret; 3834 } 3835 3836 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3837 { 3838 struct rtw89_traffic_stats *stats = &rtwvif->stats; 3839 struct rtw89_h2c_ofld *h2c; 3840 u32 len = sizeof(*h2c); 3841 struct sk_buff *skb; 3842 int ret; 3843 3844 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3845 return -EINVAL; 3846 3847 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3848 if (!skb) { 3849 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 3850 return -ENOMEM; 3851 } 3852 3853 skb_put(skb, len); 3854 h2c = (struct rtw89_h2c_ofld 
*)skb->data; 3855 3856 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 3857 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 3858 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 3859 3860 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3861 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3862 H2C_FUNC_OFLD_TP, 0, 1, len); 3863 3864 ret = rtw89_h2c_tx(rtwdev, skb, false); 3865 if (ret) { 3866 rtw89_err(rtwdev, "failed to send h2c\n"); 3867 goto fail; 3868 } 3869 3870 return 0; 3871 fail: 3872 dev_kfree_skb_any(skb); 3873 3874 return ret; 3875 } 3876 3877 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 3878 { 3879 const struct rtw89_chip_info *chip = rtwdev->chip; 3880 struct rtw89_h2c_ra_v1 *h2c_v1; 3881 struct rtw89_h2c_ra *h2c; 3882 u32 len = sizeof(*h2c); 3883 bool format_v1 = false; 3884 struct sk_buff *skb; 3885 int ret; 3886 3887 if (chip->chip_gen == RTW89_CHIP_BE) { 3888 len = sizeof(*h2c_v1); 3889 format_v1 = true; 3890 } 3891 3892 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3893 if (!skb) { 3894 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3895 return -ENOMEM; 3896 } 3897 skb_put(skb, len); 3898 h2c = (struct rtw89_h2c_ra *)skb->data; 3899 rtw89_debug(rtwdev, RTW89_DBG_RA, 3900 "ra cmd msk: %llx ", ra->ra_mask); 3901 3902 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 3903 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 3904 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 3905 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 3906 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 3907 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 3908 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 3909 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 3910 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 3911 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 3912 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 3913 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 3914 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 3915 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 3916 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 3917 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 3918 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 3919 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 3920 3921 if (!format_v1) 3922 goto csi; 3923 3924 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 3925 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 3926 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 3927 3928 csi: 3929 if (!csi) 3930 goto done; 3931 3932 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 3933 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 3934 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 3935 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 3936 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 3937 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 3938 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 3939 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 3940 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 3941 3942 done: 3943 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3944 
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 3945 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 3946 len); 3947 3948 ret = rtw89_h2c_tx(rtwdev, skb, false); 3949 if (ret) { 3950 rtw89_err(rtwdev, "failed to send h2c\n"); 3951 goto fail; 3952 } 3953 3954 return 0; 3955 fail: 3956 dev_kfree_skb_any(skb); 3957 3958 return ret; 3959 } 3960 3961 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 3962 { 3963 struct rtw89_btc *btc = &rtwdev->btc; 3964 struct rtw89_btc_dm *dm = &btc->dm; 3965 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 3966 struct rtw89_btc_module *module = &init_info->module; 3967 struct rtw89_btc_ant_info *ant = &module->ant; 3968 struct rtw89_h2c_cxinit *h2c; 3969 u32 len = sizeof(*h2c); 3970 struct sk_buff *skb; 3971 int ret; 3972 3973 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3974 if (!skb) { 3975 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 3976 return -ENOMEM; 3977 } 3978 skb_put(skb, len); 3979 h2c = (struct rtw89_h2c_cxinit *)skb->data; 3980 3981 h2c->hdr.type = type; 3982 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 3983 3984 h2c->ant_type = ant->type; 3985 h2c->ant_num = ant->num; 3986 h2c->ant_iso = ant->isolation; 3987 h2c->ant_info = 3988 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 3989 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 3990 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 3991 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 3992 3993 h2c->mod_rfe = module->rfe_type; 3994 h2c->mod_cv = module->cv; 3995 h2c->mod_info = 3996 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 3997 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 3998 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 3999 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4000 h2c->mod_adie_kt = module->kt_ver_adie; 4001 h2c->wl_gch = init_info->wl_guard_ch; 4002 4003 h2c->info = 4004 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4005 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4006 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4007 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4008 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4009 4010 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4011 H2C_CAT_OUTSRC, BTFC_SET, 4012 SET_DRV_INFO, 0, 0, 4013 len); 4014 4015 ret = rtw89_h2c_tx(rtwdev, skb, false); 4016 if (ret) { 4017 rtw89_err(rtwdev, "failed to send h2c\n"); 4018 goto fail; 4019 } 4020 4021 return 0; 4022 fail: 4023 dev_kfree_skb_any(skb); 4024 4025 return ret; 4026 } 4027 4028 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4029 { 4030 struct rtw89_btc *btc = &rtwdev->btc; 4031 struct rtw89_btc_dm *dm = &btc->dm; 4032 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4033 struct rtw89_h2c_cxinit_v7 *h2c; 4034 u32 len = sizeof(*h2c); 4035 struct sk_buff *skb; 4036 int ret; 4037 4038 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4039 if (!skb) { 4040 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4041 return -ENOMEM; 4042 } 4043 skb_put(skb, len); 4044 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4045 4046 h2c->hdr.type = type; 4047 h2c->hdr.ver = btc->ver->fcxinit; 4048 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4049 h2c->init = *init_info; 4050 4051 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4052 H2C_CAT_OUTSRC, 
BTFC_SET, 4053 SET_DRV_INFO, 0, 0, 4054 len); 4055 4056 ret = rtw89_h2c_tx(rtwdev, skb, false); 4057 if (ret) { 4058 rtw89_err(rtwdev, "failed to send h2c\n"); 4059 goto fail; 4060 } 4061 4062 return 0; 4063 fail: 4064 dev_kfree_skb_any(skb); 4065 4066 return ret; 4067 } 4068 4069 #define PORT_DATA_OFFSET 4 4070 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4071 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4072 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4073 4074 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4075 { 4076 struct rtw89_btc *btc = &rtwdev->btc; 4077 const struct rtw89_btc_ver *ver = btc->ver; 4078 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4079 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4080 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4081 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4082 struct sk_buff *skb; 4083 u32 len; 4084 u8 offset = 0; 4085 u8 *cmd; 4086 int ret; 4087 int i; 4088 4089 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4090 4091 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4092 if (!skb) { 4093 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4094 return -ENOMEM; 4095 } 4096 skb_put(skb, len); 4097 cmd = skb->data; 4098 4099 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4100 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4101 4102 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4103 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4104 4105 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4106 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4107 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4108 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4109 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4110 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4111 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4112 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4113 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4114 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4115 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4116 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4117 4118 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4119 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4120 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4121 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4122 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4123 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4124 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4125 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4126 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4127 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4128 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4129 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4130 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4131 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4132 } 4133 4134 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4135 H2C_CAT_OUTSRC, BTFC_SET, 4136 SET_DRV_INFO, 0, 0, 4137 len); 4138 4139 ret = rtw89_h2c_tx(rtwdev, skb, false); 4140 if (ret) { 4141 rtw89_err(rtwdev, "failed to send h2c\n"); 4142 goto fail; 4143 } 4144 4145 return 0; 4146 fail: 4147 dev_kfree_skb_any(skb); 4148 4149 
return ret; 4150 } 4151 4152 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4153 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4154 4155 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4156 { 4157 struct rtw89_btc *btc = &rtwdev->btc; 4158 const struct rtw89_btc_ver *ver = btc->ver; 4159 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4160 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4161 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4162 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4163 struct sk_buff *skb; 4164 u32 len; 4165 u8 *cmd, offset; 4166 int ret; 4167 int i; 4168 4169 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4170 4171 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4172 if (!skb) { 4173 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4174 return -ENOMEM; 4175 } 4176 skb_put(skb, len); 4177 cmd = skb->data; 4178 4179 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4180 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4181 4182 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4183 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4184 4185 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4186 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4187 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4188 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4189 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4190 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4191 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4192 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4193 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4194 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4195 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4196 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4197 4198 offset = PORT_DATA_OFFSET; 4199 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4200 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4201 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4202 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4203 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4204 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4205 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4206 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4207 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4208 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4209 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4210 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4211 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4212 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4213 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4214 } 4215 4216 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4217 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4218 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4219 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4220 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4221 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4222 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, 
offset); 4223 4224 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4225 H2C_CAT_OUTSRC, BTFC_SET, 4226 SET_DRV_INFO, 0, 0, 4227 len); 4228 4229 ret = rtw89_h2c_tx(rtwdev, skb, false); 4230 if (ret) { 4231 rtw89_err(rtwdev, "failed to send h2c\n"); 4232 goto fail; 4233 } 4234 4235 return 0; 4236 fail: 4237 dev_kfree_skb_any(skb); 4238 4239 return ret; 4240 } 4241 4242 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4243 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4244 4245 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4246 { 4247 struct rtw89_btc *btc = &rtwdev->btc; 4248 const struct rtw89_btc_ver *ver = btc->ver; 4249 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4250 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4251 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4252 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4253 struct sk_buff *skb; 4254 u32 len; 4255 u8 *cmd, offset; 4256 int ret; 4257 int i; 4258 4259 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4260 4261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4262 if (!skb) { 4263 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4264 return -ENOMEM; 4265 } 4266 skb_put(skb, len); 4267 cmd = skb->data; 4268 4269 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4270 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4271 4272 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4273 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4274 4275 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4276 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4277 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4278 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4279 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4280 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4281 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4282 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4283 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4284 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4285 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4286 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4287 4288 offset = PORT_DATA_OFFSET; 4289 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4290 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4291 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4292 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4293 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4294 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4295 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4296 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4297 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4298 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4299 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4300 } 4301 4302 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4303 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4304 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4305 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4306 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4307 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, 
offset); 4308 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4309 4310 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4311 H2C_CAT_OUTSRC, BTFC_SET, 4312 SET_DRV_INFO, 0, 0, 4313 len); 4314 4315 ret = rtw89_h2c_tx(rtwdev, skb, false); 4316 if (ret) { 4317 rtw89_err(rtwdev, "failed to send h2c\n"); 4318 goto fail; 4319 } 4320 4321 return 0; 4322 fail: 4323 dev_kfree_skb_any(skb); 4324 4325 return ret; 4326 } 4327 4328 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4329 { 4330 struct rtw89_btc *btc = &rtwdev->btc; 4331 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4332 struct rtw89_h2c_cxrole_v8 *h2c; 4333 u32 len = sizeof(*h2c); 4334 struct sk_buff *skb; 4335 int ret; 4336 4337 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4338 if (!skb) { 4339 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4340 return -ENOMEM; 4341 } 4342 skb_put(skb, len); 4343 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4344 4345 h2c->hdr.type = type; 4346 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4347 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4348 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4349 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4350 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4351 4352 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4353 H2C_CAT_OUTSRC, BTFC_SET, 4354 SET_DRV_INFO, 0, 0, 4355 len); 4356 4357 ret = rtw89_h2c_tx(rtwdev, skb, false); 4358 if (ret) { 4359 rtw89_err(rtwdev, "failed to send h2c\n"); 4360 goto fail; 4361 } 4362 4363 return 0; 4364 fail: 4365 dev_kfree_skb_any(skb); 4366 4367 return ret; 4368 } 4369 4370 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4371 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4372 { 4373 struct rtw89_btc *btc = &rtwdev->btc; 4374 const struct rtw89_btc_ver *ver = btc->ver; 4375 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4376 struct sk_buff *skb; 4377 u8 *cmd; 4378 int ret; 4379 4380 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4381 if (!skb) { 4382 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4383 return -ENOMEM; 4384 } 4385 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4386 cmd = skb->data; 4387 4388 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4389 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4390 4391 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4392 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4393 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4394 if (ver->fcxctrl == 0) 4395 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4396 4397 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4398 H2C_CAT_OUTSRC, BTFC_SET, 4399 SET_DRV_INFO, 0, 0, 4400 H2C_LEN_CXDRVINFO_CTRL); 4401 4402 ret = rtw89_h2c_tx(rtwdev, skb, false); 4403 if (ret) { 4404 rtw89_err(rtwdev, "failed to send h2c\n"); 4405 goto fail; 4406 } 4407 4408 return 0; 4409 fail: 4410 dev_kfree_skb_any(skb); 4411 4412 return ret; 4413 } 4414 4415 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4416 { 4417 struct rtw89_btc *btc = &rtwdev->btc; 4418 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4419 struct rtw89_h2c_cxctrl_v7 *h2c; 4420 u32 len = sizeof(*h2c); 4421 struct sk_buff *skb; 4422 int ret; 4423 4424 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4425 if (!skb) { 4426 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4427 return -ENOMEM; 4428 } 4429 skb_put(skb, len); 4430 h2c = 
(struct rtw89_h2c_cxctrl_v7 *)skb->data; 4431 4432 h2c->hdr.type = type; 4433 h2c->hdr.ver = btc->ver->fcxctrl; 4434 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4435 h2c->ctrl = *ctrl; 4436 4437 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4438 H2C_CAT_OUTSRC, BTFC_SET, 4439 SET_DRV_INFO, 0, 0, len); 4440 4441 ret = rtw89_h2c_tx(rtwdev, skb, false); 4442 if (ret) { 4443 rtw89_err(rtwdev, "failed to send h2c\n"); 4444 goto fail; 4445 } 4446 4447 return 0; 4448 fail: 4449 dev_kfree_skb_any(skb); 4450 4451 return ret; 4452 } 4453 4454 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4455 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4456 { 4457 struct rtw89_btc *btc = &rtwdev->btc; 4458 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4459 struct sk_buff *skb; 4460 u8 *cmd; 4461 int ret; 4462 4463 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4464 if (!skb) { 4465 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4466 return -ENOMEM; 4467 } 4468 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4469 cmd = skb->data; 4470 4471 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4472 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4473 4474 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4475 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4476 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4477 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4478 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4479 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4480 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4481 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4482 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4483 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4484 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4485 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4486 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4487 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4488 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4489 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4490 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4491 4492 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4493 H2C_CAT_OUTSRC, BTFC_SET, 4494 SET_DRV_INFO, 0, 0, 4495 H2C_LEN_CXDRVINFO_TRX); 4496 4497 ret = rtw89_h2c_tx(rtwdev, skb, false); 4498 if (ret) { 4499 rtw89_err(rtwdev, "failed to send h2c\n"); 4500 goto fail; 4501 } 4502 4503 return 0; 4504 fail: 4505 dev_kfree_skb_any(skb); 4506 4507 return ret; 4508 } 4509 4510 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4511 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4512 { 4513 struct rtw89_btc *btc = &rtwdev->btc; 4514 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4515 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4516 struct sk_buff *skb; 4517 u8 *cmd; 4518 int ret; 4519 4520 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4521 if (!skb) { 4522 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4523 return -ENOMEM; 4524 } 4525 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4526 cmd = skb->data; 4527 4528 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4529 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4530 4531 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4532 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4533 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4534 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4535 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4536 4537 
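	/* The header fill below targets the BT-coexistence firmware: the RFK
	 * state snapshot assembled above is delivered as a SET_DRV_INFO
	 * message in the H2C_CAT_OUTSRC / BTFC_SET class.
	 */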
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4538 H2C_CAT_OUTSRC, BTFC_SET, 4539 SET_DRV_INFO, 0, 0, 4540 H2C_LEN_CXDRVINFO_RFK); 4541 4542 ret = rtw89_h2c_tx(rtwdev, skb, false); 4543 if (ret) { 4544 rtw89_err(rtwdev, "failed to send h2c\n"); 4545 goto fail; 4546 } 4547 4548 return 0; 4549 fail: 4550 dev_kfree_skb_any(skb); 4551 4552 return ret; 4553 } 4554 4555 #define H2C_LEN_PKT_OFLD 4 4556 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4557 { 4558 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4559 struct sk_buff *skb; 4560 unsigned int cond; 4561 u8 *cmd; 4562 int ret; 4563 4564 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4565 if (!skb) { 4566 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4567 return -ENOMEM; 4568 } 4569 skb_put(skb, H2C_LEN_PKT_OFLD); 4570 cmd = skb->data; 4571 4572 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4573 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4574 4575 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4576 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4577 H2C_FUNC_PACKET_OFLD, 1, 1, 4578 H2C_LEN_PKT_OFLD); 4579 4580 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4581 4582 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4583 if (ret < 0) { 4584 rtw89_debug(rtwdev, RTW89_DBG_FW, 4585 "failed to del pkt ofld: id %d, ret %d\n", 4586 id, ret); 4587 return ret; 4588 } 4589 4590 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4591 return 0; 4592 } 4593 4594 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4595 struct sk_buff *skb_ofld) 4596 { 4597 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4598 struct sk_buff *skb; 4599 unsigned int cond; 4600 u8 *cmd; 4601 u8 alloc_id; 4602 int ret; 4603 4604 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4605 RTW89_MAX_PKT_OFLD_NUM); 4606 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4607 return -ENOSPC; 4608 4609 *id = alloc_id; 4610 4611 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4612 if (!skb) { 4613 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4614 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4615 return -ENOMEM; 4616 } 4617 skb_put(skb, H2C_LEN_PKT_OFLD); 4618 cmd = skb->data; 4619 4620 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4621 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4622 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4623 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4624 4625 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4626 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4627 H2C_FUNC_PACKET_OFLD, 1, 1, 4628 H2C_LEN_PKT_OFLD + skb_ofld->len); 4629 4630 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4631 4632 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4633 if (ret < 0) { 4634 rtw89_debug(rtwdev, RTW89_DBG_FW, 4635 "failed to add pkt ofld: id %d, ret %d\n", 4636 alloc_id, ret); 4637 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4638 return ret; 4639 } 4640 4641 return 0; 4642 } 4643 4644 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4645 struct list_head *chan_list) 4646 { 4647 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4648 struct rtw89_h2c_chinfo_elem *elem; 4649 struct rtw89_mac_chinfo *ch_info; 4650 struct rtw89_h2c_chinfo *h2c; 4651 struct sk_buff *skb; 4652 unsigned int cond; 4653 int skb_len; 4654 int ret; 4655 4656 static_assert(sizeof(*elem) == 
RTW89_MAC_CHINFO_SIZE); 4657 4658 skb_len = struct_size(h2c, elem, ch_num); 4659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4660 if (!skb) { 4661 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4662 return -ENOMEM; 4663 } 4664 skb_put(skb, sizeof(*h2c)); 4665 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4666 4667 h2c->ch_num = ch_num; 4668 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4669 4670 list_for_each_entry(ch_info, chan_list, list) { 4671 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 4672 4673 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 4674 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 4675 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 4676 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 4677 4678 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 4679 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 4680 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 4681 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 4682 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 4683 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 4684 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 4685 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 4686 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 4687 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 4688 4689 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 4690 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 4691 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 4692 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 4693 4694 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 4695 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 4696 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 4697 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 4698 } 4699 4700 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4701 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4702 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4703 4704 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4705 4706 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4707 if (ret) { 4708 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4709 return ret; 4710 } 4711 4712 return 0; 4713 } 4714 4715 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 4716 struct list_head *chan_list) 4717 { 4718 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4719 struct rtw89_h2c_chinfo_elem_be *elem; 4720 struct rtw89_mac_chinfo_be *ch_info; 4721 struct rtw89_h2c_chinfo *h2c; 4722 struct sk_buff *skb; 4723 unsigned int cond; 4724 int skb_len; 4725 int ret; 4726 4727 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4728 4729 skb_len = struct_size(h2c, elem, ch_num); 4730 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4731 if (!skb) { 4732 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4733 return -ENOMEM; 4734 } 4735 4736 skb_put(skb, sizeof(*h2c)); 4737 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4738 4739 h2c->ch_num = ch_num; 4740 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4741 h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 4742 4743 
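	/* Append one fixed-size channel element per entry in chan_list; the
	 * static_assert above guarantees each element is exactly
	 * RTW89_MAC_CHINFO_SIZE bytes, matching elem_size (in 4-byte units).
	 */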
list_for_each_entry(ch_info, chan_list, list) { 4744 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 4745 4746 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 4747 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4748 le32_encode_bits(ch_info->central_ch, 4749 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 4750 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 4751 4752 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 4753 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 4754 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 4755 le32_encode_bits(ch_info->pause_data, 4756 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 4757 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 4758 le32_encode_bits(ch_info->rand_seq_num, 4759 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 4760 le32_encode_bits(ch_info->notify_action, 4761 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 4762 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 4763 RTW89_H2C_CHINFO_BE_W1_PROBE) | 4764 le32_encode_bits(ch_info->leave_crit, 4765 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 4766 le32_encode_bits(ch_info->chkpt_timer, 4767 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 4768 4769 elem->w2 = le32_encode_bits(ch_info->leave_time, 4770 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 4771 le32_encode_bits(ch_info->leave_th, 4772 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 4773 le32_encode_bits(ch_info->tx_pkt_ctrl, 4774 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 4775 4776 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 4777 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 4778 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 4779 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 4780 4781 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 4782 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 4783 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 4784 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 4785 4786 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 4787 le32_encode_bits(ch_info->fw_probe0_ssids, 4788 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 4789 4790 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 4791 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 4792 le32_encode_bits(ch_info->fw_probe0_bssids, 4793 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 4794 } 4795 4796 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4797 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4798 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4799 4800 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4801 4802 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4803 if (ret) { 4804 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4805 return ret; 4806 } 4807 4808 return 0; 4809 } 4810 4811 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 4812 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 4813 struct rtw89_scan_option *option, 4814 struct rtw89_vif *rtwvif, 4815 bool wowlan) 4816 { 4817 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4818 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4819 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 4820 struct rtw89_h2c_scanofld *h2c; 4821 u32 len = sizeof(*h2c); 4822 struct sk_buff *skb; 4823 unsigned int cond; 4824 u64 tsf = 0; 4825 int ret; 4826 4827 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
len); 4828 if (!skb) { 4829 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4830 return -ENOMEM; 4831 } 4832 skb_put(skb, len); 4833 h2c = (struct rtw89_h2c_scanofld *)skb->data; 4834 4835 if (option->delay) { 4836 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf); 4837 if (ret) { 4838 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 4839 scan_mode = RTW89_SCAN_IMMEDIATE; 4840 } else { 4841 scan_mode = RTW89_SCAN_DELAY; 4842 tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 4843 } 4844 } 4845 4846 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 4847 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 4848 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 4849 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 4850 4851 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 4852 le32_encode_bits(option->target_ch_mode, 4853 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 4854 le32_encode_bits(scan_mode, RTW89_H2C_SCANOFLD_W1_START_MODE) | 4855 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 4856 4857 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 4858 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 4859 4860 if (option->target_ch_mode) { 4861 h2c->w1 |= le32_encode_bits(op->band_width, 4862 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 4863 le32_encode_bits(op->primary_channel, 4864 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 4865 le32_encode_bits(op->channel, 4866 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 4867 h2c->w0 |= le32_encode_bits(op->band_type, 4868 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 4869 } 4870 4871 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 4872 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 4873 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 4874 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 4875 4876 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4877 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4878 H2C_FUNC_SCANOFLD, 1, 1, 4879 len); 4880 4881 if (option->enable) 4882 cond = RTW89_SCANOFLD_WAIT_COND_START; 4883 else 4884 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 4885 4886 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4887 if (ret) { 4888 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 4889 return ret; 4890 } 4891 4892 return 0; 4893 } 4894 4895 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 4896 struct rtw89_scan_option *option) 4897 { 4898 struct ieee80211_supported_band *sband; 4899 struct ieee80211_channel *chan; 4900 u8 i, idx; 4901 4902 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 4903 if (!sband) { 4904 option->prohib_chan = U64_MAX; 4905 return; 4906 } 4907 4908 for (i = 0; i < sband->n_channels; i++) { 4909 chan = &sband->channels[i]; 4910 if (chan->flags & IEEE80211_CHAN_DISABLED) { 4911 idx = (chan->hw_value - 1) / 4; 4912 option->prohib_chan |= BIT(idx); 4913 } 4914 } 4915 } 4916 4917 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 4918 struct rtw89_scan_option *option, 4919 struct rtw89_vif *rtwvif, 4920 bool wowlan) 4921 { 4922 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4923 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4924 struct cfg80211_scan_request *req = rtwvif->scan_req; 4925 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 4926 struct rtw89_chan *op = &scan_info->op_chan; 4927 struct rtw89_h2c_scanofld_be_opch *opch; 4928 struct rtw89_pktofld_info *pkt_info; 4929 struct rtw89_h2c_scanofld_be *h2c; 4930 struct sk_buff *skb; 4931 u8 
macc_role_size = sizeof(*macc_role) * option->num_macc_role; 4932 u8 opch_size = sizeof(*opch) * option->num_opch; 4933 u8 probe_id[NUM_NL80211_BANDS]; 4934 u8 cfg_len = sizeof(*h2c); 4935 unsigned int cond; 4936 void *ptr; 4937 int ret; 4938 u32 len; 4939 u8 i; 4940 4941 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 4942 4943 len = cfg_len + macc_role_size + opch_size; 4944 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4945 if (!skb) { 4946 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4947 return -ENOMEM; 4948 } 4949 4950 skb_put(skb, len); 4951 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 4952 ptr = skb->data; 4953 4954 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 4955 4956 if (!wowlan) { 4957 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 4958 if (pkt_info->wildcard_6ghz) { 4959 /* Provide wildcard as template */ 4960 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 4961 break; 4962 } 4963 } 4964 } 4965 4966 h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 4967 le32_encode_bits(option->scan_mode, 4968 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 4969 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 4970 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 4971 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 4972 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 4973 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 4974 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 4975 4976 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 4977 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 4978 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 4979 4980 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 4981 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 4982 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 4983 4984 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 4985 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 4986 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 4987 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 4988 4989 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 4990 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 4991 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 4992 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 4993 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 4994 4995 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 4996 4997 h2c->w6 = le32_encode_bits(option->prohib_chan, 4998 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 4999 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5000 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5001 if (!wowlan && req->no_cck) { 5002 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5003 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5004 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5005 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5006 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5007 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5008 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5009 } 5010 5011 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5012 cfg_len = offsetofend(typeof(*h2c), w8); 5013 goto flex_member; 5014 } 5015 5016 h2c->w9 = le32_encode_bits(sizeof(*h2c) / 
sizeof(h2c->w0), 5017 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5018 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5019 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5020 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5021 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5022 5023 flex_member: 5024 ptr += cfg_len; 5025 5026 for (i = 0; i < option->num_macc_role; i++) { 5027 macc_role = ptr; 5028 macc_role->w0 = 5029 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5030 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5031 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5032 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5033 ptr += sizeof(*macc_role); 5034 } 5035 5036 for (i = 0; i < option->num_opch; i++) { 5037 opch = ptr; 5038 opch->w0 = le32_encode_bits(rtwvif->mac_id, 5039 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5040 le32_encode_bits(option->band, 5041 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5042 le32_encode_bits(rtwvif->port, 5043 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5044 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5045 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5046 le32_encode_bits(true, 5047 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5048 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5049 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5050 5051 opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME, 5052 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 5053 le32_encode_bits(op->band_type, 5054 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5055 le32_encode_bits(op->band_width, 5056 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5057 le32_encode_bits(0x3, 5058 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5059 le32_encode_bits(op->primary_channel, 5060 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5061 le32_encode_bits(op->channel, 5062 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5063 5064 opch->w2 = le32_encode_bits(0, 5065 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5066 le32_encode_bits(0, 5067 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5068 le32_encode_bits(2, 5069 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5070 5071 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5072 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5073 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5074 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5075 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5076 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5077 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5078 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5079 ptr += sizeof(*opch); 5080 } 5081 5082 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5083 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5084 H2C_FUNC_SCANOFLD_BE, 1, 1, 5085 len); 5086 5087 if (option->enable) 5088 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5089 else 5090 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5091 5092 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5093 if (ret) { 5094 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5095 return ret; 5096 } 5097 5098 return 0; 5099 } 5100 5101 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5102 struct rtw89_fw_h2c_rf_reg_info *info, 5103 u16 len, u8 page) 5104 { 5105 struct sk_buff *skb; 5106 u8 class = info->rf_path == RF_PATH_A ? 
5107 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5108 int ret; 5109 5110 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5111 if (!skb) { 5112 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5113 return -ENOMEM; 5114 } 5115 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5116 5117 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5118 H2C_CAT_OUTSRC, class, page, 0, 0, 5119 len); 5120 5121 ret = rtw89_h2c_tx(rtwdev, skb, false); 5122 if (ret) { 5123 rtw89_err(rtwdev, "failed to send h2c\n"); 5124 goto fail; 5125 } 5126 5127 return 0; 5128 fail: 5129 dev_kfree_skb_any(skb); 5130 5131 return ret; 5132 } 5133 5134 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5135 { 5136 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5137 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5138 struct sk_buff *skb; 5139 int ret; 5140 u8 idx; 5141 5142 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5143 if (!skb) { 5144 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5145 return -ENOMEM; 5146 } 5147 skb_put(skb, sizeof(*mccch)); 5148 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5149 5150 idx = rfk_mcc->table_idx; 5151 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5152 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5153 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5154 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5155 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5156 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5157 5158 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5159 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5160 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5161 sizeof(*mccch)); 5162 5163 ret = rtw89_h2c_tx(rtwdev, skb, false); 5164 if (ret) { 5165 rtw89_err(rtwdev, "failed to send h2c\n"); 5166 goto fail; 5167 } 5168 5169 return 0; 5170 fail: 5171 dev_kfree_skb_any(skb); 5172 5173 return ret; 5174 } 5175 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5176 5177 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5178 enum rtw89_phy_idx phy_idx) 5179 { 5180 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5181 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5182 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5183 u8 tbl_sel = rfk_mcc->table_idx; 5184 u32 len = sizeof(*h2c); 5185 struct sk_buff *skb; 5186 u8 ver = U8_MAX; 5187 u8 tbl, path; 5188 u32 val32; 5189 int ret; 5190 5191 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5192 len = sizeof(*h2c_v0); 5193 ver = 0; 5194 } 5195 5196 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5197 if (!skb) { 5198 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5199 return -ENOMEM; 5200 } 5201 skb_put(skb, len); 5202 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5203 5204 h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5205 5206 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5207 5208 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5209 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5210 h2c->common.dbcc.ch[path][tbl] = 5211 cpu_to_le32(rfk_mcc->ch[tbl]); 5212 h2c->common.dbcc.band[path][tbl] = 5213 cpu_to_le32(rfk_mcc->band[tbl]); 5214 } 5215 } 5216 5217 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5218 h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5219 h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5220 } 5221 5222 h2c->common.phy_idx = cpu_to_le32(phy_idx); 5223 5224 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5225 h2c_v0 = (struct 
rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5226 5227 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5228 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); 5229 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5230 5231 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5232 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5233 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5234 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5235 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5236 h2c_v0->rfmod0 = cpu_to_le32(val32); 5237 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5238 h2c_v0->rfmod1 = cpu_to_le32(val32); 5239 5240 if (rtw89_is_mlo_1_1(rtwdev)) 5241 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5242 5243 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5244 5245 goto done; 5246 } 5247 5248 if (rtw89_is_mlo_1_1(rtwdev)) 5249 h2c->mlo_1_1 = cpu_to_le32(1); 5250 done: 5251 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5252 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5253 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5254 len); 5255 5256 ret = rtw89_h2c_tx(rtwdev, skb, false); 5257 if (ret) { 5258 rtw89_err(rtwdev, "failed to send h2c\n"); 5259 goto fail; 5260 } 5261 5262 return 0; 5263 fail: 5264 dev_kfree_skb_any(skb); 5265 5266 return ret; 5267 } 5268 5269 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5270 enum rtw89_tssi_mode tssi_mode) 5271 { 5272 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5273 RTW89_CHANCTX_0); 5274 struct rtw89_hal *hal = &rtwdev->hal; 5275 struct rtw89_h2c_rf_tssi *h2c; 5276 u32 len = sizeof(*h2c); 5277 struct sk_buff *skb; 5278 int ret; 5279 5280 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5281 if (!skb) { 5282 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5283 return -ENOMEM; 5284 } 5285 skb_put(skb, len); 5286 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5287 5288 h2c->len = cpu_to_le16(len); 5289 h2c->phy = phy_idx; 5290 h2c->ch = chan->channel; 5291 h2c->bw = chan->band_width; 5292 h2c->band = chan->band_type; 5293 h2c->hwtx_en = true; 5294 h2c->cv = hal->cv; 5295 h2c->tssi_mode = tssi_mode; 5296 5297 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5298 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5299 5300 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5301 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5302 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5303 5304 ret = rtw89_h2c_tx(rtwdev, skb, false); 5305 if (ret) { 5306 rtw89_err(rtwdev, "failed to send h2c\n"); 5307 goto fail; 5308 } 5309 5310 return 0; 5311 fail: 5312 dev_kfree_skb_any(skb); 5313 5314 return ret; 5315 } 5316 5317 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5318 { 5319 struct rtw89_h2c_rf_iqk *h2c; 5320 u32 len = sizeof(*h2c); 5321 struct sk_buff *skb; 5322 int ret; 5323 5324 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5325 if (!skb) { 5326 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5327 return -ENOMEM; 5328 } 5329 skb_put(skb, len); 5330 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5331 5332 h2c->phy_idx = cpu_to_le32(phy_idx); 5333 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5334 5335 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5336 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5337 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5338 5339 ret = rtw89_h2c_tx(rtwdev, skb, false); 5340 if (ret) { 5341 rtw89_err(rtwdev, "failed to send h2c\n"); 5342 goto fail; 5343 } 5344 5345 
return 0; 5346 fail: 5347 dev_kfree_skb_any(skb); 5348 5349 return ret; 5350 } 5351 5352 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5353 { 5354 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5355 RTW89_CHANCTX_0); 5356 struct rtw89_h2c_rf_dpk *h2c; 5357 u32 len = sizeof(*h2c); 5358 struct sk_buff *skb; 5359 int ret; 5360 5361 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5362 if (!skb) { 5363 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5364 return -ENOMEM; 5365 } 5366 skb_put(skb, len); 5367 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5368 5369 h2c->len = len; 5370 h2c->phy = phy_idx; 5371 h2c->dpk_enable = true; 5372 h2c->kpath = RF_AB; 5373 h2c->cur_band = chan->band_type; 5374 h2c->cur_bw = chan->band_width; 5375 h2c->cur_ch = chan->channel; 5376 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5377 5378 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5379 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5380 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5381 5382 ret = rtw89_h2c_tx(rtwdev, skb, false); 5383 if (ret) { 5384 rtw89_err(rtwdev, "failed to send h2c\n"); 5385 goto fail; 5386 } 5387 5388 return 0; 5389 fail: 5390 dev_kfree_skb_any(skb); 5391 5392 return ret; 5393 } 5394 5395 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5396 { 5397 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5398 RTW89_CHANCTX_0); 5399 struct rtw89_hal *hal = &rtwdev->hal; 5400 struct rtw89_h2c_rf_txgapk *h2c; 5401 u32 len = sizeof(*h2c); 5402 struct sk_buff *skb; 5403 int ret; 5404 5405 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5406 if (!skb) { 5407 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5408 return -ENOMEM; 5409 } 5410 skb_put(skb, len); 5411 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5412 5413 h2c->len = len; 5414 h2c->ktype = 2; 5415 h2c->phy = phy_idx; 5416 h2c->kpath = RF_AB; 5417 h2c->band = chan->band_type; 5418 h2c->bw = chan->band_width; 5419 h2c->ch = chan->channel; 5420 h2c->cv = hal->cv; 5421 5422 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5423 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5424 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5425 5426 ret = rtw89_h2c_tx(rtwdev, skb, false); 5427 if (ret) { 5428 rtw89_err(rtwdev, "failed to send h2c\n"); 5429 goto fail; 5430 } 5431 5432 return 0; 5433 fail: 5434 dev_kfree_skb_any(skb); 5435 5436 return ret; 5437 } 5438 5439 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5440 { 5441 struct rtw89_h2c_rf_dack *h2c; 5442 u32 len = sizeof(*h2c); 5443 struct sk_buff *skb; 5444 int ret; 5445 5446 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5447 if (!skb) { 5448 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5449 return -ENOMEM; 5450 } 5451 skb_put(skb, len); 5452 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5453 5454 h2c->len = cpu_to_le32(len); 5455 h2c->phy = cpu_to_le32(phy_idx); 5456 h2c->type = cpu_to_le32(0); 5457 5458 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5459 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5460 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5461 5462 ret = rtw89_h2c_tx(rtwdev, skb, false); 5463 if (ret) { 5464 rtw89_err(rtwdev, "failed to send h2c\n"); 5465 goto fail; 5466 } 5467 5468 return 0; 5469 fail: 5470 dev_kfree_skb_any(skb); 5471 5472 return ret; 5473 } 5474 5475 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5476 { 5477 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5478 
RTW89_CHANCTX_0); 5479 struct rtw89_h2c_rf_rxdck *h2c; 5480 u32 len = sizeof(*h2c); 5481 struct sk_buff *skb; 5482 int ret; 5483 5484 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5485 if (!skb) { 5486 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5487 return -ENOMEM; 5488 } 5489 skb_put(skb, len); 5490 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5491 5492 h2c->len = len; 5493 h2c->phy = phy_idx; 5494 h2c->is_afe = false; 5495 h2c->kpath = RF_AB; 5496 h2c->cur_band = chan->band_type; 5497 h2c->cur_bw = chan->band_width; 5498 h2c->cur_ch = chan->channel; 5499 h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5500 5501 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5502 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5503 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5504 5505 ret = rtw89_h2c_tx(rtwdev, skb, false); 5506 if (ret) { 5507 rtw89_err(rtwdev, "failed to send h2c\n"); 5508 goto fail; 5509 } 5510 5511 return 0; 5512 fail: 5513 dev_kfree_skb_any(skb); 5514 5515 return ret; 5516 } 5517 5518 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5519 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5520 bool rack, bool dack) 5521 { 5522 struct sk_buff *skb; 5523 int ret; 5524 5525 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5526 if (!skb) { 5527 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5528 return -ENOMEM; 5529 } 5530 skb_put_data(skb, buf, len); 5531 5532 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5533 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5534 len); 5535 5536 ret = rtw89_h2c_tx(rtwdev, skb, false); 5537 if (ret) { 5538 rtw89_err(rtwdev, "failed to send h2c\n"); 5539 goto fail; 5540 } 5541 5542 return 0; 5543 fail: 5544 dev_kfree_skb_any(skb); 5545 5546 return ret; 5547 } 5548 5549 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5550 { 5551 struct sk_buff *skb; 5552 int ret; 5553 5554 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5555 if (!skb) { 5556 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5557 return -ENOMEM; 5558 } 5559 skb_put_data(skb, buf, len); 5560 5561 ret = rtw89_h2c_tx(rtwdev, skb, false); 5562 if (ret) { 5563 rtw89_err(rtwdev, "failed to send h2c\n"); 5564 goto fail; 5565 } 5566 5567 return 0; 5568 fail: 5569 dev_kfree_skb_any(skb); 5570 5571 return ret; 5572 } 5573 5574 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5575 { 5576 struct rtw89_early_h2c *early_h2c; 5577 5578 lockdep_assert_held(&rtwdev->mutex); 5579 5580 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5581 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5582 } 5583 } 5584 5585 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5586 { 5587 struct rtw89_early_h2c *early_h2c, *tmp; 5588 5589 mutex_lock(&rtwdev->mutex); 5590 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5591 list_del(&early_h2c->list); 5592 kfree(early_h2c->h2c); 5593 kfree(early_h2c); 5594 } 5595 mutex_unlock(&rtwdev->mutex); 5596 } 5597 5598 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5599 { 5600 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5601 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5602 5603 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5604 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5605 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5606 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 5607 } 5608 5609 static bool 
rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 5610 struct sk_buff *c2h) 5611 { 5612 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5613 u8 category = attr->category; 5614 u8 class = attr->class; 5615 u8 func = attr->func; 5616 5617 switch (category) { 5618 default: 5619 return false; 5620 case RTW89_C2H_CAT_MAC: 5621 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 5622 case RTW89_C2H_CAT_OUTSRC: 5623 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 5624 } 5625 } 5626 5627 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 5628 { 5629 rtw89_fw_c2h_parse_attr(c2h); 5630 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 5631 goto enqueue; 5632 5633 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 5634 dev_kfree_skb_any(c2h); 5635 return; 5636 5637 enqueue: 5638 skb_queue_tail(&rtwdev->c2h_queue, c2h); 5639 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 5640 } 5641 5642 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 5643 struct sk_buff *skb) 5644 { 5645 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 5646 u8 category = attr->category; 5647 u8 class = attr->class; 5648 u8 func = attr->func; 5649 u16 len = attr->len; 5650 bool dump = true; 5651 5652 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5653 return; 5654 5655 switch (category) { 5656 case RTW89_C2H_CAT_TEST: 5657 break; 5658 case RTW89_C2H_CAT_MAC: 5659 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 5660 if (class == RTW89_MAC_C2H_CLASS_INFO && 5661 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 5662 dump = false; 5663 break; 5664 case RTW89_C2H_CAT_OUTSRC: 5665 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 5666 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 5667 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 5668 else 5669 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 5670 break; 5671 } 5672 5673 if (dump) 5674 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 5675 } 5676 5677 void rtw89_fw_c2h_work(struct work_struct *work) 5678 { 5679 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5680 c2h_work); 5681 struct sk_buff *skb, *tmp; 5682 5683 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 5684 skb_unlink(skb, &rtwdev->c2h_queue); 5685 mutex_lock(&rtwdev->mutex); 5686 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 5687 mutex_unlock(&rtwdev->mutex); 5688 dev_kfree_skb_any(skb); 5689 } 5690 } 5691 5692 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 5693 struct rtw89_mac_h2c_info *info) 5694 { 5695 const struct rtw89_chip_info *chip = rtwdev->chip; 5696 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5697 const u32 *h2c_reg = chip->h2c_regs; 5698 u8 i, val, len; 5699 int ret; 5700 5701 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 5702 rtwdev, chip->h2c_ctrl_reg); 5703 if (ret) { 5704 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 5705 return ret; 5706 } 5707 5708 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 5709 sizeof(info->u.h2creg[0])); 5710 5711 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 5712 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 5713 5714 for (i = 0; i < RTW89_H2CREG_MAX; i++) 5715 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 5716 5717 fw_info->h2c_counter++; 5718 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 5719 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 5720 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 5721 5722 return 0; 5723 } 5724 5725 static int rtw89_fw_read_c2h_reg(struct 
rtw89_dev *rtwdev, 5726 struct rtw89_mac_c2h_info *info) 5727 { 5728 const struct rtw89_chip_info *chip = rtwdev->chip; 5729 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5730 const u32 *c2h_reg = chip->c2h_regs; 5731 u32 ret; 5732 u8 i, val; 5733 5734 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 5735 5736 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 5737 RTW89_C2H_TIMEOUT, false, rtwdev, 5738 chip->c2h_ctrl_reg); 5739 if (ret) { 5740 rtw89_warn(rtwdev, "c2h reg timeout\n"); 5741 return ret; 5742 } 5743 5744 for (i = 0; i < RTW89_C2HREG_MAX; i++) 5745 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 5746 5747 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 5748 5749 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 5750 info->content_len = 5751 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 5752 RTW89_C2HREG_HDR_LEN; 5753 5754 fw_info->c2h_counter++; 5755 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 5756 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 5757 5758 return 0; 5759 } 5760 5761 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 5762 struct rtw89_mac_h2c_info *h2c_info, 5763 struct rtw89_mac_c2h_info *c2h_info) 5764 { 5765 u32 ret; 5766 5767 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 5768 lockdep_assert_held(&rtwdev->mutex); 5769 5770 if (!h2c_info && !c2h_info) 5771 return -EINVAL; 5772 5773 if (!h2c_info) 5774 goto recv_c2h; 5775 5776 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 5777 if (ret) 5778 return ret; 5779 5780 recv_c2h: 5781 if (!c2h_info) 5782 return 0; 5783 5784 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 5785 if (ret) 5786 return ret; 5787 5788 return 0; 5789 } 5790 5791 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 5792 { 5793 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 5794 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 5795 return; 5796 } 5797 5798 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 5799 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 5800 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 5801 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 5802 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 5803 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 5804 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 5805 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 5806 5807 rtw89_fw_prog_cnt_dump(rtwdev); 5808 } 5809 5810 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 5811 { 5812 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5813 struct rtw89_pktofld_info *info, *tmp; 5814 u8 idx; 5815 5816 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 5817 if (!(rtwdev->chip->support_bands & BIT(idx))) 5818 continue; 5819 5820 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 5821 if (test_bit(info->id, rtwdev->pkt_offload)) 5822 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 5823 list_del(&info->list); 5824 kfree(info); 5825 } 5826 } 5827 } 5828 5829 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 5830 struct rtw89_vif *rtwvif, 5831 struct rtw89_pktofld_info *info, 5832 enum nl80211_band band, u8 ssid_idx) 5833 { 5834 struct cfg80211_scan_request *req = rtwvif->scan_req; 5835 5836 if (band != NL80211_BAND_6GHZ) 5837 return false; 5838 5839 if (req->ssids[ssid_idx].ssid_len) { 5840 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 5841 req->ssids[ssid_idx].ssid_len); 5842 info->ssid_len = req->ssids[ssid_idx].ssid_len; 5843 return 
false; 5844 } else { 5845 info->wildcard_6ghz = true; 5846 return true; 5847 } 5848 } 5849 5850 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 5851 struct rtw89_vif *rtwvif, 5852 struct sk_buff *skb, u8 ssid_idx) 5853 { 5854 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5855 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5856 struct rtw89_pktofld_info *info; 5857 struct sk_buff *new; 5858 int ret = 0; 5859 u8 band; 5860 5861 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 5862 if (!(rtwdev->chip->support_bands & BIT(band))) 5863 continue; 5864 5865 new = skb_copy(skb, GFP_KERNEL); 5866 if (!new) { 5867 ret = -ENOMEM; 5868 goto out; 5869 } 5870 skb_put_data(new, ies->ies[band], ies->len[band]); 5871 skb_put_data(new, ies->common_ies, ies->common_ie_len); 5872 5873 info = kzalloc(sizeof(*info), GFP_KERNEL); 5874 if (!info) { 5875 ret = -ENOMEM; 5876 kfree_skb(new); 5877 goto out; 5878 } 5879 5880 rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 5881 ssid_idx); 5882 5883 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 5884 if (ret) { 5885 kfree_skb(new); 5886 kfree(info); 5887 goto out; 5888 } 5889 5890 list_add_tail(&info->list, &scan_info->pkt_list[band]); 5891 kfree_skb(new); 5892 } 5893 out: 5894 return ret; 5895 } 5896 5897 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 5898 struct rtw89_vif *rtwvif) 5899 { 5900 struct cfg80211_scan_request *req = rtwvif->scan_req; 5901 struct sk_buff *skb; 5902 u8 num = req->n_ssids, i; 5903 int ret; 5904 5905 for (i = 0; i < num; i++) { 5906 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5907 req->ssids[i].ssid, 5908 req->ssids[i].ssid_len, 5909 req->ie_len); 5910 if (!skb) 5911 return -ENOMEM; 5912 5913 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 5914 kfree_skb(skb); 5915 5916 if (ret) 5917 return ret; 5918 } 5919 5920 return 0; 5921 } 5922 5923 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 5924 struct cfg80211_scan_request *req, 5925 struct rtw89_mac_chinfo *ch_info) 5926 { 5927 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5928 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5929 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 5930 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5931 struct cfg80211_scan_6ghz_params *params; 5932 struct rtw89_pktofld_info *info, *tmp; 5933 struct ieee80211_hdr *hdr; 5934 struct sk_buff *skb; 5935 bool found; 5936 int ret = 0; 5937 u8 i; 5938 5939 if (!req->n_6ghz_params) 5940 return 0; 5941 5942 for (i = 0; i < req->n_6ghz_params; i++) { 5943 params = &req->scan_6ghz_params[i]; 5944 5945 if (req->channels[params->channel_idx]->hw_value != 5946 ch_info->pri_ch) 5947 continue; 5948 5949 found = false; 5950 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 5951 if (ether_addr_equal(tmp->bssid, params->bssid)) { 5952 found = true; 5953 break; 5954 } 5955 } 5956 if (found) 5957 continue; 5958 5959 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5960 NULL, 0, req->ie_len); 5961 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 5962 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 5963 hdr = (struct ieee80211_hdr *)skb->data; 5964 ether_addr_copy(hdr->addr3, params->bssid); 5965 5966 info = kzalloc(sizeof(*info), GFP_KERNEL); 5967 if (!info) { 5968 ret = -ENOMEM; 5969 kfree_skb(skb); 5970 goto out; 5971 } 5972 5973 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 5974 if (ret) { 5975 
kfree_skb(skb); 5976 kfree(info); 5977 goto out; 5978 } 5979 5980 ether_addr_copy(info->bssid, params->bssid); 5981 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 5982 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 5983 5984 ch_info->tx_pkt = true; 5985 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 5986 5987 kfree_skb(skb); 5988 } 5989 5990 out: 5991 return ret; 5992 } 5993 5994 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 5995 int chan_type, int ssid_num, 5996 struct rtw89_mac_chinfo *ch_info) 5997 { 5998 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 5999 struct rtw89_pktofld_info *info; 6000 u8 probe_count = 0; 6001 6002 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6003 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6004 ch_info->bw = RTW89_SCAN_WIDTH; 6005 ch_info->tx_pkt = true; 6006 ch_info->cfg_tx_pwr = false; 6007 ch_info->tx_pwr_idx = 0; 6008 ch_info->tx_null = false; 6009 ch_info->pause_data = false; 6010 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6011 6012 if (ssid_num) { 6013 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6014 if (info->channel_6ghz && 6015 ch_info->pri_ch != info->channel_6ghz) 6016 continue; 6017 else if (info->channel_6ghz && probe_count != 0) 6018 ch_info->period += RTW89_CHANNEL_TIME_6G; 6019 6020 if (info->wildcard_6ghz) 6021 continue; 6022 6023 ch_info->pkt_id[probe_count++] = info->id; 6024 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6025 break; 6026 } 6027 ch_info->num_pkt = probe_count; 6028 } 6029 6030 switch (chan_type) { 6031 case RTW89_CHAN_DFS: 6032 if (ch_info->ch_band != RTW89_BAND_6G) 6033 ch_info->period = max_t(u8, ch_info->period, 6034 RTW89_DFS_CHAN_TIME); 6035 ch_info->dwell_time = RTW89_DWELL_TIME; 6036 break; 6037 case RTW89_CHAN_ACTIVE: 6038 break; 6039 default: 6040 rtw89_err(rtwdev, "Channel type out of bound\n"); 6041 } 6042 } 6043 6044 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6045 int ssid_num, 6046 struct rtw89_mac_chinfo *ch_info) 6047 { 6048 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6049 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 6050 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6051 struct cfg80211_scan_request *req = rtwvif->scan_req; 6052 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6053 struct rtw89_pktofld_info *info; 6054 u8 band, probe_count = 0; 6055 int ret; 6056 6057 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6058 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6059 ch_info->bw = RTW89_SCAN_WIDTH; 6060 ch_info->tx_pkt = true; 6061 ch_info->cfg_tx_pwr = false; 6062 ch_info->tx_pwr_idx = 0; 6063 ch_info->tx_null = false; 6064 ch_info->pause_data = false; 6065 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6066 6067 if (ch_info->ch_band == RTW89_BAND_6G) { 6068 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6069 !ch_info->is_psc) { 6070 ch_info->tx_pkt = false; 6071 if (!req->duration_mandatory) 6072 ch_info->period -= RTW89_DWELL_TIME_6G; 6073 } 6074 } 6075 6076 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 6077 if (ret) 6078 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6079 6080 if (ssid_num) { 6081 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6082 6083 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6084 if (info->channel_6ghz && 6085 ch_info->pri_ch != info->channel_6ghz) 6086 continue; 6087 else if (info->channel_6ghz && probe_count != 0) 6088 ch_info->period += 
RTW89_CHANNEL_TIME_6G; 6089 6090 if (info->wildcard_6ghz) 6091 continue; 6092 6093 ch_info->pkt_id[probe_count++] = info->id; 6094 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6095 break; 6096 } 6097 ch_info->num_pkt = probe_count; 6098 } 6099 6100 switch (chan_type) { 6101 case RTW89_CHAN_OPERATE: 6102 ch_info->central_ch = op->channel; 6103 ch_info->pri_ch = op->primary_channel; 6104 ch_info->ch_band = op->band_type; 6105 ch_info->bw = op->band_width; 6106 ch_info->tx_null = true; 6107 ch_info->num_pkt = 0; 6108 break; 6109 case RTW89_CHAN_DFS: 6110 if (ch_info->ch_band != RTW89_BAND_6G) 6111 ch_info->period = max_t(u8, ch_info->period, 6112 RTW89_DFS_CHAN_TIME); 6113 ch_info->dwell_time = RTW89_DWELL_TIME; 6114 break; 6115 case RTW89_CHAN_ACTIVE: 6116 break; 6117 default: 6118 rtw89_err(rtwdev, "Channel type out of bound\n"); 6119 } 6120 } 6121 6122 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6123 int ssid_num, 6124 struct rtw89_mac_chinfo_be *ch_info) 6125 { 6126 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6127 struct rtw89_pktofld_info *info; 6128 u8 probe_count = 0, i; 6129 6130 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6131 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6132 ch_info->bw = RTW89_SCAN_WIDTH; 6133 ch_info->tx_null = false; 6134 ch_info->pause_data = false; 6135 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6136 6137 if (ssid_num) { 6138 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6139 ch_info->pkt_id[probe_count++] = info->id; 6140 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6141 break; 6142 } 6143 } 6144 6145 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6146 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6147 6148 switch (chan_type) { 6149 case RTW89_CHAN_DFS: 6150 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6151 ch_info->dwell_time = RTW89_DWELL_TIME; 6152 break; 6153 case RTW89_CHAN_ACTIVE: 6154 break; 6155 default: 6156 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6157 break; 6158 } 6159 } 6160 6161 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6162 int ssid_num, 6163 struct rtw89_mac_chinfo_be *ch_info) 6164 { 6165 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6166 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 6167 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6168 struct cfg80211_scan_request *req = rtwvif->scan_req; 6169 struct rtw89_pktofld_info *info; 6170 u8 band, probe_count = 0, i; 6171 6172 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6173 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6174 ch_info->bw = RTW89_SCAN_WIDTH; 6175 ch_info->tx_null = false; 6176 ch_info->pause_data = false; 6177 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6178 6179 if (ssid_num) { 6180 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6181 6182 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6183 if (info->channel_6ghz && 6184 ch_info->pri_ch != info->channel_6ghz) 6185 continue; 6186 6187 if (info->wildcard_6ghz) 6188 continue; 6189 6190 ch_info->pkt_id[probe_count++] = info->id; 6191 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6192 break; 6193 } 6194 } 6195 6196 if (ch_info->ch_band == RTW89_BAND_6G) { 6197 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6198 !ch_info->is_psc) { 6199 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6200 if (!req->duration_mandatory) 6201 ch_info->period -= RTW89_DWELL_TIME_6G; 6202 } 6203 } 6204 6205 for (i = probe_count; i < 
RTW89_SCANOFLD_MAX_SSID; i++) 6206 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6207 6208 switch (chan_type) { 6209 case RTW89_CHAN_DFS: 6210 if (ch_info->ch_band != RTW89_BAND_6G) 6211 ch_info->period = 6212 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6213 ch_info->dwell_time = RTW89_DWELL_TIME; 6214 break; 6215 case RTW89_CHAN_ACTIVE: 6216 break; 6217 default: 6218 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6219 break; 6220 } 6221 } 6222 6223 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6224 struct rtw89_vif *rtwvif) 6225 { 6226 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6227 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6228 struct rtw89_mac_chinfo *ch_info, *tmp; 6229 struct ieee80211_channel *channel; 6230 struct list_head chan_list; 6231 int list_len; 6232 enum rtw89_chan_type type; 6233 int ret = 0; 6234 u32 idx; 6235 6236 INIT_LIST_HEAD(&chan_list); 6237 for (idx = 0, list_len = 0; 6238 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6239 idx++, list_len++) { 6240 channel = nd_config->channels[idx]; 6241 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6242 if (!ch_info) { 6243 ret = -ENOMEM; 6244 goto out; 6245 } 6246 6247 ch_info->period = RTW89_CHANNEL_TIME; 6248 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6249 ch_info->central_ch = channel->hw_value; 6250 ch_info->pri_ch = channel->hw_value; 6251 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6252 6253 if (channel->flags & 6254 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6255 type = RTW89_CHAN_DFS; 6256 else 6257 type = RTW89_CHAN_ACTIVE; 6258 6259 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6260 list_add_tail(&ch_info->list, &chan_list); 6261 } 6262 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6263 6264 out: 6265 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6266 list_del(&ch_info->list); 6267 kfree(ch_info); 6268 } 6269 6270 return ret; 6271 } 6272 6273 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6274 struct rtw89_vif *rtwvif, bool connected) 6275 { 6276 struct cfg80211_scan_request *req = rtwvif->scan_req; 6277 struct rtw89_mac_chinfo *ch_info, *tmp; 6278 struct ieee80211_channel *channel; 6279 struct list_head chan_list; 6280 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6281 int list_len, off_chan_time = 0; 6282 enum rtw89_chan_type type; 6283 int ret = 0; 6284 u32 idx; 6285 6286 INIT_LIST_HEAD(&chan_list); 6287 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6288 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6289 idx++, list_len++) { 6290 channel = req->channels[idx]; 6291 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6292 if (!ch_info) { 6293 ret = -ENOMEM; 6294 goto out; 6295 } 6296 6297 if (req->duration) 6298 ch_info->period = req->duration; 6299 else if (channel->band == NL80211_BAND_6GHZ) 6300 ch_info->period = RTW89_CHANNEL_TIME_6G + 6301 RTW89_DWELL_TIME_6G; 6302 else 6303 ch_info->period = RTW89_CHANNEL_TIME; 6304 6305 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6306 ch_info->central_ch = channel->hw_value; 6307 ch_info->pri_ch = channel->hw_value; 6308 ch_info->rand_seq_num = random_seq; 6309 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6310 6311 if (channel->flags & 6312 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6313 type = RTW89_CHAN_DFS; 6314 else 6315 type = RTW89_CHAN_ACTIVE; 6316 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6317 6318 if 
(connected && 6319 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6320 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6321 if (!tmp) { 6322 ret = -ENOMEM; 6323 kfree(ch_info); 6324 goto out; 6325 } 6326 6327 type = RTW89_CHAN_OPERATE; 6328 tmp->period = req->duration_mandatory ? 6329 req->duration : RTW89_CHANNEL_TIME; 6330 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6331 list_add_tail(&tmp->list, &chan_list); 6332 off_chan_time = 0; 6333 list_len++; 6334 } 6335 list_add_tail(&ch_info->list, &chan_list); 6336 off_chan_time += ch_info->period; 6337 } 6338 rtwdev->scan_info.last_chan_idx = idx; 6339 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6340 6341 out: 6342 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6343 list_del(&ch_info->list); 6344 kfree(ch_info); 6345 } 6346 6347 return ret; 6348 } 6349 6350 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6351 struct rtw89_vif *rtwvif) 6352 { 6353 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6354 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6355 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6356 struct ieee80211_channel *channel; 6357 struct list_head chan_list; 6358 enum rtw89_chan_type type; 6359 int list_len, ret; 6360 u32 idx; 6361 6362 INIT_LIST_HEAD(&chan_list); 6363 6364 for (idx = 0, list_len = 0; 6365 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6366 idx++, list_len++) { 6367 channel = nd_config->channels[idx]; 6368 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6369 if (!ch_info) { 6370 ret = -ENOMEM; 6371 goto out; 6372 } 6373 6374 ch_info->period = RTW89_CHANNEL_TIME; 6375 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6376 ch_info->central_ch = channel->hw_value; 6377 ch_info->pri_ch = channel->hw_value; 6378 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6379 6380 if (channel->flags & 6381 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6382 type = RTW89_CHAN_DFS; 6383 else 6384 type = RTW89_CHAN_ACTIVE; 6385 6386 rtw89_pno_scan_add_chan_be(rtwdev, type, 6387 nd_config->n_match_sets, ch_info); 6388 list_add_tail(&ch_info->list, &chan_list); 6389 } 6390 6391 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 6392 6393 out: 6394 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6395 list_del(&ch_info->list); 6396 kfree(ch_info); 6397 } 6398 6399 return ret; 6400 } 6401 6402 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6403 struct rtw89_vif *rtwvif, bool connected) 6404 { 6405 struct cfg80211_scan_request *req = rtwvif->scan_req; 6406 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6407 struct ieee80211_channel *channel; 6408 struct list_head chan_list; 6409 enum rtw89_chan_type type; 6410 int list_len, ret; 6411 bool random_seq; 6412 u32 idx; 6413 6414 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6415 INIT_LIST_HEAD(&chan_list); 6416 6417 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6418 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6419 idx++, list_len++) { 6420 channel = req->channels[idx]; 6421 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6422 if (!ch_info) { 6423 ret = -ENOMEM; 6424 goto out; 6425 } 6426 6427 if (req->duration) 6428 ch_info->period = req->duration; 6429 else if (channel->band == NL80211_BAND_6GHZ) 6430 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6431 else 6432 ch_info->period = RTW89_CHANNEL_TIME; 6433 6434 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6435 ch_info->central_ch = 
channel->hw_value; 6436 ch_info->pri_ch = channel->hw_value; 6437 ch_info->rand_seq_num = random_seq; 6438 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6439 6440 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6441 type = RTW89_CHAN_DFS; 6442 else 6443 type = RTW89_CHAN_ACTIVE; 6444 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6445 6446 list_add_tail(&ch_info->list, &chan_list); 6447 } 6448 6449 rtwdev->scan_info.last_chan_idx = idx; 6450 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 6451 6452 out: 6453 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6454 list_del(&ch_info->list); 6455 kfree(ch_info); 6456 } 6457 6458 return ret; 6459 } 6460 6461 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6462 struct rtw89_vif *rtwvif, bool connected) 6463 { 6464 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6465 int ret; 6466 6467 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 6468 if (ret) { 6469 rtw89_err(rtwdev, "Update probe request failed\n"); 6470 goto out; 6471 } 6472 ret = mac->add_chan_list(rtwdev, rtwvif, connected); 6473 out: 6474 return ret; 6475 } 6476 6477 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6478 struct ieee80211_scan_request *scan_req) 6479 { 6480 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6481 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6482 struct cfg80211_scan_request *req = &scan_req->req; 6483 u32 rx_fltr = rtwdev->hal.rx_fltr; 6484 u8 mac_addr[ETH_ALEN]; 6485 6486 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 6487 rtwdev->scan_info.scanning_vif = vif; 6488 rtwdev->scan_info.last_chan_idx = 0; 6489 rtwdev->scan_info.abort = false; 6490 rtwvif->scan_ies = &scan_req->ies; 6491 rtwvif->scan_req = req; 6492 ieee80211_stop_queues(rtwdev->hw); 6493 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false); 6494 6495 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6496 get_random_mask_addr(mac_addr, req->mac_addr, 6497 req->mac_addr_mask); 6498 else 6499 ether_addr_copy(mac_addr, vif->addr); 6500 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 6501 6502 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6503 rx_fltr &= ~B_AX_A_BC; 6504 rx_fltr &= ~B_AX_A_A1_MATCH; 6505 rtw89_write32_mask(rtwdev, 6506 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6507 B_AX_RX_FLTR_CFG_MASK, 6508 rx_fltr); 6509 6510 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6511 } 6512 6513 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6514 bool aborted) 6515 { 6516 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6517 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6518 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 6519 struct cfg80211_scan_info info = { 6520 .aborted = aborted, 6521 }; 6522 6523 if (!vif) 6524 return; 6525 6526 rtw89_write32_mask(rtwdev, 6527 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6528 B_AX_RX_FLTR_CFG_MASK, 6529 rtwdev->hal.rx_fltr); 6530 6531 rtw89_core_scan_complete(rtwdev, vif, true); 6532 ieee80211_scan_completed(rtwdev->hw, &info); 6533 ieee80211_wake_queues(rtwdev->hw); 6534 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true); 6535 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6536 6537 rtw89_release_pkt_list(rtwdev); 6538 rtwvif->scan_req = NULL; 6539 rtwvif->scan_ies = NULL; 6540 scan_info->last_chan_idx = 0; 6541 scan_info->scanning_vif = NULL; 6542 scan_info->abort = false; 6543 6544 
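/* All scan bookkeeping has been cleared above; resume the channel-context
 * handling that was paused when the HW scan started (rtw89_chanctx_pause()
 * in rtw89_hw_scan_start()).
 */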
rtw89_chanctx_proceed(rtwdev); 6545 } 6546 6547 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 6548 { 6549 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6550 int ret; 6551 6552 scan_info->abort = true; 6553 6554 ret = rtw89_hw_scan_offload(rtwdev, vif, false); 6555 if (ret) 6556 rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); 6557 6558 /* Indicate ieee80211_scan_completed() before returning, which is safe 6559 * because scan abort command always waits for completion of 6560 * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan 6561 * work properly. 6562 */ 6563 rtw89_hw_scan_complete(rtwdev, vif, true); 6564 } 6565 6566 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 6567 { 6568 struct rtw89_vif *rtwvif; 6569 6570 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 6571 /* This variable implies connected or during attempt to connect */ 6572 if (!is_zero_ether_addr(rtwvif->bssid)) 6573 return true; 6574 } 6575 6576 return false; 6577 } 6578 6579 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6580 bool enable) 6581 { 6582 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6583 struct rtw89_scan_option opt = {0}; 6584 struct rtw89_vif *rtwvif; 6585 bool connected; 6586 int ret = 0; 6587 6588 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 6589 if (!rtwvif) 6590 return -EINVAL; 6591 6592 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 6593 opt.enable = enable; 6594 opt.target_ch_mode = connected; 6595 if (enable) { 6596 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 6597 if (ret) 6598 goto out; 6599 } 6600 6601 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 6602 opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 6603 opt.scan_mode = RTW89_SCAN_MODE_SA; 6604 opt.band = RTW89_PHY_0; 6605 opt.num_macc_role = 0; 6606 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 6607 opt.num_opch = connected ? 1 : 0; 6608 opt.opch_end = connected ? 
0 : RTW89_CHAN_INVALID; 6609 } 6610 6611 ret = mac->scan_offload(rtwdev, &opt, rtwvif, false); 6612 out: 6613 return ret; 6614 } 6615 6616 #define H2C_FW_CPU_EXCEPTION_LEN 4 6617 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 6618 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 6619 { 6620 struct sk_buff *skb; 6621 int ret; 6622 6623 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 6624 if (!skb) { 6625 rtw89_err(rtwdev, 6626 "failed to alloc skb for fw cpu exception\n"); 6627 return -ENOMEM; 6628 } 6629 6630 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 6631 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 6632 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 6633 6634 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6635 H2C_CAT_TEST, 6636 H2C_CL_FW_STATUS_TEST, 6637 H2C_FUNC_CPU_EXCEPTION, 0, 0, 6638 H2C_FW_CPU_EXCEPTION_LEN); 6639 6640 ret = rtw89_h2c_tx(rtwdev, skb, false); 6641 if (ret) { 6642 rtw89_err(rtwdev, "failed to send h2c\n"); 6643 goto fail; 6644 } 6645 6646 return 0; 6647 6648 fail: 6649 dev_kfree_skb_any(skb); 6650 return ret; 6651 } 6652 6653 #define H2C_PKT_DROP_LEN 24 6654 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 6655 const struct rtw89_pkt_drop_params *params) 6656 { 6657 struct sk_buff *skb; 6658 int ret; 6659 6660 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 6661 if (!skb) { 6662 rtw89_err(rtwdev, 6663 "failed to alloc skb for packet drop\n"); 6664 return -ENOMEM; 6665 } 6666 6667 switch (params->sel) { 6668 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 6669 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 6670 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 6671 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 6672 case RTW89_PKT_DROP_SEL_BAND_ONCE: 6673 break; 6674 default: 6675 rtw89_debug(rtwdev, RTW89_DBG_FW, 6676 "H2C of pkt drop might not fully support sel: %d yet\n", 6677 params->sel); 6678 break; 6679 } 6680 6681 skb_put(skb, H2C_PKT_DROP_LEN); 6682 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 6683 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 6684 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 6685 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 6686 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 6687 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 6688 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 6689 params->macid_band_sel[0]); 6690 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 6691 params->macid_band_sel[1]); 6692 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 6693 params->macid_band_sel[2]); 6694 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 6695 params->macid_band_sel[3]); 6696 6697 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6698 H2C_CAT_MAC, 6699 H2C_CL_MAC_FW_OFLD, 6700 H2C_FUNC_PKT_DROP, 0, 0, 6701 H2C_PKT_DROP_LEN); 6702 6703 ret = rtw89_h2c_tx(rtwdev, skb, false); 6704 if (ret) { 6705 rtw89_err(rtwdev, "failed to send h2c\n"); 6706 goto fail; 6707 } 6708 6709 return 0; 6710 6711 fail: 6712 dev_kfree_skb_any(skb); 6713 return ret; 6714 } 6715 6716 #define H2C_KEEP_ALIVE_LEN 4 6717 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6718 bool enable) 6719 { 6720 struct sk_buff *skb; 6721 u8 pkt_id = 0; 6722 int ret; 6723 6724 if (enable) { 6725 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6726 RTW89_PKT_OFLD_TYPE_NULL_DATA, 6727 &pkt_id); 6728 if (ret) 6729 return -EPERM; 6730 } 6731 6732 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 6733 if (!skb) { 6734 
rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6735 return -ENOMEM; 6736 } 6737 6738 skb_put(skb, H2C_KEEP_ALIVE_LEN); 6739 6740 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 6741 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 6742 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 6743 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 6744 6745 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6746 H2C_CAT_MAC, 6747 H2C_CL_MAC_WOW, 6748 H2C_FUNC_KEEP_ALIVE, 0, 1, 6749 H2C_KEEP_ALIVE_LEN); 6750 6751 ret = rtw89_h2c_tx(rtwdev, skb, false); 6752 if (ret) { 6753 rtw89_err(rtwdev, "failed to send h2c\n"); 6754 goto fail; 6755 } 6756 6757 return 0; 6758 6759 fail: 6760 dev_kfree_skb_any(skb); 6761 6762 return ret; 6763 } 6764 6765 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6766 bool enable) 6767 { 6768 struct rtw89_h2c_arp_offload *h2c; 6769 u32 len = sizeof(*h2c); 6770 struct sk_buff *skb; 6771 u8 pkt_id = 0; 6772 int ret; 6773 6774 if (enable) { 6775 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6776 RTW89_PKT_OFLD_TYPE_ARP_RSP, 6777 &pkt_id); 6778 if (ret) 6779 return ret; 6780 } 6781 6782 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6783 if (!skb) { 6784 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 6785 return -ENOMEM; 6786 } 6787 6788 skb_put(skb, len); 6789 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 6790 6791 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 6792 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 6793 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 6794 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 6795 6796 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6797 H2C_CAT_MAC, 6798 H2C_CL_MAC_WOW, 6799 H2C_FUNC_ARP_OFLD, 0, 1, 6800 len); 6801 6802 ret = rtw89_h2c_tx(rtwdev, skb, false); 6803 if (ret) { 6804 rtw89_err(rtwdev, "failed to send h2c\n"); 6805 goto fail; 6806 } 6807 6808 return 0; 6809 6810 fail: 6811 dev_kfree_skb_any(skb); 6812 6813 return ret; 6814 } 6815 6816 #define H2C_DISCONNECT_DETECT_LEN 8 6817 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 6818 struct rtw89_vif *rtwvif, bool enable) 6819 { 6820 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6821 struct sk_buff *skb; 6822 u8 macid = rtwvif->mac_id; 6823 int ret; 6824 6825 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 6826 if (!skb) { 6827 rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n"); 6828 return -ENOMEM; 6829 } 6830 6831 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 6832 6833 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 6834 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 6835 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 6836 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 6837 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 6838 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 6839 } 6840 6841 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6842 H2C_CAT_MAC, 6843 H2C_CL_MAC_WOW, 6844 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 6845 H2C_DISCONNECT_DETECT_LEN); 6846 6847 ret = rtw89_h2c_tx(rtwdev, skb, false); 6848 if (ret) { 6849 rtw89_err(rtwdev, "failed to send h2c\n"); 6850 goto fail; 6851 } 6852 6853 return 0; 6854 6855 fail: 6856 dev_kfree_skb_any(skb); 6857 6858 return ret; 6859 } 6860 6861 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6862 bool enable) 6863 { 6864 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6865
struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6866 struct rtw89_h2c_cfg_nlo *h2c; 6867 u32 len = sizeof(*h2c); 6868 struct sk_buff *skb; 6869 int ret, i; 6870 6871 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6872 if (!skb) { 6873 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 6874 return -ENOMEM; 6875 } 6876 6877 skb_put(skb, len); 6878 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 6879 6880 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 6881 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 6882 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID); 6883 6884 if (enable) { 6885 h2c->nlo_cnt = nd_config->n_match_sets; 6886 for (i = 0 ; i < nd_config->n_match_sets; i++) { 6887 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 6888 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 6889 nd_config->match_sets[i].ssid.ssid_len); 6890 } 6891 } 6892 6893 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6894 H2C_CAT_MAC, 6895 H2C_CL_MAC_WOW, 6896 H2C_FUNC_NLO, 0, 1, 6897 len); 6898 6899 ret = rtw89_h2c_tx(rtwdev, skb, false); 6900 if (ret) { 6901 rtw89_err(rtwdev, "failed to send h2c\n"); 6902 goto fail; 6903 } 6904 6905 return 0; 6906 6907 fail: 6908 dev_kfree_skb_any(skb); 6909 return ret; 6910 } 6911 6912 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6913 bool enable) 6914 { 6915 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6916 struct rtw89_h2c_wow_global *h2c; 6917 u8 macid = rtwvif->mac_id; 6918 u32 len = sizeof(*h2c); 6919 struct sk_buff *skb; 6920 int ret; 6921 6922 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6923 if (!skb) { 6924 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 6925 return -ENOMEM; 6926 } 6927 6928 skb_put(skb, len); 6929 h2c = (struct rtw89_h2c_wow_global *)skb->data; 6930 6931 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 6932 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 6933 le32_encode_bits(rtw_wow->ptk_alg, 6934 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 6935 le32_encode_bits(rtw_wow->gtk_alg, 6936 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 6937 h2c->key_info = rtw_wow->key_info; 6938 6939 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6940 H2C_CAT_MAC, 6941 H2C_CL_MAC_WOW, 6942 H2C_FUNC_WOW_GLOBAL, 0, 1, 6943 len); 6944 6945 ret = rtw89_h2c_tx(rtwdev, skb, false); 6946 if (ret) { 6947 rtw89_err(rtwdev, "failed to send h2c\n"); 6948 goto fail; 6949 } 6950 6951 return 0; 6952 6953 fail: 6954 dev_kfree_skb_any(skb); 6955 6956 return ret; 6957 } 6958 6959 #define H2C_WAKEUP_CTRL_LEN 4 6960 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 6961 struct rtw89_vif *rtwvif, 6962 bool enable) 6963 { 6964 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6965 struct sk_buff *skb; 6966 u8 macid = rtwvif->mac_id; 6967 int ret; 6968 6969 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 6970 if (!skb) { 6971 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 6972 return -ENOMEM; 6973 } 6974 6975 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 6976 6977 if (rtw_wow->pattern_cnt) 6978 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 6979 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 6980 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 6981 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 6982 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 6983 6984 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 6985 6986 
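/* Only the wake sources configured for this WoWLAN session (pattern match,
 * magic packet, deauth/disconnect) are toggled above; the MAC ID is always
 * written so firmware knows which connection the wakeup control targets.
 */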
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6987 H2C_CAT_MAC, 6988 H2C_CL_MAC_WOW, 6989 H2C_FUNC_WAKEUP_CTRL, 0, 1, 6990 H2C_WAKEUP_CTRL_LEN); 6991 6992 ret = rtw89_h2c_tx(rtwdev, skb, false); 6993 if (ret) { 6994 rtw89_err(rtwdev, "failed to send h2c\n"); 6995 goto fail; 6996 } 6997 6998 return 0; 6999 7000 fail: 7001 dev_kfree_skb_any(skb); 7002 7003 return ret; 7004 } 7005 7006 #define H2C_WOW_CAM_UPD_LEN 24 7007 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7008 struct rtw89_wow_cam_info *cam_info) 7009 { 7010 struct sk_buff *skb; 7011 int ret; 7012 7013 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7014 if (!skb) { 7015 rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n"); 7016 return -ENOMEM; 7017 } 7018 7019 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7020 7021 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7022 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7023 if (cam_info->valid) { 7024 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7025 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7026 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7027 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7028 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7029 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7030 cam_info->negative_pattern_match); 7031 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7032 cam_info->skip_mac_hdr); 7033 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7034 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7035 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7036 } 7037 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7038 7039 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7040 H2C_CAT_MAC, 7041 H2C_CL_MAC_WOW, 7042 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7043 H2C_WOW_CAM_UPD_LEN); 7044 7045 ret = rtw89_h2c_tx(rtwdev, skb, false); 7046 if (ret) { 7047 rtw89_err(rtwdev, "failed to send h2c\n"); 7048 goto fail; 7049 } 7050 7051 return 0; 7052 fail: 7053 dev_kfree_skb_any(skb); 7054 7055 return ret; 7056 } 7057 7058 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7059 struct rtw89_vif *rtwvif, 7060 bool enable) 7061 { 7062 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7063 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7064 struct rtw89_h2c_wow_gtk_ofld *h2c; 7065 u8 macid = rtwvif->mac_id; 7066 u32 len = sizeof(*h2c); 7067 u8 pkt_id_sa_query = 0; 7068 struct sk_buff *skb; 7069 u8 pkt_id_eapol = 0; 7070 int ret; 7071 7072 if (!rtw_wow->gtk_alg) 7073 return 0; 7074 7075 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7076 if (!skb) { 7077 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7078 return -ENOMEM; 7079 } 7080 7081 skb_put(skb, len); 7082 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7083 7084 if (!enable) 7085 goto hdr; 7086 7087 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 7088 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7089 &pkt_id_eapol); 7090 if (ret) 7091 goto fail; 7092 7093 if (gtk_info->igtk_keyid) { 7094 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 7095 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7096 &pkt_id_sa_query); 7097 if (ret) 7098 goto fail; 7099 } 7100 7101 /* TKIP is not supported yet */ 7102 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7103 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7104 le32_encode_bits(gtk_info->igtk_keyid ?
1 : 0, 7105 RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) | 7106 le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) | 7107 le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID); 7108 h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0, 7109 RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) | 7110 le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT); 7111 h2c->gtk_info = rtw_wow->gtk_info; 7112 7113 hdr: 7114 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7115 H2C_CAT_MAC, 7116 H2C_CL_MAC_WOW, 7117 H2C_FUNC_GTK_OFLD, 0, 1, 7118 len); 7119 7120 ret = rtw89_h2c_tx(rtwdev, skb, false); 7121 if (ret) { 7122 rtw89_err(rtwdev, "failed to send h2c\n"); 7123 goto fail; 7124 } 7125 return 0; 7126 fail: 7127 dev_kfree_skb_any(skb); 7128 7129 return ret; 7130 } 7131 7132 int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 7133 bool enable) 7134 { 7135 struct rtw89_h2c_fwips *h2c; 7136 u32 len = sizeof(*h2c); 7137 struct sk_buff *skb; 7138 int ret; 7139 7140 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7141 if (!skb) { 7142 rtw89_err(rtwdev, "failed to alloc skb for fw ips\n"); 7143 return -ENOMEM; 7144 } 7145 skb_put(skb, len); 7146 h2c = (struct rtw89_h2c_fwips *)skb->data; 7147 7148 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) | 7149 le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE); 7150 7151 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7152 H2C_CAT_MAC, 7153 H2C_CL_MAC_PS, 7154 H2C_FUNC_IPS_CFG, 0, 1, 7155 len); 7156 7157 ret = rtw89_h2c_tx(rtwdev, skb, false); 7158 if (ret) { 7159 rtw89_err(rtwdev, "failed to send h2c\n"); 7160 goto fail; 7161 } 7162 return 0; 7163 fail: 7164 dev_kfree_skb_any(skb); 7165 7166 return ret; 7167 } 7168 7169 int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev) 7170 { 7171 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 7172 struct rtw89_h2c_wow_aoac *h2c; 7173 u32 len = sizeof(*h2c); 7174 struct sk_buff *skb; 7175 unsigned int cond; 7176 7177 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7178 if (!skb) { 7179 rtw89_err(rtwdev, "failed to alloc skb for aoac\n"); 7180 return -ENOMEM; 7181 } 7182 7183 skb_put(skb, len); 7184 7185 /* This H2C only notifies firmware to generate the AOAC report C2H; 7186 * no other parameters are needed. 7187 */ 7188 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7189 H2C_CAT_MAC, 7190 H2C_CL_MAC_WOW, 7191 H2C_FUNC_AOAC_REPORT_REQ, 1, 0, 7192 len); 7193 7194 cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ); 7195 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7196 } 7197 7198 /* Return < 0 if failures happen during waiting for the condition. 7199 * Return 0 when waiting for the condition succeeds. 7200 * Return > 0 if the wait is considered unreachable due to driver/FW design, 7201 * where 1 means during SER.
7202 */ 7203 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7204 struct rtw89_wait_info *wait, unsigned int cond) 7205 { 7206 int ret; 7207 7208 ret = rtw89_h2c_tx(rtwdev, skb, false); 7209 if (ret) { 7210 rtw89_err(rtwdev, "failed to send h2c\n"); 7211 dev_kfree_skb_any(skb); 7212 return -EBUSY; 7213 } 7214 7215 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7216 return 1; 7217 7218 return rtw89_wait_for_cond(wait, cond); 7219 } 7220 7221 #define H2C_ADD_MCC_LEN 16 7222 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7223 const struct rtw89_fw_mcc_add_req *p) 7224 { 7225 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7226 struct sk_buff *skb; 7227 unsigned int cond; 7228 7229 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7230 if (!skb) { 7231 rtw89_err(rtwdev, 7232 "failed to alloc skb for add mcc\n"); 7233 return -ENOMEM; 7234 } 7235 7236 skb_put(skb, H2C_ADD_MCC_LEN); 7237 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7238 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7239 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7240 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7241 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7242 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7243 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7244 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7245 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7246 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7247 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7248 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7249 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7250 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7251 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7252 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7253 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7254 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7255 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7256 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7257 7258 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7259 H2C_CAT_MAC, 7260 H2C_CL_MCC, 7261 H2C_FUNC_ADD_MCC, 0, 0, 7262 H2C_ADD_MCC_LEN); 7263 7264 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7265 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7266 } 7267 7268 #define H2C_START_MCC_LEN 12 7269 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7270 const struct rtw89_fw_mcc_start_req *p) 7271 { 7272 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7273 struct sk_buff *skb; 7274 unsigned int cond; 7275 7276 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7277 if (!skb) { 7278 rtw89_err(rtwdev, 7279 "failed to alloc skb for start mcc\n"); 7280 return -ENOMEM; 7281 } 7282 7283 skb_put(skb, H2C_START_MCC_LEN); 7284 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7285 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7286 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7287 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7288 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7289 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7290 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 7291 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7292 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7293 7294 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7295 H2C_CAT_MAC, 7296 H2C_CL_MCC, 7297 H2C_FUNC_START_MCC, 0, 0, 7298 H2C_START_MCC_LEN); 7299 7300 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7301 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7302 } 7303 7304 #define H2C_STOP_MCC_LEN 4 7305 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7306 bool prev_groups) 7307 { 7308 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7309 struct sk_buff *skb; 7310 unsigned int cond; 7311 7312 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7313 if (!skb) { 7314 rtw89_err(rtwdev, 7315 "failed to alloc skb for stop mcc\n"); 7316 return -ENOMEM; 7317 } 7318 7319 skb_put(skb, H2C_STOP_MCC_LEN); 7320 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7321 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7322 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7323 7324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7325 H2C_CAT_MAC, 7326 H2C_CL_MCC, 7327 H2C_FUNC_STOP_MCC, 0, 0, 7328 H2C_STOP_MCC_LEN); 7329 7330 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7331 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7332 } 7333 7334 #define H2C_DEL_MCC_GROUP_LEN 4 7335 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7336 bool prev_groups) 7337 { 7338 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7339 struct sk_buff *skb; 7340 unsigned int cond; 7341 7342 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7343 if (!skb) { 7344 rtw89_err(rtwdev, 7345 "failed to alloc skb for del mcc group\n"); 7346 return -ENOMEM; 7347 } 7348 7349 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7350 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7351 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7352 7353 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7354 H2C_CAT_MAC, 7355 H2C_CL_MCC, 7356 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7357 H2C_DEL_MCC_GROUP_LEN); 7358 7359 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7360 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7361 } 7362 7363 #define H2C_RESET_MCC_GROUP_LEN 4 7364 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7365 { 7366 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7367 struct sk_buff *skb; 7368 unsigned int cond; 7369 7370 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7371 if (!skb) { 7372 rtw89_err(rtwdev, 7373 "failed to alloc skb for reset mcc group\n"); 7374 return -ENOMEM; 7375 } 7376 7377 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7378 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7379 7380 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7381 H2C_CAT_MAC, 7382 H2C_CL_MCC, 7383 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7384 H2C_RESET_MCC_GROUP_LEN); 7385 7386 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7387 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7388 } 7389 7390 #define H2C_MCC_REQ_TSF_LEN 4 7391 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7392 const struct rtw89_fw_mcc_tsf_req *req, 7393 struct rtw89_mac_mcc_tsf_rpt *rpt) 7394 { 7395 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7396 struct rtw89_mac_mcc_tsf_rpt *tmp; 7397 struct sk_buff *skb; 7398 unsigned int cond; 7399 int ret; 7400 7401 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 7402 if (!skb) { 7403 rtw89_err(rtwdev, 7404 "failed to alloc skb for mcc req tsf\n"); 7405 return -ENOMEM; 7406 } 7407 7408 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7409 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7410 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7411 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7412 7413 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7414 H2C_CAT_MAC, 7415 H2C_CL_MCC, 7416 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7417 H2C_MCC_REQ_TSF_LEN); 7418 7419 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7420 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7421 if (ret) 7422 return ret; 7423 7424 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7425 *rpt = *tmp; 7426 7427 return 0; 7428 } 7429 7430 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7431 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7432 u8 *bitmap) 7433 { 7434 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7435 struct sk_buff *skb; 7436 unsigned int cond; 7437 u8 map_len; 7438 u8 h2c_len; 7439 7440 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7441 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7442 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7443 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7444 if (!skb) { 7445 rtw89_err(rtwdev, 7446 "failed to alloc skb for mcc macid bitmap\n"); 7447 return -ENOMEM; 7448 } 7449 7450 skb_put(skb, h2c_len); 7451 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7452 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7453 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7454 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7455 7456 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7457 H2C_CAT_MAC, 7458 H2C_CL_MCC, 7459 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7460 h2c_len); 7461 7462 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7463 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7464 } 7465 7466 #define H2C_MCC_SYNC_LEN 4 7467 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7468 u8 target, u8 offset) 7469 { 7470 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7471 struct sk_buff *skb; 7472 unsigned int cond; 7473 7474 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7475 if (!skb) { 7476 rtw89_err(rtwdev, 7477 "failed to alloc skb for mcc sync\n"); 7478 return -ENOMEM; 7479 } 7480 7481 skb_put(skb, H2C_MCC_SYNC_LEN); 7482 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7483 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7484 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7485 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7486 7487 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7488 H2C_CAT_MAC, 7489 H2C_CL_MCC, 7490 H2C_FUNC_MCC_SYNC, 0, 0, 7491 H2C_MCC_SYNC_LEN); 7492 7493 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 7494 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7495 } 7496 7497 #define H2C_MCC_SET_DURATION_LEN 20 7498 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7499 const struct rtw89_fw_mcc_duration *p) 7500 { 7501 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7502 struct sk_buff *skb; 7503 unsigned int cond; 7504 7505 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7506 if (!skb) { 7507 rtw89_err(rtwdev, 7508 "failed to alloc skb for mcc set duration\n"); 7509 return -ENOMEM; 7510 } 7511 7512 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
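/* Fill the duration-update request: the MCC group, both participating roles
 * (macid_x/macid_y), the TSF at which the new schedule takes effect, and the
 * duration assigned to each role.
 */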
#define H2C_MCC_SET_DURATION_LEN 20
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
							p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

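/* MRC scheduler H2C helpers follow. rtw89_fw_h2c_mrc_add_slot() serves two
 * purposes: when @slot_h2c is NULL it only computes the variable slot size
 * via struct_size(); when a buffer is supplied it also fills the per-role
 * descriptors. rtw89_fw_h2c_mrc_add() therefore walks the slot arguments
 * twice, once to size the skb and once to fill it.
 */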
static
u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mrc_add_slot_arg *slot_arg,
			      struct rtw89_h2c_mrc_add_slot *slot_h2c)
{
	bool fill_h2c = !!slot_h2c;
	unsigned int i;

	if (!fill_h2c)
		goto calc_len;

	slot_h2c->w0 = le32_encode_bits(slot_arg->duration,
					RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) |
		       le32_encode_bits(slot_arg->courtesy_en,
					RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) |
		       le32_encode_bits(slot_arg->role_num,
					RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM);
	slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) |
		       le32_encode_bits(slot_arg->courtesy_target,
					RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET);

	for (i = 0; i < slot_arg->role_num; i++) {
		slot_h2c->roles[i].w0 =
			le32_encode_bits(slot_arg->roles[i].macid,
					 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) |
			le32_encode_bits(slot_arg->roles[i].role_type,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) |
			le32_encode_bits(slot_arg->roles[i].is_master,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) |
			le32_encode_bits(slot_arg->roles[i].en_tx_null,
					 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN);
		slot_h2c->roles[i].w1 =
			le32_encode_bits(slot_arg->roles[i].central_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) |
			le32_encode_bits(slot_arg->roles[i].primary_ch,
					 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) |
			le32_encode_bits(slot_arg->roles[i].bw,
					 RTW89_H2C_MRC_ADD_ROLE_W1_BW) |
			le32_encode_bits(slot_arg->roles[i].band,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) |
			le32_encode_bits(slot_arg->roles[i].null_early,
					 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) |
			le32_encode_bits(false,
					 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) |
			le32_encode_bits(true,
					 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC);
		slot_h2c->roles[i].macid_main_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_main_bitmap);
		slot_h2c->roles[i].macid_paired_bitmap =
			cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap);
	}

calc_len:
	return struct_size(slot_h2c, roles, slot_arg->role_num);
}

int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev,
			 const struct rtw89_fw_mrc_add_arg *arg)
{
	struct rtw89_h2c_mrc_add *h2c_head;
	struct sk_buff *skb;
	unsigned int i;
	void *tmp;
	u32 len;
	int ret;

	len = sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc add\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	tmp = skb->data;

	h2c_head = tmp;
	h2c_head->w0 = le32_encode_bits(arg->sch_idx,
					RTW89_H2C_MRC_ADD_W0_SCH_IDX) |
		       le32_encode_bits(arg->sch_type,
					RTW89_H2C_MRC_ADD_W0_SCH_TYPE) |
		       le32_encode_bits(arg->slot_num,
					RTW89_H2C_MRC_ADD_W0_SLOT_NUM) |
		       le32_encode_bits(arg->btc_in_sch,
					RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH);

	tmp += sizeof(*h2c_head);
	for (i = 0; i < arg->slot_num; i++)
		tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_ADD_MRC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

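/* Start an MRC schedule identified by @arg->sch_idx. The requested action,
 * the new schedule index and the previous one are packed into w0, the
 * 64-bit start TSF is split into low/high 32-bit words, and completion is
 * awaited on the MRC wait condition for H2C_FUNC_START_MRC.
 */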
int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev,
			   const struct rtw89_fw_mrc_start_arg *arg)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_start *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc start\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_start *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_START_W0_SCH_IDX) |
		  le32_encode_bits(arg->old_sch_idx,
				   RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) |
		  le32_encode_bits(arg->action,
				   RTW89_H2C_MRC_START_W0_ACTION);

	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_START_MRC, 0, 0,
			      len);

	cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_del *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc del\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_del *)skb->data;

	h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) |
		  le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_DEL_MRC, 0, 0,
			      len);

	cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev,
			     const struct rtw89_fw_mrc_req_tsf_arg *arg,
			     struct rtw89_mac_mrc_tsf_rpt *rpt)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct rtw89_h2c_mrc_req_tsf *h2c;
	struct rtw89_mac_mrc_tsf_rpt *tmp;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, infos, arg->num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data;

	h2c->req_tsf_num = arg->num;
	for (i = 0; i < arg->num; i++)
		h2c->infos[i] =
			u8_encode_bits(arg->infos[i].band,
				       RTW89_H2C_MRC_REQ_TSF_INFO_BAND) |
			u8_encode_bits(arg->infos[i].port,
				       RTW89_H2C_MRC_REQ_TSF_INFO_PORT);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_REQ_TSF, 0, 0,
			      len);

	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF);
	if (ret)
		return ret;

	tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

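/* Fire-and-forget MRC maintenance commands: update the client MAC ID bitmap
 * of a schedule, force a TSF sync between two ports, and update slot
 * durations. Unlike the commands above, these do not wait for a firmware
 * report; a TX failure is mapped to -EBUSY.
 */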
int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev,
				const struct rtw89_fw_mrc_upd_bitmap_arg *arg)
{
	struct rtw89_h2c_mrc_upd_bitmap *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) |
		  le32_encode_bits(arg->action,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) |
		  le32_encode_bits(arg->macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W0_MACID);
	h2c->w1 = le32_encode_bits(arg->client_macid,
				   RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_BITMAP, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev,
			  const struct rtw89_fw_mrc_sync_arg *arg)
{
	struct rtw89_h2c_mrc_sync *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_sync *)skb->data;

	h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) |
		  le32_encode_bits(arg->src.port,
				   RTW89_H2C_MRC_SYNC_W0_SRC_PORT) |
		  le32_encode_bits(arg->src.band,
				   RTW89_H2C_MRC_SYNC_W0_SRC_BAND) |
		  le32_encode_bits(arg->dest.port,
				   RTW89_H2C_MRC_SYNC_W0_DEST_PORT) |
		  le32_encode_bits(arg->dest.band,
				   RTW89_H2C_MRC_SYNC_W0_DEST_BAND);
	h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_SYNC, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mrc_upd_duration_arg *arg)
{
	struct rtw89_h2c_mrc_upd_duration *h2c;
	struct sk_buff *skb;
	unsigned int i;
	u32 len;
	int ret;

	len = struct_size(h2c, slots, arg->slot_num);
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n");
		return -ENOMEM;
	}

	skb_put(skb, len);
	h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data;

	h2c->w0 = le32_encode_bits(arg->sch_idx,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) |
		  le32_encode_bits(arg->slot_num,
				   RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) |
		  le32_encode_bits(false,
				   RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH);

	h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32);
	h2c->start_tsf_low = cpu_to_le32(arg->start_tsf);

	for (i = 0; i < arg->slot_num; i++) {
		h2c->slots[i] =
			le32_encode_bits(arg->slots[i].slot_idx,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) |
			le32_encode_bits(arg->slots[i].duration,
					 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MRC,
			      H2C_FUNC_MRC_UPD_DURATION, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return -EBUSY;
	}

	return 0;
}

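/* TX power tables carried as firmware file elements are parsed below.
 * Entries may be larger than the structures known to this driver;
 * __fw_txpwr_entry_acceptable() tolerates that as long as every byte beyond
 * the known layout is zero, so an older driver can still consume tables
 * that carry additional trailing fields.
 */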
static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len)
{
	static const u8 zeros[U8_MAX] = {};

	return memcmp(ext_ptr, zeros, ext_len) == 0;
}

#define __fw_txpwr_entry_acceptable(e, cursor, ent_sz)		\
({								\
	u8 __var_sz = sizeof(*(e));				\
	bool __accept;						\
	if (__var_sz >= (ent_sz))				\
		__accept = true;				\
	else							\
		__accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\
						     (ent_sz) - __var_sz);\
	__accept;						\
})

static bool
fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM)
		return false;

	switch (e->rs) {
	case RTW89_RS_CCK:
		if (e->shf + e->len > RTW89_RATE_CCK_NUM)
			return false;
		break;
	case RTW89_RS_OFDM:
		if (e->shf + e->len > RTW89_RATE_OFDM_NUM)
			return false;
		break;
	case RTW89_RS_MCS:
		if (e->shf + e->len > __RTW89_RATE_MCS_NUM ||
		    e->nss >= RTW89_NSS_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_HEDCM:
		if (e->shf + e->len > RTW89_RATE_HEDCM_NUM ||
		    e->nss >= RTW89_NSS_HEDCM_NUM ||
		    e->ofdma >= RTW89_OFDMA_NUM)
			return false;
		break;
	case RTW89_RS_OFFSET:
		if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM)
			return false;
		break;
	default:
		return false;
	}

	return true;
}

static
void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev,
				const struct rtw89_txpwr_table *tbl)
{
	const struct rtw89_txpwr_conf *conf = tbl->data;
	struct rtw89_fw_txpwr_byrate_entry entry = {};
	struct rtw89_txpwr_byrate *byr_head;
	struct rtw89_rate_desc desc = {};
	const void *cursor;
	u32 data;
	s8 *byr;
	int i;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf))
			continue;

		byr_head = &rtwdev->byr[entry.band][entry.bw];
		data = le32_to_cpu(entry.data);
		desc.ofdma = entry.ofdma;
		desc.nss = entry.nss;
		desc.rs = entry.rs;

		for (i = 0; i < entry.len; i++, data >>= 8) {
			desc.idx = entry.shf + i;
			byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc);
			*byr = data & 0xff;
		}
	}
}

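/* Per-band TX power limit loaders. Each firmware entry is bounds-checked
 * against the enum ranges used to index the limit arrays before it is
 * written into the corresponding rtw89_txpwr_lmt_*_data table.
 */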
static bool
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_2G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_5G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e,
			      const void *cursor,
			      const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->bw >= RTW89_6G_BW_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->rs >= RTW89_RS_LMT_NUM)
		return false;
	if (e->bf >= RTW89_BF_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd]
		       [entry.reg_6ghz_power][entry.ch_idx] = entry.v;
	}
}

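/* RU (resource unit) TX power limit and TX shaping loaders, following the
 * same validate-then-store pattern as the regular limit tables above.
 */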
static bool
fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_2G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->ch_idx >= RTW89_5G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v;
	}
}

static bool
fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e,
				 const void *cursor,
				 const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->ru >= RTW89_RU_NUM)
		return false;
	if (e->nt >= RTW89_NTX_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;
	if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER)
		return false;
	if (e->ch_idx >= RTW89_6G_CH_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power]
		       [entry.ch_idx] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e,
			    const void *cursor,
			    const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v;
	}
}

static bool
fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e,
			       const void *cursor,
			       const struct rtw89_txpwr_conf *conf)
{
	if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz))
		return false;

	if (e->band >= RTW89_BAND_NUM)
		return false;
	if (e->regd >= RTW89_REGD_NUM)
		return false;

	return true;
}

static
void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data)
{
	const struct rtw89_txpwr_conf *conf = &data->conf;
	struct rtw89_fw_tx_shape_lmt_ru_entry entry = {};
	const void *cursor;

	rtw89_for_each_in_txpwr_conf(entry, cursor, conf) {
		if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf))
			continue;

		data->v[entry.band][entry.regd] = entry.v;
	}
}

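/* Build the RFE parameter set actually used by the driver: start from the
 * compile-time defaults in @init and override each table for which a valid
 * firmware-provided configuration exists.
 */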
const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
			    const struct rtw89_rfe_parms *init)
{
	struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data;
	struct rtw89_rfe_parms *parms;

	if (!rfe_data)
		return init;

	parms = &rfe_data->rfe_parms;
	if (init)
		*parms = *init;

	if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) {
		rfe_data->byrate.tbl.data = &rfe_data->byrate.conf;
		rfe_data->byrate.tbl.size = 0; /* don't care here */
		rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate;
		parms->byr_tbl = &rfe_data->byrate.tbl;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz);
		parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz);
		parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz);
		parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz);
		parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz);
		parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) {
		rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz);
		parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) {
		rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt);
		parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	return parms;
}