// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"
#include "wow.h"

struct rtw89_eapol_2_of_2 {
	u8 gtkbody[14];
	u8 key_des_ver;
	u8 rsvd[92];
} __packed;

struct rtw89_sa_query {
	u8 category;
	u8 action;
} __packed;

struct rtw89_arp_rsp {
	u8 llc_hdr[sizeof(rfc1042_header)];
	__be16 llc_type;
	struct arphdr arp_hdr;
	u8 sender_hw[ETH_ALEN];
	__be32 sender_ip;
	u8 target_hw[ETH_ALEN];
	__be32 target_ip;
} __packed;

static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};

union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int __get_mssc_key_idx(struct rtw89_dev *rtwdev,
			      const struct rtw89_fw_mss_pool_hdr *mss_hdr,
			      u32 rmp_tbl_size, u32 *key_idx)
{
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 sel_byte_idx;
	u32 mss_sel_idx;
	u8 sel_bit_idx;
	int i;

	if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) {
		if (!mss_hdr->defen)
			return -ENOENT;

		mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			      sec->mss_key_num;
	} else {
		if (mss_hdr->defen)
			mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3;
		else
			mss_sel_idx = 0;
		mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) *
			       le16_to_cpu(mss_hdr->msscust_max) +
			       sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) +
			       sec->mss_key_num;
	}

	sel_byte_idx = mss_sel_idx >> 3;
	sel_bit_idx = mss_sel_idx & 0x7;

	if (sel_byte_idx >= rmp_tbl_size)
		return -EFAULT;

	if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx)))
		return -ENOENT;

	*key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1));

	for (i = 0; i < sel_byte_idx; i++)
		*key_idx += hweight8(mss_hdr->rmp_tbl[i]);

	return 0;
}

static int __parse_formatted_mssc(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_bin_info *info,
				  struct rtw89_fw_hdr_section_info *section_info,
				  const struct rtw89_fw_hdr_section_v1 *section,
				  const void *content,
				  u32 *mssc_len)
{
	const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len;
	const union rtw89_fw_section_mssc_content *section_content = content;
	struct rtw89_fw_secure *sec = &rtwdev->fw.sec;
	u32 rmp_tbl_size;
	u32 key_sign_len;
	u32 real_key_idx;
	u32 sb_sel_ver;
	int ret;

	if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) {
		rtw89_err(rtwdev, "[ERR] wrong MSS signature\n");
		return -ENOENT;
	}

	if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) {
		rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) *
				le16_to_cpu(mss_hdr->msscust_max) *
				mss_hdr->mssdev_max) >> 3;
		if (mss_hdr->defen)
			rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE;
	} else {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n",
			  mss_hdr->rmpfmt);
		return -EINVAL;
	}

	if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) {
		rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n",
			  rmp_tbl_size, (int)sizeof(*mss_hdr),
			  le32_to_cpu(mss_hdr->key_raw_offset));
		return -EINVAL;
	}

	key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2;
	if (!key_sign_len)
		key_sign_len = 512;

	if (info->dsp_checksum)
		key_sign_len += FWDL_SECURITY_CHKSUM_LEN;

	*mssc_len = sizeof(*mss_hdr) + rmp_tbl_size +
		    le16_to_cpu(mss_hdr->keypair_num) * key_sign_len;

	if (!sec->secure_boot)
		goto out;

	sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v);
	if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn)
		goto ignore;

	ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx);
	if (ret)
		goto ignore;

	section_info->key_addr = content + section_info->len +
				 le32_to_cpu(mss_hdr->key_raw_offset) +
				 key_sign_len * real_key_idx;
	section_info->key_len = key_sign_len;
	section_info->key_idx = real_key_idx;

out:
	if (info->secure_section_exist) {
		section_info->ignore = true;
		return 0;
	}

	info->secure_section_exist = true;

	return 0;

ignore:
	section_info->ignore = true;

	return 0;
}

static int __parse_security_section(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_bin_info *info,
				    struct rtw89_fw_hdr_section_info *section_info,
				    const struct rtw89_fw_hdr_section_v1 *section,
				    const void *content,
				    u32 *mssc_len)
{
	int ret;

	section_info->mssc =
		le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);

	if (section_info->mssc == FORMATTED_MSSC) {
		ret = __parse_formatted_mssc(rtwdev, info, section_info,
					     section, content, mssc_len);
		if (ret)
			return -EINVAL;
	} else {
		*mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN;
		if (info->dsp_checksum)
			*mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN;

		info->secure_section_exist = true;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len;
	int ret;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];

		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;

		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			ret = __parse_security_section(rtwdev, info, section_info,
						       section, bin, &mssc_len);
			if (ret)
				return ret;
		} else {
			section_info->mssc = 0;
			mssc_len = 0;
		}

		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n",
			    i, section_info->type, section_info->len,
			    section_info->mssc, mssc_len, bin - fw);
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n",
			    section_info->ignore, section_info->key_addr,
			    section_info->key_addr ?
			    section_info->key_addr - section_info->addr : 0,
			    section_info->key_len, section_info->key_idx);

		bin += section_info->len + mssc_len;
		section_info++;
	}

	if (fw_end != bin) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	if (!info->secure_section_exist)
		rtw89_warn(rtwdev, "no firmware secure section\n");

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		tmp = &mfw_hdr->info[i];
		if (tmp->type != type)
			continue;

		if (type == RTW89_FW_LOGFMT) {
			mfw_info = tmp;
			goto found;
		}

		/* WiFi firmware versions are not sorted in the firmware file,
		 * so walk all entries to find the closest version that is
		 * equal to or lower than the chip's CV.
		 */
		if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
			if (!mfw_info || mfw_info->cv < tmp->cv)
				mfw_info = tmp;
		}
	}

	if (mfw_info)
		goto found;

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	enum rtw89_fw_type type = arg.fw_type;
	struct rtw89_hal *hal = &rtwdev->hal;
	struct rtw89_fw_suit *fw_suit;

	/* BB MCU firmware versions are stored in decreasing order in the
	 * firmware file, so take the first version that is equal to or
	 * lower than the chip's CV, i.e. the closest matching one.
	 */
	if (hal->cv < elm->u.bbmcu.cv)
		return 1; /* ignore this element */

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	if (fw_suit->data)
		return 1; /* ignore this element (a firmware is taken already) */

	fw_suit->data = elm->u.bbmcu.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 74, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 90, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852BT, ge, 0, 29, 91, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 21, 0, SCAN_OFFLOAD_BE_V0),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
	__CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
	__CFG_FW_FEAT(RTL8922A, lt, 0, 35, 31, 0, RFK_PRE_NOTIFY_V0),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works if the log format file doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple entries match, the last one takes effect */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matches, accept the default entry */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

static
int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev,
				       const struct rtw89_fw_element_hdr *elm,
				       const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 needed_bitmap = 0;
	u32 offset = 0;
	int subband;
	u32 bitmap;
	int type;

	if (chip->support_bands & BIT(NL80211_BAND_6GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_5GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ;
	if (chip->support_bands & BIT(NL80211_BAND_2GHZ))
		needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ;

	bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap);

	if ((bitmap & needed_bitmap) != needed_bitmap) {
		rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %08x\n",
			   needed_bitmap, bitmap);
		return -ENOENT;
	}

	elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL);
	if (!elm_info->txpwr_trk)
		return -ENOMEM;

	for (type = 0; bitmap; type++, bitmap >>= 1) {
		if (!(bitmap & BIT(0)))
			continue;

		if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START &&
		    type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX)
			subband = 4;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX)
			subband = 3;
		else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START &&
			 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX)
			subband = 1;
		else
			break;

		elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset];

		offset += subband;
		if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size))
			goto err;
	}

	return 0;

err:
	rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n",
		   offset, le32_to_cpu(elm->size));
	kfree(elm_info->txpwr_trk);
	elm_info->txpwr_trk = NULL;

	return -EFAULT;
}

static
int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_element_hdr *elm,
				     const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	u8 rfk_id;

	if (elm_info->rfk_log_fmt)
		goto allocated;

	elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL);
	if (!elm_info->rfk_log_fmt)
		return 1; /* this is an optional element, so just ignore this */

allocated:
	rfk_id = elm->u.rfk_log_fmt.rfk_id;
	if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM)
		return 1;

	elm_info->rfk_log_fmt->elm[rfk_id] = elm;

	return 0;
}

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_TRK] = {
		rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK",
	},
	[RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = {
		rtw89_build_rfk_log_fmt_from_elm, {}, NULL,
	},
};

int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret == 1) /* ignore this element */
			goto next;
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr *fw_hdr)
{
	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_W7_PART_SIZE);

	return 0;
}

static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev,
					    struct rtw89_fw_bin_info *info,
					    struct rtw89_fw_hdr_v1 *fw_hdr)
{
	struct rtw89_fw_hdr_section_info *section_info;
	struct rtw89_fw_hdr_section_v1 *section;
	u8 dst_sec_idx = 0;
	u8 sec_idx;

	le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN,
			   FW_HDR_V1_W7_PART_SIZE);

	for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) {
		section_info = &info->section_info[sec_idx];
		section = &fw_hdr->sections[sec_idx];

		if (section_info->ignore)
			continue;

		if (dst_sec_idx != sec_idx)
			fw_hdr->sections[dst_sec_idx] = *section;

		dst_sec_idx++;
	}

	le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM);

	return (info->section_num - dst_sec_idx) * sizeof(*section);
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_suit *fw_suit,
				   struct rtw89_fw_bin_info *info)
{
	u32 len = info->hdr_len - info->dynamic_hdr_len;
	struct rtw89_fw_hdr_v1 *fw_hdr_v1;
	const u8 *fw = fw_suit->data;
	struct rtw89_fw_hdr *fw_hdr;
	struct sk_buff *skb;
	u32 truncated;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);

	switch (fw_suit->hdr_ver) {
	case 0:
		fw_hdr = (struct rtw89_fw_hdr *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr);
		break;
	case 1:
		fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data;
		truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto fail;
	}

	if (truncated) {
		len -= truncated;
		skb_trim(skb, len);
	}

	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
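/* error path: free the locally allocated H2C skb */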
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_suit *fw_suit,
				 struct rtw89_fw_bin_info *info)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, info);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	bool copy_key = false;
	u32 pkt_len;
	int ret;

	if (info->ignore)
		return 0;

	if (info->key_addr && info->key_len) {
		if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len)
			rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n",
				   info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len);
		else
			copy_key = true;
	}

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		if (copy_key)
			memcpy(skb->data + pkt_len - info->key_len,
			       info->key_addr, info->key_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
	u32 addr = R_AX_DBG_PORT_SEL;
	u32 val32;
	u16 index;

	if (chip_gen == RTW89_CHIP_BE) {
		addr = R_BE_WLCPU_PORT_PC;
		goto dump;
	}

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

dump:
	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, addr);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info = {};
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

static
int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		goto fwdl_err;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	int retry;
	int ret;

	for (retry = 0; retry < 5; retry++) {
		ret = __rtw89_fw_download(rtwdev, type, include_bb);
		if (!ret)
			return 0;
	}

	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);

	kfree(elm_info->txpwr_trk);
	kfree(elm_info->rfk_log_fmt);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
1593 */ 1594 fw->req.firmware = NULL; 1595 } 1596 1597 kfree(fw->log.fmts); 1598 rtw89_unload_firmware_elements(rtwdev); 1599 } 1600 1601 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1602 { 1603 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1604 u32 i; 1605 1606 if (fmt_id > fw_log->last_fmt_id) 1607 return 0; 1608 1609 for (i = 0; i < fw_log->fmt_count; i++) { 1610 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1611 return i; 1612 } 1613 return 0; 1614 } 1615 1616 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1617 { 1618 struct rtw89_fw_log *log = &rtwdev->fw.log; 1619 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1620 struct rtw89_fw_suit *suit = &log->suit; 1621 const void *fmts_ptr, *fmts_end_ptr; 1622 u32 fmt_count; 1623 int i; 1624 1625 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1626 fmt_count = le32_to_cpu(suit_hdr->count); 1627 log->fmt_ids = suit_hdr->ids; 1628 fmts_ptr = &suit_hdr->ids[fmt_count]; 1629 fmts_end_ptr = suit->data + suit->size; 1630 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1631 if (!log->fmts) 1632 return -ENOMEM; 1633 1634 for (i = 0; i < fmt_count; i++) { 1635 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1636 if (!fmts_ptr) 1637 break; 1638 1639 (*log->fmts)[i] = fmts_ptr; 1640 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1641 log->fmt_count++; 1642 fmts_ptr += strlen(fmts_ptr); 1643 } 1644 1645 return 0; 1646 } 1647 1648 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1649 { 1650 struct rtw89_fw_log *log = &rtwdev->fw.log; 1651 struct rtw89_fw_suit *suit = &log->suit; 1652 1653 if (!suit || !suit->data) { 1654 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1655 return -EINVAL; 1656 } 1657 if (log->fmts) 1658 return 0; 1659 1660 return rtw89_fw_log_create_fmts_dict(rtwdev); 1661 } 1662 1663 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1664 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1665 u32 fmt_idx, u8 para_int, bool raw_data) 1666 { 1667 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1668 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1669 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1670 int i; 1671 1672 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1673 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1674 log_fmt->argc); 1675 return; 1676 } 1677 1678 if (para_int) 1679 for (i = 0 ; i < log_fmt->argc; i++) 1680 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1681 1682 if (raw_data) { 1683 if (para_int) 1684 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1685 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1686 para_int, log_fmt->argc, (int)sizeof(args), args); 1687 else 1688 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1689 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1690 para_int, log_fmt->argc, log_fmt->u.raw); 1691 } else { 1692 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1693 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1694 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1695 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1696 args[0xf]); 1697 } 1698 1699 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1700 } 1701 1702 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1703 { 1704 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1705 u8 para_int; 1706 u32 fmt_idx; 1707 1708 if (len < RTW89_C2H_HEADER_LEN) { 1709 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1710 return; 1711 } 1712 1713 buf += RTW89_C2H_HEADER_LEN; 1714 len -= 
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2);

int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev,
				     struct rtw89_vif *rtwvif,
				     struct rtw89_sta *rtwsta)
{
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	struct rtw89_h2c_dctlinfo_ud_v2 *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data;

	h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) |
		  le32_encode_bits(1, DCTLINFO_V2_C0_OP);

	h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL);
	h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL);
	h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL);
	h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL);
	h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL);
	h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL);
	h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL);
	h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL);
	h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL);
	h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL);
	h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL);
	h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL);
	h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2);

int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	struct rtw89_h2c_ba_cam *h2c;
	u8 macid = rtwsta->mac_id;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
"alloc" : "free", params->tid); 1933 return 0; 1934 } 1935 1936 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1937 if (!skb) { 1938 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 1939 return -ENOMEM; 1940 } 1941 skb_put(skb, len); 1942 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 1943 1944 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 1945 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 1946 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 1947 else 1948 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 1949 if (!valid) 1950 goto end; 1951 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 1952 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 1953 if (params->buf_size > 64) 1954 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1955 else 1956 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1957 /* If init req is set, hw will set the ssn */ 1958 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 1959 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 1960 1961 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 1962 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 1963 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND); 1964 } 1965 1966 end: 1967 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1968 H2C_CAT_MAC, 1969 H2C_CL_BA_CAM, 1970 H2C_FUNC_MAC_BA_CAM, 0, 1, 1971 len); 1972 1973 ret = rtw89_h2c_tx(rtwdev, skb, false); 1974 if (ret) { 1975 rtw89_err(rtwdev, "failed to send h2c\n"); 1976 goto fail; 1977 } 1978 1979 return 0; 1980 fail: 1981 dev_kfree_skb_any(skb); 1982 1983 return ret; 1984 } 1985 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 1986 1987 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 1988 u8 entry_idx, u8 uid) 1989 { 1990 struct rtw89_h2c_ba_cam *h2c; 1991 u32 len = sizeof(*h2c); 1992 struct sk_buff *skb; 1993 int ret; 1994 1995 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1996 if (!skb) { 1997 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 1998 return -ENOMEM; 1999 } 2000 skb_put(skb, len); 2001 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 2002 2003 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 2004 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 2005 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 2006 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 2007 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 2008 2009 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2010 H2C_CAT_MAC, 2011 H2C_CL_BA_CAM, 2012 H2C_FUNC_MAC_BA_CAM, 0, 1, 2013 len); 2014 2015 ret = rtw89_h2c_tx(rtwdev, skb, false); 2016 if (ret) { 2017 rtw89_err(rtwdev, "failed to send h2c\n"); 2018 goto fail; 2019 } 2020 2021 return 0; 2022 fail: 2023 dev_kfree_skb_any(skb); 2024 2025 return ret; 2026 } 2027 2028 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 2029 { 2030 const struct rtw89_chip_info *chip = rtwdev->chip; 2031 u8 entry_idx = chip->bacam_num; 2032 u8 uid = 0; 2033 int i; 2034 2035 for (i = 0; i < chip->bacam_dynamic_num; i++) { 2036 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 2037 entry_idx++; 2038 uid++; 2039 } 2040 } 2041 2042 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 2043 bool valid, struct ieee80211_ampdu_params *params) 2044 { 2045 const struct rtw89_chip_info *chip = rtwdev->chip; 2046 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 2047 struct rtw89_h2c_ba_cam_v1 *h2c; 2048 u8 macid = 
rtwsta->mac_id; 2049 u32 len = sizeof(*h2c); 2050 struct sk_buff *skb; 2051 u8 entry_idx; 2052 u8 bmap_size; 2053 int ret; 2054 2055 ret = valid ? 2056 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 2057 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 2058 if (ret) { 2059 /* it still works even if we don't have static BA CAM, because 2060 * hardware can create dynamic BA CAM automatically. 2061 */ 2062 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2063 "failed to %s entry tid=%d for h2c ba cam\n", 2064 valid ? "alloc" : "free", params->tid); 2065 return 0; 2066 } 2067 2068 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2069 if (!skb) { 2070 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2071 return -ENOMEM; 2072 } 2073 skb_put(skb, len); 2074 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2075 2076 if (params->buf_size > 512) 2077 bmap_size = 10; 2078 else if (params->buf_size > 256) 2079 bmap_size = 8; 2080 else if (params->buf_size > 64) 2081 bmap_size = 4; 2082 else 2083 bmap_size = 0; 2084 2085 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2086 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2087 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2088 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2089 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2090 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2091 2092 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2093 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2094 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2095 le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2096 2097 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2098 H2C_CAT_MAC, 2099 H2C_CL_BA_CAM, 2100 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2101 len); 2102 2103 ret = rtw89_h2c_tx(rtwdev, skb, false); 2104 if (ret) { 2105 rtw89_err(rtwdev, "failed to send h2c\n"); 2106 goto fail; 2107 } 2108 2109 return 0; 2110 fail: 2111 dev_kfree_skb_any(skb); 2112 2113 return ret; 2114 } 2115 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2116 2117 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2118 u8 offset, u8 mac_idx) 2119 { 2120 struct rtw89_h2c_ba_cam_init *h2c; 2121 u32 len = sizeof(*h2c); 2122 struct sk_buff *skb; 2123 int ret; 2124 2125 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2126 if (!skb) { 2127 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2128 return -ENOMEM; 2129 } 2130 skb_put(skb, len); 2131 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2132 2133 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2134 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2135 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2136 2137 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2138 H2C_CAT_MAC, 2139 H2C_CL_BA_CAM, 2140 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2141 len); 2142 2143 ret = rtw89_h2c_tx(rtwdev, skb, false); 2144 if (ret) { 2145 rtw89_err(rtwdev, "failed to send h2c\n"); 2146 goto fail; 2147 } 2148 2149 return 0; 2150 fail: 2151 dev_kfree_skb_any(skb); 2152 2153 return ret; 2154 } 2155 2156 #define H2C_LOG_CFG_LEN 12 2157 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2158 { 2159 struct sk_buff *skb; 2160 u32 comp = 0; 2161 int ret; 2162 2163 if (enable) 2164 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2165 
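		       /* Added note: 'comp' collects the firmware log
			* components to enable as a bitmask; the remaining
			* component bits are OR'ed in below before being
			* written with SET_LOG_CFG_COMP().
			*/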
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2166 BIT(RTW89_FW_LOG_COMP_SCAN); 2167 2168 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2169 if (!skb) { 2170 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2171 return -ENOMEM; 2172 } 2173 2174 skb_put(skb, H2C_LOG_CFG_LEN); 2175 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2176 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2177 SET_LOG_CFG_COMP(skb->data, comp); 2178 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2179 2180 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2181 H2C_CAT_MAC, 2182 H2C_CL_FW_INFO, 2183 H2C_FUNC_LOG_CFG, 0, 0, 2184 H2C_LOG_CFG_LEN); 2185 2186 ret = rtw89_h2c_tx(rtwdev, skb, false); 2187 if (ret) { 2188 rtw89_err(rtwdev, "failed to send h2c\n"); 2189 goto fail; 2190 } 2191 2192 return 0; 2193 fail: 2194 dev_kfree_skb_any(skb); 2195 2196 return ret; 2197 } 2198 2199 static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 2200 struct rtw89_vif *rtwvif) 2201 { 2202 static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88, 2203 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; 2204 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2205 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2206 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2207 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2208 struct rtw89_eapol_2_of_2 *eapol_pkt; 2209 struct ieee80211_hdr_3addr *hdr; 2210 struct sk_buff *skb; 2211 u8 key_des_ver; 2212 2213 if (rtw_wow->ptk_alg == 3) 2214 key_des_ver = 1; 2215 else if (rtw_wow->akm == 1 || rtw_wow->akm == 2) 2216 key_des_ver = 2; 2217 else if (rtw_wow->akm > 2 && rtw_wow->akm < 7) 2218 key_des_ver = 3; 2219 else 2220 key_des_ver = 0; 2221 2222 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); 2223 if (!skb) 2224 return NULL; 2225 2226 hdr = skb_put_zero(skb, sizeof(*hdr)); 2227 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | 2228 IEEE80211_FCTL_TODS | 2229 IEEE80211_FCTL_PROTECTED); 2230 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2231 ether_addr_copy(hdr->addr2, vif->addr); 2232 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2233 2234 skb_put_zero(skb, sec_hdr_len); 2235 2236 eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); 2237 memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); 2238 eapol_pkt->key_des_ver = key_des_ver; 2239 2240 return skb; 2241 } 2242 2243 static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, 2244 struct rtw89_vif *rtwvif) 2245 { 2246 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2247 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 2248 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2249 struct ieee80211_hdr_3addr *hdr; 2250 struct rtw89_sa_query *sa_query; 2251 struct sk_buff *skb; 2252 2253 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); 2254 if (!skb) 2255 return NULL; 2256 2257 hdr = skb_put_zero(skb, sizeof(*hdr)); 2258 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2259 IEEE80211_STYPE_ACTION | 2260 IEEE80211_FCTL_PROTECTED); 2261 ether_addr_copy(hdr->addr1, bss_conf->bssid); 2262 ether_addr_copy(hdr->addr2, vif->addr); 2263 ether_addr_copy(hdr->addr3, bss_conf->bssid); 2264 2265 skb_put_zero(skb, sec_hdr_len); 2266 2267 sa_query = skb_put_zero(skb, sizeof(*sa_query)); 2268 sa_query->category = WLAN_CATEGORY_SA_QUERY; 2269 sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; 2270 2271 return skb; 2272 } 2273 2274 static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, 2275 struct rtw89_vif 
*rtwvif) 2276 { 2277 u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); 2278 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 2279 struct ieee80211_hdr_3addr *hdr; 2280 struct rtw89_arp_rsp *arp_skb; 2281 struct arphdr *arp_hdr; 2282 struct sk_buff *skb; 2283 __le16 fc; 2284 2285 skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); 2286 if (!skb) 2287 return NULL; 2288 2289 hdr = skb_put_zero(skb, sizeof(*hdr)); 2290 2291 if (rtw_wow->ptk_alg) 2292 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | 2293 IEEE80211_FCTL_PROTECTED); 2294 else 2295 fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); 2296 2297 hdr->frame_control = fc; 2298 ether_addr_copy(hdr->addr1, rtwvif->bssid); 2299 ether_addr_copy(hdr->addr2, rtwvif->mac_addr); 2300 ether_addr_copy(hdr->addr3, rtwvif->bssid); 2301 2302 skb_put_zero(skb, sec_hdr_len); 2303 2304 arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); 2305 memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); 2306 arp_skb->llc_type = htons(ETH_P_ARP); 2307 2308 arp_hdr = &arp_skb->arp_hdr; 2309 arp_hdr->ar_hrd = htons(ARPHRD_ETHER); 2310 arp_hdr->ar_pro = htons(ETH_P_IP); 2311 arp_hdr->ar_hln = ETH_ALEN; 2312 arp_hdr->ar_pln = 4; 2313 arp_hdr->ar_op = htons(ARPOP_REPLY); 2314 2315 ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr); 2316 arp_skb->sender_ip = rtwvif->ip_addr; 2317 2318 return skb; 2319 } 2320 2321 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2322 struct rtw89_vif *rtwvif, 2323 enum rtw89_fw_pkt_ofld_type type, 2324 u8 *id) 2325 { 2326 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2327 struct rtw89_pktofld_info *info; 2328 struct sk_buff *skb; 2329 int ret; 2330 2331 info = kzalloc(sizeof(*info), GFP_KERNEL); 2332 if (!info) 2333 return -ENOMEM; 2334 2335 switch (type) { 2336 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2337 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2338 break; 2339 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2340 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2341 break; 2342 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2343 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2344 break; 2345 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2346 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2347 break; 2348 case RTW89_PKT_OFLD_TYPE_EAPOL_KEY: 2349 skb = rtw89_eapol_get(rtwdev, rtwvif); 2350 break; 2351 case RTW89_PKT_OFLD_TYPE_SA_QUERY: 2352 skb = rtw89_sa_query_get(rtwdev, rtwvif); 2353 break; 2354 case RTW89_PKT_OFLD_TYPE_ARP_RSP: 2355 skb = rtw89_arp_response_get(rtwdev, rtwvif); 2356 break; 2357 default: 2358 goto err; 2359 } 2360 2361 if (!skb) 2362 goto err; 2363 2364 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2365 kfree_skb(skb); 2366 2367 if (ret) 2368 goto err; 2369 2370 list_add_tail(&info->list, &rtwvif->general_pkt_list); 2371 *id = info->id; 2372 return 0; 2373 2374 err: 2375 kfree(info); 2376 return -ENOMEM; 2377 } 2378 2379 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2380 struct rtw89_vif *rtwvif, bool notify_fw) 2381 { 2382 struct list_head *pkt_list = &rtwvif->general_pkt_list; 2383 struct rtw89_pktofld_info *info, *tmp; 2384 2385 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2386 if (notify_fw) 2387 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2388 else 2389 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2390 list_del(&info->list); 2391 kfree(info); 2392 } 2393 } 2394 2395 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2396 { 2397 struct rtw89_vif *rtwvif; 2398 2399 
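	/* Added note: walk every vif and release its offloaded "general"
	 * packets (ps-poll, null, qos-null, ...). A hedged usage sketch --
	 * callers pass notify_fw=false when the firmware can no longer be
	 * reached, in which case only the local pkt_offload bitmap is
	 * cleared:
	 *
	 *	rtw89_fw_release_general_pkt_list(rtwdev, true);
	 *	rtw89_fw_release_general_pkt_list(rtwdev, false);
	 */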
rtw89_for_each_rtwvif(rtwdev, rtwvif) 2400 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 2401 } 2402 2403 #define H2C_GENERAL_PKT_LEN 6 2404 #define H2C_GENERAL_PKT_ID_UND 0xff 2405 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2406 struct rtw89_vif *rtwvif, u8 macid) 2407 { 2408 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2409 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2410 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2411 struct sk_buff *skb; 2412 int ret; 2413 2414 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2415 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2416 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2417 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2418 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2419 RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null); 2420 2421 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2422 if (!skb) { 2423 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2424 return -ENOMEM; 2425 } 2426 skb_put(skb, H2C_GENERAL_PKT_LEN); 2427 SET_GENERAL_PKT_MACID(skb->data, macid); 2428 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2429 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2430 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2431 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2432 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2433 2434 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2435 H2C_CAT_MAC, 2436 H2C_CL_FW_INFO, 2437 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2438 H2C_GENERAL_PKT_LEN); 2439 2440 ret = rtw89_h2c_tx(rtwdev, skb, false); 2441 if (ret) { 2442 rtw89_err(rtwdev, "failed to send h2c\n"); 2443 goto fail; 2444 } 2445 2446 return 0; 2447 fail: 2448 dev_kfree_skb_any(skb); 2449 2450 return ret; 2451 } 2452 2453 #define H2C_LPS_PARM_LEN 8 2454 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2455 struct rtw89_lps_parm *lps_param) 2456 { 2457 struct sk_buff *skb; 2458 int ret; 2459 2460 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2461 if (!skb) { 2462 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2463 return -ENOMEM; 2464 } 2465 skb_put(skb, H2C_LPS_PARM_LEN); 2466 2467 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2468 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2469 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2470 SET_LPS_PARM_RLBM(skb->data, 1); 2471 SET_LPS_PARM_SMARTPS(skb->data, 1); 2472 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2473 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2474 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2475 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2476 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2477 2478 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2479 H2C_CAT_MAC, 2480 H2C_CL_MAC_PS, 2481 H2C_FUNC_MAC_LPS_PARM, 0, 1, 2482 H2C_LPS_PARM_LEN); 2483 2484 ret = rtw89_h2c_tx(rtwdev, skb, false); 2485 if (ret) { 2486 rtw89_err(rtwdev, "failed to send h2c\n"); 2487 goto fail; 2488 } 2489 2490 return 0; 2491 fail: 2492 dev_kfree_skb_any(skb); 2493 2494 return ret; 2495 } 2496 2497 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2498 { 2499 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2500 rtwvif->chanctx_idx); 2501 const struct rtw89_chip_info *chip = rtwdev->chip; 2502 struct rtw89_h2c_lps_ch_info *h2c; 2503 u32 len = sizeof(*h2c); 2504 struct sk_buff *skb; 2505 u32 done; 2506 int ret; 2507 2508 if (chip->chip_gen != RTW89_CHIP_BE) 2509 return 0; 2510 2511 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2512 if (!skb) { 2513 rtw89_err(rtwdev, "failed 
to alloc skb for h2c lps_ch_info\n"); 2514 return -ENOMEM; 2515 } 2516 skb_put(skb, len); 2517 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2518 2519 h2c->info[0].central_ch = chan->channel; 2520 h2c->info[0].pri_ch = chan->primary_channel; 2521 h2c->info[0].band = chan->band_type; 2522 h2c->info[0].bw = chan->band_width; 2523 h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2524 2525 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2526 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2527 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2528 2529 rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); 2530 ret = rtw89_h2c_tx(rtwdev, skb, false); 2531 if (ret) { 2532 rtw89_err(rtwdev, "failed to send h2c\n"); 2533 goto fail; 2534 } 2535 2536 ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, 2537 true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); 2538 if (ret) 2539 rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); 2540 2541 return 0; 2542 fail: 2543 dev_kfree_skb_any(skb); 2544 2545 return ret; 2546 } 2547 2548 #define H2C_P2P_ACT_LEN 20 2549 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2550 struct ieee80211_p2p_noa_desc *desc, 2551 u8 act, u8 noa_id) 2552 { 2553 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2554 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2555 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 2556 struct sk_buff *skb; 2557 u8 *cmd; 2558 int ret; 2559 2560 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2561 if (!skb) { 2562 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2563 return -ENOMEM; 2564 } 2565 skb_put(skb, H2C_P2P_ACT_LEN); 2566 cmd = skb->data; 2567 2568 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 2569 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2570 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2571 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2572 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2573 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2574 if (desc) { 2575 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2576 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2577 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2578 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2579 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2580 } 2581 2582 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2583 H2C_CAT_MAC, H2C_CL_MAC_PS, 2584 H2C_FUNC_P2P_ACT, 0, 0, 2585 H2C_P2P_ACT_LEN); 2586 2587 ret = rtw89_h2c_tx(rtwdev, skb, false); 2588 if (ret) { 2589 rtw89_err(rtwdev, "failed to send h2c\n"); 2590 goto fail; 2591 } 2592 2593 return 0; 2594 fail: 2595 dev_kfree_skb_any(skb); 2596 2597 return ret; 2598 } 2599 2600 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2601 struct sk_buff *skb) 2602 { 2603 const struct rtw89_chip_info *chip = rtwdev->chip; 2604 struct rtw89_hal *hal = &rtwdev->hal; 2605 u8 ntx_path; 2606 u8 map_b; 2607 2608 if (chip->rf_path_num == 1) { 2609 ntx_path = RF_A; 2610 map_b = 0; 2611 } else { 2612 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2613 map_b = hal->antenna_tx == RF_AB ? 
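		/* Added note: with more than one RF path, TX falls back to
		 * path B when no explicit TX antenna is configured, and
		 * map_b is raised only for the dual-antenna (RF_AB) case.
		 */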
1 : 0; 2614 } 2615 2616 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2617 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2618 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2619 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2620 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2621 } 2622 2623 #define H2C_CMC_TBL_LEN 68 2624 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2625 struct rtw89_vif *rtwvif, 2626 struct rtw89_sta *rtwsta) 2627 { 2628 const struct rtw89_chip_info *chip = rtwdev->chip; 2629 u8 macid = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2630 struct sk_buff *skb; 2631 int ret; 2632 2633 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2634 if (!skb) { 2635 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2636 return -ENOMEM; 2637 } 2638 skb_put(skb, H2C_CMC_TBL_LEN); 2639 SET_CTRL_INFO_MACID(skb->data, macid); 2640 SET_CTRL_INFO_OPERATION(skb->data, 1); 2641 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2642 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2643 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2644 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2645 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2646 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2647 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2648 } 2649 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2650 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2651 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2652 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2653 2654 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2655 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2656 chip->h2c_cctl_func_id, 0, 1, 2657 H2C_CMC_TBL_LEN); 2658 2659 ret = rtw89_h2c_tx(rtwdev, skb, false); 2660 if (ret) { 2661 rtw89_err(rtwdev, "failed to send h2c\n"); 2662 goto fail; 2663 } 2664 2665 return 0; 2666 fail: 2667 dev_kfree_skb_any(skb); 2668 2669 return ret; 2670 } 2671 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2672 2673 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2674 struct rtw89_vif *rtwvif, 2675 struct rtw89_sta *rtwsta) 2676 { 2677 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2678 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2679 u32 len = sizeof(*h2c); 2680 struct sk_buff *skb; 2681 int ret; 2682 2683 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2684 if (!skb) { 2685 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2686 return -ENOMEM; 2687 } 2688 skb_put(skb, len); 2689 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2690 2691 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2692 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2693 2694 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2695 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2696 2697 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2698 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2699 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2700 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2701 2702 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2703 2704 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2705 2706 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2707 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2708 2709 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2710 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2711 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2712 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2713 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2714 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2715 2716 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2717 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2718 2719 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2720 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2721 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2722 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2723 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2724 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2725 2726 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2727 2728 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2729 le32_encode_bits(0, CCTLINFO_G7_W14_VI_CURR_RATE) | 2730 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2731 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2732 2733 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2734 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2735 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2736 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2737 2738 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2739 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2740 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2741 len); 2742 2743 ret = rtw89_h2c_tx(rtwdev, skb, false); 2744 if (ret) { 2745 rtw89_err(rtwdev, "failed to send h2c\n"); 2746 goto fail; 2747 } 2748 2749 return 0; 2750 fail: 2751 dev_kfree_skb_any(skb); 2752 2753 return ret; 2754 } 2755 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2756 2757 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2758 struct ieee80211_sta *sta, u8 *pads) 2759 { 2760 bool ppe_th; 2761 u8 ppe16, ppe8; 2762 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2763 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 2764 u8 ru_bitmap; 2765 u8 n, idx, sh; 2766 u16 ppe; 2767 int i; 2768 2769 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2770 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 2771 if (!ppe_th) { 2772 u8 pad; 2773 2774 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2775 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 2776 2777 for (i = 0; i < 
RTW89_PPE_BW_NUM; i++) 2778 pads[i] = pad; 2779 2780 return; 2781 } 2782 2783 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 2784 n = hweight8(ru_bitmap); 2785 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2786 2787 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2788 if (!(ru_bitmap & BIT(i))) { 2789 pads[i] = 1; 2790 continue; 2791 } 2792 2793 idx = n >> 3; 2794 sh = n & 7; 2795 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 2796 2797 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 2798 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2799 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2800 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2801 2802 if (ppe16 != 7 && ppe8 == 7) 2803 pads[i] = RTW89_PE_DURATION_16; 2804 else if (ppe8 != 7) 2805 pads[i] = RTW89_PE_DURATION_8; 2806 else 2807 pads[i] = RTW89_PE_DURATION_0; 2808 } 2809 } 2810 2811 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 2812 struct ieee80211_vif *vif, 2813 struct ieee80211_sta *sta) 2814 { 2815 const struct rtw89_chip_info *chip = rtwdev->chip; 2816 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2817 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2818 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2819 rtwvif->chanctx_idx); 2820 struct sk_buff *skb; 2821 u8 pads[RTW89_PPE_BW_NUM]; 2822 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2823 u16 lowest_rate; 2824 int ret; 2825 2826 memset(pads, 0, sizeof(pads)); 2827 if (sta && sta->deflink.he_cap.has_he) 2828 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2829 2830 if (vif->p2p) 2831 lowest_rate = RTW89_HW_RATE_OFDM6; 2832 else if (chan->band_type == RTW89_BAND_2G) 2833 lowest_rate = RTW89_HW_RATE_CCK1; 2834 else 2835 lowest_rate = RTW89_HW_RATE_OFDM6; 2836 2837 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2838 if (!skb) { 2839 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2840 return -ENOMEM; 2841 } 2842 skb_put(skb, H2C_CMC_TBL_LEN); 2843 SET_CTRL_INFO_MACID(skb->data, mac_id); 2844 SET_CTRL_INFO_OPERATION(skb->data, 1); 2845 SET_CMC_TBL_DISRTSFB(skb->data, 1); 2846 SET_CMC_TBL_DISDATAFB(skb->data, 1); 2847 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 2848 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 2849 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 2850 if (vif->type == NL80211_IFTYPE_STATION) 2851 SET_CMC_TBL_ULDL(skb->data, 1); 2852 else 2853 SET_CMC_TBL_ULDL(skb->data, 0); 2854 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 2855 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 2856 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2857 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2858 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2859 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2860 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2861 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2862 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2863 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2864 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2865 } 2866 if (sta) 2867 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 2868 sta->deflink.he_cap.has_he); 2869 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2870 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2871 2872 
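	/* Added note: at this point the CMAC table carries the per-bandwidth
	 * nominal packet padding filled above from the HE PPE thresholds
	 * (or the PPE-less default), and is pushed to firmware with the
	 * chip-specific cctl function id below.
	 */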
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2873 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2874 chip->h2c_cctl_func_id, 0, 1, 2875 H2C_CMC_TBL_LEN); 2876 2877 ret = rtw89_h2c_tx(rtwdev, skb, false); 2878 if (ret) { 2879 rtw89_err(rtwdev, "failed to send h2c\n"); 2880 goto fail; 2881 } 2882 2883 return 0; 2884 fail: 2885 dev_kfree_skb_any(skb); 2886 2887 return ret; 2888 } 2889 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 2890 2891 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 2892 struct ieee80211_sta *sta, u8 *pads) 2893 { 2894 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2895 u16 ppe_thres_hdr; 2896 u8 ppe16, ppe8; 2897 u8 n, idx, sh; 2898 u8 ru_bitmap; 2899 bool ppe_th; 2900 u16 ppe; 2901 int i; 2902 2903 ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2904 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 2905 if (!ppe_th) { 2906 u8 pad; 2907 2908 pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2909 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 2910 2911 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 2912 pads[i] = pad; 2913 2914 return; 2915 } 2916 2917 ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres); 2918 ru_bitmap = u16_get_bits(ppe_thres_hdr, 2919 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2920 n = hweight8(ru_bitmap); 2921 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 2922 (n * IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2923 2924 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2925 if (!(ru_bitmap & BIT(i))) { 2926 pads[i] = 1; 2927 continue; 2928 } 2929 2930 idx = n >> 3; 2931 sh = n & 7; 2932 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2933 2934 ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx); 2935 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2936 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 2937 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2938 2939 if (ppe16 != 7 && ppe8 == 7) 2940 pads[i] = RTW89_PE_DURATION_16_20; 2941 else if (ppe8 != 7) 2942 pads[i] = RTW89_PE_DURATION_8; 2943 else 2944 pads[i] = RTW89_PE_DURATION_0; 2945 } 2946 } 2947 2948 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2949 struct ieee80211_vif *vif, 2950 struct ieee80211_sta *sta) 2951 { 2952 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2953 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2954 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx); 2955 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2956 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2957 u8 pads[RTW89_PPE_BW_NUM]; 2958 u32 len = sizeof(*h2c); 2959 struct sk_buff *skb; 2960 u16 lowest_rate; 2961 int ret; 2962 2963 memset(pads, 0, sizeof(pads)); 2964 if (sta) { 2965 if (sta->deflink.eht_cap.has_eht) 2966 __get_sta_eht_pkt_padding(rtwdev, sta, pads); 2967 else if (sta->deflink.he_cap.has_he) 2968 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2969 } 2970 2971 if (vif->p2p) 2972 lowest_rate = RTW89_HW_RATE_OFDM6; 2973 else if (chan->band_type == RTW89_BAND_2G) 2974 lowest_rate = RTW89_HW_RATE_CCK1; 2975 else 2976 lowest_rate = RTW89_HW_RATE_OFDM6; 2977 2978 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2979 if (!skb) { 2980 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2981 return -ENOMEM; 2982 } 2983 skb_put(skb, len); 2984 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2985 2986 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2987 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2988 2989 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 2990 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 2991 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 2992 CCTLINFO_G7_W0_DISDATAFB); 2993 2994 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2995 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2996 2997 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2998 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2999 3000 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3001 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 3002 3003 h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 3004 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 3005 3006 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3007 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 3008 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 3009 } 3010 3011 if (vif->bss_conf.eht_support) { 3012 u16 punct = vif->bss_conf.chanreq.oper.punctured; 3013 3014 h2c->w4 |= le32_encode_bits(~punct, 3015 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3016 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 3017 } 3018 3019 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 3020 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 3021 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 3022 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 3023 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 3024 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 3025 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 3026 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 3027 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 3028 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3029 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 3030 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 3031 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 3032 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 3033 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 3034 3035 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 
1 : 0, 3036 CCTLINFO_G7_W6_ULDL); 3037 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 3038 3039 if (sta) { 3040 h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he, 3041 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3042 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 3043 } 3044 3045 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3046 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3047 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 3048 len); 3049 3050 ret = rtw89_h2c_tx(rtwdev, skb, false); 3051 if (ret) { 3052 rtw89_err(rtwdev, "failed to send h2c\n"); 3053 goto fail; 3054 } 3055 3056 return 0; 3057 fail: 3058 dev_kfree_skb_any(skb); 3059 3060 return ret; 3061 } 3062 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 3063 3064 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 3065 struct ieee80211_vif *vif, 3066 struct ieee80211_sta *sta) 3067 { 3068 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 3069 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 3070 u32 len = sizeof(*h2c); 3071 struct sk_buff *skb; 3072 u16 agg_num = 0; 3073 u8 ba_bmap = 0; 3074 int ret; 3075 u8 tid; 3076 3077 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3078 if (!skb) { 3079 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 3080 return -ENOMEM; 3081 } 3082 skb_put(skb, len); 3083 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 3084 3085 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 3086 if (agg_num == 0) 3087 agg_num = rtwsta->ampdu_params[tid].agg_num; 3088 else 3089 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 3090 } 3091 3092 if (agg_num <= 0x20) 3093 ba_bmap = 3; 3094 else if (agg_num > 0x20 && agg_num <= 0x40) 3095 ba_bmap = 0; 3096 else if (agg_num > 0x40 && agg_num <= 0x80) 3097 ba_bmap = 1; 3098 else if (agg_num > 0x80 && agg_num <= 0x100) 3099 ba_bmap = 2; 3100 else if (agg_num > 0x100 && agg_num <= 0x200) 3101 ba_bmap = 4; 3102 else if (agg_num > 0x200 && agg_num <= 0x400) 3103 ba_bmap = 5; 3104 3105 h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) | 3106 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 3107 3108 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 3109 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 3110 3111 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3112 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 3113 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 3114 len); 3115 3116 ret = rtw89_h2c_tx(rtwdev, skb, false); 3117 if (ret) { 3118 rtw89_err(rtwdev, "failed to send h2c\n"); 3119 goto fail; 3120 } 3121 3122 return 0; 3123 fail: 3124 dev_kfree_skb_any(skb); 3125 3126 return ret; 3127 } 3128 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 3129 3130 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 3131 struct rtw89_sta *rtwsta) 3132 { 3133 const struct rtw89_chip_info *chip = rtwdev->chip; 3134 struct sk_buff *skb; 3135 int ret; 3136 3137 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 3138 if (!skb) { 3139 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 3140 return -ENOMEM; 3141 } 3142 skb_put(skb, H2C_CMC_TBL_LEN); 3143 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 3144 SET_CTRL_INFO_OPERATION(skb->data, 1); 3145 if (rtwsta->cctl_tx_time) { 3146 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 3147 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 3148 } 3149 if (rtwsta->cctl_tx_retry_limit) { 3150 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 3151 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 3152 } 3153 3154 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3155 H2C_CAT_MAC, 
H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_h2c_bcn_upd *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);

	skb_put_data(skb,
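		     /* Added note: the raw beacon frame built by mac80211
		      * (including any appended P2P NoA attribute) follows the
		      * fixed descriptor, so the h2c length reported below is
		      * sizeof(*h2c) + skb_beacon->len.
		      */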
skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);

int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, rtwvif->chanctx_idx);
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_h2c_bcn_upd_be *h2c;
	struct sk_buff *skb_beacon;
	struct ieee80211_hdr *hdr;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;
	int bcn_total_len;
	u16 beacon_rate;
	u16 tim_offset;
	void *noa_data;
	u8 noa_len;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
	if (noa_len &&
	    (noa_len <= skb_tailroom(skb_beacon) ||
	     pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
		skb_put_data(skb_beacon, noa_data, noa_len);
	}

	hdr = (struct ieee80211_hdr *)skb_beacon->data;
	tim_offset -= ieee80211_hdrlen(hdr->frame_control);

	bcn_total_len = len + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
		  le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
		  le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
		  le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
	h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
		  le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
		  le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
		  le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be);

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta,
			       enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ?
rtwsta->mac_id : rtwvif->mac_id; 3382 u8 self_role; 3383 int ret; 3384 3385 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3386 if (rtwsta) 3387 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3388 else 3389 self_role = rtwvif->self_role; 3390 } else { 3391 self_role = rtwvif->self_role; 3392 } 3393 3394 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3395 if (!skb) { 3396 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3397 return -ENOMEM; 3398 } 3399 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3400 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3401 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3402 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3403 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 3404 3405 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3406 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3407 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3408 H2C_ROLE_MAINTAIN_LEN); 3409 3410 ret = rtw89_h2c_tx(rtwdev, skb, false); 3411 if (ret) { 3412 rtw89_err(rtwdev, "failed to send h2c\n"); 3413 goto fail; 3414 } 3415 3416 return 0; 3417 fail: 3418 dev_kfree_skb_any(skb); 3419 3420 return ret; 3421 } 3422 3423 static enum rtw89_fw_sta_type 3424 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3425 struct rtw89_sta *rtwsta) 3426 { 3427 struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta); 3428 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3429 3430 if (!sta) 3431 goto by_vif; 3432 3433 if (sta->deflink.eht_cap.has_eht) 3434 return RTW89_FW_BE_STA; 3435 else if (sta->deflink.he_cap.has_he) 3436 return RTW89_FW_AX_STA; 3437 else 3438 return RTW89_FW_N_AC_STA; 3439 3440 by_vif: 3441 if (vif->bss_conf.eht_support) 3442 return RTW89_FW_BE_STA; 3443 else if (vif->bss_conf.he_support) 3444 return RTW89_FW_AX_STA; 3445 else 3446 return RTW89_FW_N_AC_STA; 3447 } 3448 3449 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3450 struct rtw89_sta *rtwsta, bool dis_conn) 3451 { 3452 struct sk_buff *skb; 3453 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 3454 u8 self_role = rtwvif->self_role; 3455 enum rtw89_fw_sta_type sta_type; 3456 u8 net_type = rtwvif->net_type; 3457 struct rtw89_h2c_join_v1 *h2c_v1; 3458 struct rtw89_h2c_join *h2c; 3459 u32 len = sizeof(*h2c); 3460 bool format_v1 = false; 3461 int ret; 3462 3463 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3464 len = sizeof(*h2c_v1); 3465 format_v1 = true; 3466 } 3467 3468 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 3469 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3470 net_type = dis_conn ? 
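			/* Added note: for an AP-mode client entry the
			 * disconnect case is reported to firmware as
			 * RTW89_NET_TYPE_NO_LINK instead of AP mode.
			 */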
RTW89_NET_TYPE_NO_LINK : net_type; 3471 } 3472 3473 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3474 if (!skb) { 3475 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3476 return -ENOMEM; 3477 } 3478 skb_put(skb, len); 3479 h2c = (struct rtw89_h2c_join *)skb->data; 3480 3481 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3482 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3483 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3484 le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3485 le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3486 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3487 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3488 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3489 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3490 le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3491 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3492 le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3493 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3494 3495 if (!format_v1) 3496 goto done; 3497 3498 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3499 3500 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta); 3501 3502 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3503 h2c_v1->w2 = 0; 3504 3505 done: 3506 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3507 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3508 H2C_FUNC_MAC_JOININFO, 0, 1, 3509 len); 3510 3511 ret = rtw89_h2c_tx(rtwdev, skb, false); 3512 if (ret) { 3513 rtw89_err(rtwdev, "failed to send h2c\n"); 3514 goto fail; 3515 } 3516 3517 return 0; 3518 fail: 3519 dev_kfree_skb_any(skb); 3520 3521 return ret; 3522 } 3523 3524 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3525 { 3526 struct rtw89_h2c_notify_dbcc *h2c; 3527 u32 len = sizeof(*h2c); 3528 struct sk_buff *skb; 3529 int ret; 3530 3531 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3532 if (!skb) { 3533 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3534 return -ENOMEM; 3535 } 3536 skb_put(skb, len); 3537 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3538 3539 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3540 3541 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3542 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3543 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3544 len); 3545 3546 ret = rtw89_h2c_tx(rtwdev, skb, false); 3547 if (ret) { 3548 rtw89_err(rtwdev, "failed to send h2c\n"); 3549 goto fail; 3550 } 3551 3552 return 0; 3553 fail: 3554 dev_kfree_skb_any(skb); 3555 3556 return ret; 3557 } 3558 3559 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3560 bool pause) 3561 { 3562 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3563 struct rtw89_fw_macid_pause_grp *h2c; 3564 __le32 set = cpu_to_le32(BIT(sh)); 3565 u8 h2c_macid_pause_id; 3566 struct sk_buff *skb; 3567 u32 len; 3568 int ret; 3569 3570 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3571 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3572 len = sizeof(*h2c_new); 3573 } else { 3574 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3575 len = sizeof(*h2c); 3576 } 3577 3578 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3579 if (!skb) { 3580 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3581 return -ENOMEM; 3582 } 3583 skb_put(skb, len); 3584 3585 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 3586 h2c_new = (struct 
rtw89_fw_macid_pause_sleep_grp *)skb->data; 3587 3588 h2c_new->n[0].pause_mask_grp[grp] = set; 3589 h2c_new->n[0].sleep_mask_grp[grp] = set; 3590 if (pause) { 3591 h2c_new->n[0].pause_grp[grp] = set; 3592 h2c_new->n[0].sleep_grp[grp] = set; 3593 } 3594 } else { 3595 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3596 3597 h2c->mask_grp[grp] = set; 3598 if (pause) 3599 h2c->pause_grp[grp] = set; 3600 } 3601 3602 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3603 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3604 h2c_macid_pause_id, 1, 0, 3605 len); 3606 3607 ret = rtw89_h2c_tx(rtwdev, skb, false); 3608 if (ret) { 3609 rtw89_err(rtwdev, "failed to send h2c\n"); 3610 goto fail; 3611 } 3612 3613 return 0; 3614 fail: 3615 dev_kfree_skb_any(skb); 3616 3617 return ret; 3618 } 3619 3620 #define H2C_EDCA_LEN 12 3621 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3622 u8 ac, u32 val) 3623 { 3624 struct sk_buff *skb; 3625 int ret; 3626 3627 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3628 if (!skb) { 3629 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3630 return -ENOMEM; 3631 } 3632 skb_put(skb, H2C_EDCA_LEN); 3633 RTW89_SET_EDCA_SEL(skb->data, 0); 3634 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 3635 RTW89_SET_EDCA_WMM(skb->data, 0); 3636 RTW89_SET_EDCA_AC(skb->data, ac); 3637 RTW89_SET_EDCA_PARAM(skb->data, val); 3638 3639 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3640 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3641 H2C_FUNC_USR_EDCA, 0, 1, 3642 H2C_EDCA_LEN); 3643 3644 ret = rtw89_h2c_tx(rtwdev, skb, false); 3645 if (ret) { 3646 rtw89_err(rtwdev, "failed to send h2c\n"); 3647 goto fail; 3648 } 3649 3650 return 0; 3651 fail: 3652 dev_kfree_skb_any(skb); 3653 3654 return ret; 3655 } 3656 3657 #define H2C_TSF32_TOGL_LEN 4 3658 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3659 bool en) 3660 { 3661 struct sk_buff *skb; 3662 u16 early_us = en ? 
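	/* Added note (assumption): 'early_us' appears to be in microseconds,
	 * i.e. request the TSF32-toggle notification roughly 2 ms early when
	 * enabling, and 0 when disabling.
	 */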
2000 : 0; 3663 u8 *cmd; 3664 int ret; 3665 3666 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3667 if (!skb) { 3668 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3669 return -ENOMEM; 3670 } 3671 skb_put(skb, H2C_TSF32_TOGL_LEN); 3672 cmd = skb->data; 3673 3674 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 3675 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3676 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 3677 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3678 3679 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3680 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3681 H2C_FUNC_TSF32_TOGL, 0, 0, 3682 H2C_TSF32_TOGL_LEN); 3683 3684 ret = rtw89_h2c_tx(rtwdev, skb, false); 3685 if (ret) { 3686 rtw89_err(rtwdev, "failed to send h2c\n"); 3687 goto fail; 3688 } 3689 3690 return 0; 3691 fail: 3692 dev_kfree_skb_any(skb); 3693 3694 return ret; 3695 } 3696 3697 #define H2C_OFLD_CFG_LEN 8 3698 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3699 { 3700 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3701 struct sk_buff *skb; 3702 int ret; 3703 3704 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3705 if (!skb) { 3706 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3707 return -ENOMEM; 3708 } 3709 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3710 3711 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3712 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3713 H2C_FUNC_OFLD_CFG, 0, 1, 3714 H2C_OFLD_CFG_LEN); 3715 3716 ret = rtw89_h2c_tx(rtwdev, skb, false); 3717 if (ret) { 3718 rtw89_err(rtwdev, "failed to send h2c\n"); 3719 goto fail; 3720 } 3721 3722 return 0; 3723 fail: 3724 dev_kfree_skb_any(skb); 3725 3726 return ret; 3727 } 3728 3729 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 3730 struct ieee80211_vif *vif, 3731 bool connect) 3732 { 3733 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3734 struct ieee80211_bss_conf *bss_conf = vif ? 
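	/* Added note: vif may legitimately be NULL here; vif_to_rtwvif_safe()
	 * and the !rtwvif/!bss_conf checks below turn that into -EINVAL
	 * instead of a dereference.
	 */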
&vif->bss_conf : NULL; 3735 s32 thold = RTW89_DEFAULT_CQM_THOLD; 3736 u32 hyst = RTW89_DEFAULT_CQM_HYST; 3737 struct rtw89_h2c_bcnfltr *h2c; 3738 u32 len = sizeof(*h2c); 3739 struct sk_buff *skb; 3740 int ret; 3741 3742 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3743 return -EINVAL; 3744 3745 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3746 return -EINVAL; 3747 3748 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3749 if (!skb) { 3750 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 3751 return -ENOMEM; 3752 } 3753 3754 skb_put(skb, len); 3755 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 3756 3757 if (bss_conf->cqm_rssi_hyst) 3758 hyst = bss_conf->cqm_rssi_hyst; 3759 if (bss_conf->cqm_rssi_thold) 3760 thold = bss_conf->cqm_rssi_thold; 3761 3762 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 3763 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 3764 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 3765 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 3766 RTW89_H2C_BCNFLTR_W0_MODE) | 3767 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 3768 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 3769 le32_encode_bits(thold + MAX_RSSI, 3770 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 3771 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 3772 3773 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3774 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3775 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 3776 3777 ret = rtw89_h2c_tx(rtwdev, skb, false); 3778 if (ret) { 3779 rtw89_err(rtwdev, "failed to send h2c\n"); 3780 goto fail; 3781 } 3782 3783 return 0; 3784 fail: 3785 dev_kfree_skb_any(skb); 3786 3787 return ret; 3788 } 3789 3790 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 3791 struct rtw89_rx_phy_ppdu *phy_ppdu) 3792 { 3793 struct rtw89_h2c_ofld_rssi *h2c; 3794 u32 len = sizeof(*h2c); 3795 struct sk_buff *skb; 3796 s8 rssi; 3797 int ret; 3798 3799 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3800 return -EINVAL; 3801 3802 if (!phy_ppdu) 3803 return -EINVAL; 3804 3805 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3806 if (!skb) { 3807 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 3808 return -ENOMEM; 3809 } 3810 3811 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 3812 skb_put(skb, len); 3813 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 3814 3815 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 3816 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 3817 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 3818 3819 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3820 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3821 H2C_FUNC_OFLD_RSSI, 0, 1, len); 3822 3823 ret = rtw89_h2c_tx(rtwdev, skb, false); 3824 if (ret) { 3825 rtw89_err(rtwdev, "failed to send h2c\n"); 3826 goto fail; 3827 } 3828 3829 return 0; 3830 fail: 3831 dev_kfree_skb_any(skb); 3832 3833 return ret; 3834 } 3835 3836 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3837 { 3838 struct rtw89_traffic_stats *stats = &rtwvif->stats; 3839 struct rtw89_h2c_ofld *h2c; 3840 u32 len = sizeof(*h2c); 3841 struct sk_buff *skb; 3842 int ret; 3843 3844 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3845 return -EINVAL; 3846 3847 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3848 if (!skb) { 3849 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 3850 return -ENOMEM; 3851 } 3852 3853 skb_put(skb, len); 3854 h2c = (struct rtw89_h2c_ofld 
*)skb->data; 3855 3856 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 3857 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 3858 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 3859 3860 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3861 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3862 H2C_FUNC_OFLD_TP, 0, 1, len); 3863 3864 ret = rtw89_h2c_tx(rtwdev, skb, false); 3865 if (ret) { 3866 rtw89_err(rtwdev, "failed to send h2c\n"); 3867 goto fail; 3868 } 3869 3870 return 0; 3871 fail: 3872 dev_kfree_skb_any(skb); 3873 3874 return ret; 3875 } 3876 3877 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 3878 { 3879 const struct rtw89_chip_info *chip = rtwdev->chip; 3880 struct rtw89_h2c_ra_v1 *h2c_v1; 3881 struct rtw89_h2c_ra *h2c; 3882 u32 len = sizeof(*h2c); 3883 bool format_v1 = false; 3884 struct sk_buff *skb; 3885 int ret; 3886 3887 if (chip->chip_gen == RTW89_CHIP_BE) { 3888 len = sizeof(*h2c_v1); 3889 format_v1 = true; 3890 } 3891 3892 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3893 if (!skb) { 3894 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3895 return -ENOMEM; 3896 } 3897 skb_put(skb, len); 3898 h2c = (struct rtw89_h2c_ra *)skb->data; 3899 rtw89_debug(rtwdev, RTW89_DBG_RA, 3900 "ra cmd msk: %llx ", ra->ra_mask); 3901 3902 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 3903 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 3904 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 3905 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 3906 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 3907 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 3908 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 3909 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 3910 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 3911 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 3912 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 3913 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 3914 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 3915 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 3916 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 3917 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 3918 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 3919 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 3920 3921 if (!format_v1) 3922 goto csi; 3923 3924 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 3925 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 3926 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 3927 3928 csi: 3929 if (!csi) 3930 goto done; 3931 3932 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 3933 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 3934 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 3935 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 3936 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 3937 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 3938 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 3939 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 3940 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 3941 3942 done: 3943 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3944 
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 3945 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 3946 len); 3947 3948 ret = rtw89_h2c_tx(rtwdev, skb, false); 3949 if (ret) { 3950 rtw89_err(rtwdev, "failed to send h2c\n"); 3951 goto fail; 3952 } 3953 3954 return 0; 3955 fail: 3956 dev_kfree_skb_any(skb); 3957 3958 return ret; 3959 } 3960 3961 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 3962 { 3963 struct rtw89_btc *btc = &rtwdev->btc; 3964 struct rtw89_btc_dm *dm = &btc->dm; 3965 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 3966 struct rtw89_btc_module *module = &init_info->module; 3967 struct rtw89_btc_ant_info *ant = &module->ant; 3968 struct rtw89_h2c_cxinit *h2c; 3969 u32 len = sizeof(*h2c); 3970 struct sk_buff *skb; 3971 int ret; 3972 3973 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3974 if (!skb) { 3975 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 3976 return -ENOMEM; 3977 } 3978 skb_put(skb, len); 3979 h2c = (struct rtw89_h2c_cxinit *)skb->data; 3980 3981 h2c->hdr.type = type; 3982 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 3983 3984 h2c->ant_type = ant->type; 3985 h2c->ant_num = ant->num; 3986 h2c->ant_iso = ant->isolation; 3987 h2c->ant_info = 3988 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 3989 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 3990 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 3991 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 3992 3993 h2c->mod_rfe = module->rfe_type; 3994 h2c->mod_cv = module->cv; 3995 h2c->mod_info = 3996 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 3997 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 3998 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 3999 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 4000 h2c->mod_adie_kt = module->kt_ver_adie; 4001 h2c->wl_gch = init_info->wl_guard_ch; 4002 4003 h2c->info = 4004 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 4005 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 4006 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 4007 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 4008 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 4009 4010 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4011 H2C_CAT_OUTSRC, BTFC_SET, 4012 SET_DRV_INFO, 0, 0, 4013 len); 4014 4015 ret = rtw89_h2c_tx(rtwdev, skb, false); 4016 if (ret) { 4017 rtw89_err(rtwdev, "failed to send h2c\n"); 4018 goto fail; 4019 } 4020 4021 return 0; 4022 fail: 4023 dev_kfree_skb_any(skb); 4024 4025 return ret; 4026 } 4027 4028 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 4029 { 4030 struct rtw89_btc *btc = &rtwdev->btc; 4031 struct rtw89_btc_dm *dm = &btc->dm; 4032 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 4033 struct rtw89_h2c_cxinit_v7 *h2c; 4034 u32 len = sizeof(*h2c); 4035 struct sk_buff *skb; 4036 int ret; 4037 4038 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4039 if (!skb) { 4040 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 4041 return -ENOMEM; 4042 } 4043 skb_put(skb, len); 4044 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 4045 4046 h2c->hdr.type = type; 4047 h2c->hdr.ver = btc->ver->fcxinit; 4048 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4049 h2c->init = *init_info; 4050 4051 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4052 H2C_CAT_OUTSRC, 
BTFC_SET, 4053 SET_DRV_INFO, 0, 0, 4054 len); 4055 4056 ret = rtw89_h2c_tx(rtwdev, skb, false); 4057 if (ret) { 4058 rtw89_err(rtwdev, "failed to send h2c\n"); 4059 goto fail; 4060 } 4061 4062 return 0; 4063 fail: 4064 dev_kfree_skb_any(skb); 4065 4066 return ret; 4067 } 4068 4069 #define PORT_DATA_OFFSET 4 4070 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 4071 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 4072 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 4073 4074 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 4075 { 4076 struct rtw89_btc *btc = &rtwdev->btc; 4077 const struct rtw89_btc_ver *ver = btc->ver; 4078 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4079 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 4080 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4081 struct rtw89_btc_wl_active_role *active = role_info->active_role; 4082 struct sk_buff *skb; 4083 u32 len; 4084 u8 offset = 0; 4085 u8 *cmd; 4086 int ret; 4087 int i; 4088 4089 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 4090 4091 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4092 if (!skb) { 4093 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4094 return -ENOMEM; 4095 } 4096 skb_put(skb, len); 4097 cmd = skb->data; 4098 4099 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4100 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4101 4102 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4103 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4104 4105 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4106 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4107 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4108 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4109 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4110 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4111 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4112 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4113 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4114 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4115 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4116 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4117 4118 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4119 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4120 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4121 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4122 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4123 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4124 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4125 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4126 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4127 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4128 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4129 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4130 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4131 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4132 } 4133 4134 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4135 H2C_CAT_OUTSRC, BTFC_SET, 4136 SET_DRV_INFO, 0, 0, 4137 len); 4138 4139 ret = rtw89_h2c_tx(rtwdev, skb, false); 4140 if (ret) { 4141 rtw89_err(rtwdev, "failed to send h2c\n"); 4142 goto fail; 4143 } 4144 4145 return 0; 4146 fail: 4147 dev_kfree_skb_any(skb); 4148 4149 
return ret; 4150 } 4151 4152 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 4153 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4154 4155 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 4156 { 4157 struct rtw89_btc *btc = &rtwdev->btc; 4158 const struct rtw89_btc_ver *ver = btc->ver; 4159 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4160 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 4161 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4162 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 4163 struct sk_buff *skb; 4164 u32 len; 4165 u8 *cmd, offset; 4166 int ret; 4167 int i; 4168 4169 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 4170 4171 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4172 if (!skb) { 4173 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4174 return -ENOMEM; 4175 } 4176 skb_put(skb, len); 4177 cmd = skb->data; 4178 4179 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4180 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4181 4182 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4183 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4184 4185 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4186 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4187 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4188 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4189 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4190 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4191 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4192 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4193 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4194 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4195 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4196 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4197 4198 offset = PORT_DATA_OFFSET; 4199 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4200 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4201 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4202 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4203 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4204 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4205 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4206 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4207 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4208 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4209 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4210 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4211 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4212 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4213 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4214 } 4215 4216 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4217 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4218 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4219 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4220 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4221 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4222 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, 
offset); 4223 4224 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4225 H2C_CAT_OUTSRC, BTFC_SET, 4226 SET_DRV_INFO, 0, 0, 4227 len); 4228 4229 ret = rtw89_h2c_tx(rtwdev, skb, false); 4230 if (ret) { 4231 rtw89_err(rtwdev, "failed to send h2c\n"); 4232 goto fail; 4233 } 4234 4235 return 0; 4236 fail: 4237 dev_kfree_skb_any(skb); 4238 4239 return ret; 4240 } 4241 4242 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4243 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4244 4245 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4246 { 4247 struct rtw89_btc *btc = &rtwdev->btc; 4248 const struct rtw89_btc_ver *ver = btc->ver; 4249 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4250 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4251 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4252 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4253 struct sk_buff *skb; 4254 u32 len; 4255 u8 *cmd, offset; 4256 int ret; 4257 int i; 4258 4259 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4260 4261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4262 if (!skb) { 4263 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4264 return -ENOMEM; 4265 } 4266 skb_put(skb, len); 4267 cmd = skb->data; 4268 4269 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4270 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4271 4272 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4273 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4274 4275 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4276 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4277 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4278 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4279 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4280 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4281 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4282 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4283 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4284 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4285 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4286 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4287 4288 offset = PORT_DATA_OFFSET; 4289 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4290 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4291 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4292 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4293 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4294 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4295 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4296 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4297 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4298 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4299 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4300 } 4301 4302 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4303 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4304 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4305 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4306 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4307 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, 
offset); 4308 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4309 4310 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4311 H2C_CAT_OUTSRC, BTFC_SET, 4312 SET_DRV_INFO, 0, 0, 4313 len); 4314 4315 ret = rtw89_h2c_tx(rtwdev, skb, false); 4316 if (ret) { 4317 rtw89_err(rtwdev, "failed to send h2c\n"); 4318 goto fail; 4319 } 4320 4321 return 0; 4322 fail: 4323 dev_kfree_skb_any(skb); 4324 4325 return ret; 4326 } 4327 4328 int rtw89_fw_h2c_cxdrv_role_v7(struct rtw89_dev *rtwdev, u8 type) 4329 { 4330 struct rtw89_btc *btc = &rtwdev->btc; 4331 struct rtw89_btc_wl_role_info_v7 *role = &btc->cx.wl.role_info_v7; 4332 struct rtw89_h2c_cxrole_v7 *h2c; 4333 u32 len = sizeof(*h2c); 4334 struct sk_buff *skb; 4335 int ret; 4336 4337 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4338 if (!skb) { 4339 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4340 return -ENOMEM; 4341 } 4342 skb_put(skb, len); 4343 h2c = (struct rtw89_h2c_cxrole_v7 *)skb->data; 4344 4345 h2c->hdr.type = type; 4346 h2c->hdr.ver = btc->ver->fwlrole; 4347 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4348 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4349 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4350 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4351 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4352 h2c->_u32.dbcc_en = cpu_to_le32(role->dbcc_en); 4353 h2c->_u32.dbcc_chg = cpu_to_le32(role->dbcc_chg); 4354 h2c->_u32.dbcc_2g_phy = cpu_to_le32(role->dbcc_2g_phy); 4355 4356 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4357 H2C_CAT_OUTSRC, BTFC_SET, 4358 SET_DRV_INFO, 0, 0, 4359 len); 4360 4361 ret = rtw89_h2c_tx(rtwdev, skb, false); 4362 if (ret) { 4363 rtw89_err(rtwdev, "failed to send h2c\n"); 4364 goto fail; 4365 } 4366 4367 return 0; 4368 fail: 4369 dev_kfree_skb_any(skb); 4370 4371 return ret; 4372 } 4373 4374 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4375 { 4376 struct rtw89_btc *btc = &rtwdev->btc; 4377 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4378 struct rtw89_h2c_cxrole_v8 *h2c; 4379 u32 len = sizeof(*h2c); 4380 struct sk_buff *skb; 4381 int ret; 4382 4383 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4384 if (!skb) { 4385 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4386 return -ENOMEM; 4387 } 4388 skb_put(skb, len); 4389 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4390 4391 h2c->hdr.type = type; 4392 h2c->hdr.ver = btc->ver->fwlrole; 4393 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4394 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4395 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4396 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4397 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4398 4399 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4400 H2C_CAT_OUTSRC, BTFC_SET, 4401 SET_DRV_INFO, 0, 0, 4402 len); 4403 4404 ret = rtw89_h2c_tx(rtwdev, skb, false); 4405 if (ret) { 4406 rtw89_err(rtwdev, "failed to send h2c\n"); 4407 goto fail; 4408 } 4409 4410 return 0; 4411 fail: 4412 dev_kfree_skb_any(skb); 4413 4414 return ret; 4415 } 4416 4417 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4418 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4419 { 4420 struct rtw89_btc *btc = &rtwdev->btc; 4421 const struct rtw89_btc_ver *ver = btc->ver; 4422 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4423 struct sk_buff *skb; 4424 u8 *cmd; 4425 int ret; 4426 4427 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_LEN_CXDRVINFO_CTRL); 4428 if (!skb) { 4429 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4430 return -ENOMEM; 4431 } 4432 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4433 cmd = skb->data; 4434 4435 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4436 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4437 4438 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4439 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4440 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4441 if (ver->fcxctrl == 0) 4442 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4443 4444 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4445 H2C_CAT_OUTSRC, BTFC_SET, 4446 SET_DRV_INFO, 0, 0, 4447 H2C_LEN_CXDRVINFO_CTRL); 4448 4449 ret = rtw89_h2c_tx(rtwdev, skb, false); 4450 if (ret) { 4451 rtw89_err(rtwdev, "failed to send h2c\n"); 4452 goto fail; 4453 } 4454 4455 return 0; 4456 fail: 4457 dev_kfree_skb_any(skb); 4458 4459 return ret; 4460 } 4461 4462 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4463 { 4464 struct rtw89_btc *btc = &rtwdev->btc; 4465 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4466 struct rtw89_h2c_cxctrl_v7 *h2c; 4467 u32 len = sizeof(*h2c); 4468 struct sk_buff *skb; 4469 int ret; 4470 4471 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4472 if (!skb) { 4473 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl_v7\n"); 4474 return -ENOMEM; 4475 } 4476 skb_put(skb, len); 4477 h2c = (struct rtw89_h2c_cxctrl_v7 *)skb->data; 4478 4479 h2c->hdr.type = type; 4480 h2c->hdr.ver = btc->ver->fcxctrl; 4481 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4482 h2c->ctrl = *ctrl; 4483 4484 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4485 H2C_CAT_OUTSRC, BTFC_SET, 4486 SET_DRV_INFO, 0, 0, len); 4487 4488 ret = rtw89_h2c_tx(rtwdev, skb, false); 4489 if (ret) { 4490 rtw89_err(rtwdev, "failed to send h2c\n"); 4491 goto fail; 4492 } 4493 4494 return 0; 4495 fail: 4496 dev_kfree_skb_any(skb); 4497 4498 return ret; 4499 } 4500 4501 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4502 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4503 { 4504 struct rtw89_btc *btc = &rtwdev->btc; 4505 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4506 struct sk_buff *skb; 4507 u8 *cmd; 4508 int ret; 4509 4510 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4511 if (!skb) { 4512 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4513 return -ENOMEM; 4514 } 4515 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4516 cmd = skb->data; 4517 4518 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4519 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4520 4521 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4522 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4523 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4524 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4525 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4526 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4527 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4528 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4529 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4530 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4531 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4532 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4533 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4534 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4535 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4536 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, 
trx->rx_tp); 4537 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4538 4539 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4540 H2C_CAT_OUTSRC, BTFC_SET, 4541 SET_DRV_INFO, 0, 0, 4542 H2C_LEN_CXDRVINFO_TRX); 4543 4544 ret = rtw89_h2c_tx(rtwdev, skb, false); 4545 if (ret) { 4546 rtw89_err(rtwdev, "failed to send h2c\n"); 4547 goto fail; 4548 } 4549 4550 return 0; 4551 fail: 4552 dev_kfree_skb_any(skb); 4553 4554 return ret; 4555 } 4556 4557 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4558 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4559 { 4560 struct rtw89_btc *btc = &rtwdev->btc; 4561 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4562 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4563 struct sk_buff *skb; 4564 u8 *cmd; 4565 int ret; 4566 4567 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4568 if (!skb) { 4569 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4570 return -ENOMEM; 4571 } 4572 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4573 cmd = skb->data; 4574 4575 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4576 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4577 4578 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4579 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4580 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4581 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4582 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4583 4584 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4585 H2C_CAT_OUTSRC, BTFC_SET, 4586 SET_DRV_INFO, 0, 0, 4587 H2C_LEN_CXDRVINFO_RFK); 4588 4589 ret = rtw89_h2c_tx(rtwdev, skb, false); 4590 if (ret) { 4591 rtw89_err(rtwdev, "failed to send h2c\n"); 4592 goto fail; 4593 } 4594 4595 return 0; 4596 fail: 4597 dev_kfree_skb_any(skb); 4598 4599 return ret; 4600 } 4601 4602 #define H2C_LEN_PKT_OFLD 4 4603 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4604 { 4605 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4606 struct sk_buff *skb; 4607 unsigned int cond; 4608 u8 *cmd; 4609 int ret; 4610 4611 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4612 if (!skb) { 4613 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4614 return -ENOMEM; 4615 } 4616 skb_put(skb, H2C_LEN_PKT_OFLD); 4617 cmd = skb->data; 4618 4619 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4620 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4621 4622 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4623 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4624 H2C_FUNC_PACKET_OFLD, 1, 1, 4625 H2C_LEN_PKT_OFLD); 4626 4627 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4628 4629 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4630 if (ret < 0) { 4631 rtw89_debug(rtwdev, RTW89_DBG_FW, 4632 "failed to del pkt ofld: id %d, ret %d\n", 4633 id, ret); 4634 return ret; 4635 } 4636 4637 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4638 return 0; 4639 } 4640 4641 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4642 struct sk_buff *skb_ofld) 4643 { 4644 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4645 struct sk_buff *skb; 4646 unsigned int cond; 4647 u8 *cmd; 4648 u8 alloc_id; 4649 int ret; 4650 4651 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4652 RTW89_MAX_PKT_OFLD_NUM); 4653 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4654 return -ENOSPC; 4655 4656 *id = alloc_id; 4657 4658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 
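	/* Reserve room for the 4-byte packet-offload command (index, op, length)
	 * plus the full packet template that is appended below.
	 */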
4659 if (!skb) { 4660 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4661 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4662 return -ENOMEM; 4663 } 4664 skb_put(skb, H2C_LEN_PKT_OFLD); 4665 cmd = skb->data; 4666 4667 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4668 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4669 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4670 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4671 4672 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4673 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4674 H2C_FUNC_PACKET_OFLD, 1, 1, 4675 H2C_LEN_PKT_OFLD + skb_ofld->len); 4676 4677 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4678 4679 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4680 if (ret < 0) { 4681 rtw89_debug(rtwdev, RTW89_DBG_FW, 4682 "failed to add pkt ofld: id %d, ret %d\n", 4683 alloc_id, ret); 4684 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4685 return ret; 4686 } 4687 4688 return 0; 4689 } 4690 4691 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4692 struct list_head *chan_list) 4693 { 4694 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4695 struct rtw89_h2c_chinfo_elem *elem; 4696 struct rtw89_mac_chinfo *ch_info; 4697 struct rtw89_h2c_chinfo *h2c; 4698 struct sk_buff *skb; 4699 unsigned int cond; 4700 int skb_len; 4701 int ret; 4702 4703 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4704 4705 skb_len = struct_size(h2c, elem, ch_num); 4706 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4707 if (!skb) { 4708 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4709 return -ENOMEM; 4710 } 4711 skb_put(skb, sizeof(*h2c)); 4712 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4713 4714 h2c->ch_num = ch_num; 4715 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4716 4717 list_for_each_entry(ch_info, chan_list, list) { 4718 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 4719 4720 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 4721 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 4722 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 4723 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 4724 4725 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 4726 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 4727 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 4728 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 4729 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 4730 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 4731 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 4732 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 4733 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 4734 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 4735 4736 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 4737 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 4738 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 4739 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 4740 4741 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 4742 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 4743 le32_encode_bits(ch_info->pkt_id[6], 
RTW89_H2C_CHINFO_W3_PKT6) | 4744 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 4745 } 4746 4747 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4748 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4749 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4750 4751 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4752 4753 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4754 if (ret) { 4755 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4756 return ret; 4757 } 4758 4759 return 0; 4760 } 4761 4762 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 4763 struct list_head *chan_list) 4764 { 4765 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4766 struct rtw89_h2c_chinfo_elem_be *elem; 4767 struct rtw89_mac_chinfo_be *ch_info; 4768 struct rtw89_h2c_chinfo *h2c; 4769 struct sk_buff *skb; 4770 unsigned int cond; 4771 int skb_len; 4772 int ret; 4773 4774 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4775 4776 skb_len = struct_size(h2c, elem, ch_num); 4777 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4778 if (!skb) { 4779 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4780 return -ENOMEM; 4781 } 4782 4783 skb_put(skb, sizeof(*h2c)); 4784 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4785 4786 h2c->ch_num = ch_num; 4787 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4788 h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 4789 4790 list_for_each_entry(ch_info, chan_list, list) { 4791 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 4792 4793 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 4794 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4795 le32_encode_bits(ch_info->central_ch, 4796 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 4797 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 4798 4799 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 4800 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 4801 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 4802 le32_encode_bits(ch_info->pause_data, 4803 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 4804 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 4805 le32_encode_bits(ch_info->rand_seq_num, 4806 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 4807 le32_encode_bits(ch_info->notify_action, 4808 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 4809 le32_encode_bits(ch_info->probe_id != 0xff ? 
1 : 0, 4810 RTW89_H2C_CHINFO_BE_W1_PROBE) | 4811 le32_encode_bits(ch_info->leave_crit, 4812 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 4813 le32_encode_bits(ch_info->chkpt_timer, 4814 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 4815 4816 elem->w2 = le32_encode_bits(ch_info->leave_time, 4817 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 4818 le32_encode_bits(ch_info->leave_th, 4819 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 4820 le32_encode_bits(ch_info->tx_pkt_ctrl, 4821 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 4822 4823 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 4824 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 4825 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 4826 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 4827 4828 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 4829 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 4830 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 4831 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 4832 4833 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 4834 le32_encode_bits(ch_info->fw_probe0_ssids, 4835 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 4836 4837 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 4838 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 4839 le32_encode_bits(ch_info->fw_probe0_bssids, 4840 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 4841 } 4842 4843 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4844 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4845 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4846 4847 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4848 4849 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4850 if (ret) { 4851 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4852 return ret; 4853 } 4854 4855 return 0; 4856 } 4857 4858 #define RTW89_SCAN_DELAY_TSF_UNIT 104800 4859 int rtw89_fw_h2c_scan_offload_ax(struct rtw89_dev *rtwdev, 4860 struct rtw89_scan_option *option, 4861 struct rtw89_vif *rtwvif, 4862 bool wowlan) 4863 { 4864 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4865 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4866 enum rtw89_scan_mode scan_mode = RTW89_SCAN_IMMEDIATE; 4867 struct rtw89_h2c_scanofld *h2c; 4868 u32 len = sizeof(*h2c); 4869 struct sk_buff *skb; 4870 unsigned int cond; 4871 u64 tsf = 0; 4872 int ret; 4873 4874 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4875 if (!skb) { 4876 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4877 return -ENOMEM; 4878 } 4879 skb_put(skb, len); 4880 h2c = (struct rtw89_h2c_scanofld *)skb->data; 4881 4882 if (option->delay) { 4883 ret = rtw89_mac_port_get_tsf(rtwdev, rtwvif, &tsf); 4884 if (ret) { 4885 rtw89_warn(rtwdev, "NLO failed to get port tsf: %d\n", ret); 4886 scan_mode = RTW89_SCAN_IMMEDIATE; 4887 } else { 4888 scan_mode = RTW89_SCAN_DELAY; 4889 tsf += option->delay * RTW89_SCAN_DELAY_TSF_UNIT; 4890 } 4891 } 4892 4893 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 4894 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 4895 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 4896 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 4897 4898 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 4899 le32_encode_bits(option->target_ch_mode, 4900 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 4901 le32_encode_bits(scan_mode, 
RTW89_H2C_SCANOFLD_W1_START_MODE) | 4902 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 4903 4904 h2c->w2 = le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_W2_NORM_PD) | 4905 le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_W2_SLOW_PD); 4906 4907 if (option->target_ch_mode) { 4908 h2c->w1 |= le32_encode_bits(op->band_width, 4909 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 4910 le32_encode_bits(op->primary_channel, 4911 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 4912 le32_encode_bits(op->channel, 4913 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 4914 h2c->w0 |= le32_encode_bits(op->band_type, 4915 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 4916 } 4917 4918 h2c->tsf_high = le32_encode_bits(upper_32_bits(tsf), 4919 RTW89_H2C_SCANOFLD_W3_TSF_HIGH); 4920 h2c->tsf_low = le32_encode_bits(lower_32_bits(tsf), 4921 RTW89_H2C_SCANOFLD_W4_TSF_LOW); 4922 4923 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4924 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4925 H2C_FUNC_SCANOFLD, 1, 1, 4926 len); 4927 4928 if (option->enable) 4929 cond = RTW89_SCANOFLD_WAIT_COND_START; 4930 else 4931 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 4932 4933 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4934 if (ret) { 4935 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 4936 return ret; 4937 } 4938 4939 return 0; 4940 } 4941 4942 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 4943 struct rtw89_scan_option *option) 4944 { 4945 struct ieee80211_supported_band *sband; 4946 struct ieee80211_channel *chan; 4947 u8 i, idx; 4948 4949 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 4950 if (!sband) { 4951 option->prohib_chan = U64_MAX; 4952 return; 4953 } 4954 4955 for (i = 0; i < sband->n_channels; i++) { 4956 chan = &sband->channels[i]; 4957 if (chan->flags & IEEE80211_CHAN_DISABLED) { 4958 idx = (chan->hw_value - 1) / 4; 4959 option->prohib_chan |= BIT(idx); 4960 } 4961 } 4962 } 4963 4964 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 4965 struct rtw89_scan_option *option, 4966 struct rtw89_vif *rtwvif, 4967 bool wowlan) 4968 { 4969 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4970 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4971 struct cfg80211_scan_request *req = rtwvif->scan_req; 4972 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 4973 struct rtw89_chan *op = &scan_info->op_chan; 4974 struct rtw89_h2c_scanofld_be_opch *opch; 4975 struct rtw89_pktofld_info *pkt_info; 4976 struct rtw89_h2c_scanofld_be *h2c; 4977 struct sk_buff *skb; 4978 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 4979 u8 opch_size = sizeof(*opch) * option->num_opch; 4980 u8 probe_id[NUM_NL80211_BANDS]; 4981 u8 cfg_len = sizeof(*h2c); 4982 unsigned int cond; 4983 void *ptr; 4984 int ret; 4985 u32 len; 4986 u8 i; 4987 4988 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 4989 4990 len = cfg_len + macc_role_size + opch_size; 4991 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4992 if (!skb) { 4993 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4994 return -ENOMEM; 4995 } 4996 4997 skb_put(skb, len); 4998 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 4999 ptr = skb->data; 5000 5001 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 5002 5003 if (!wowlan) { 5004 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 5005 if (pkt_info->wildcard_6ghz) { 5006 /* Provide wildcard as template */ 5007 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 5008 break; 5009 } 5010 } 5011 } 5012 5013 h2c->w0 = 
le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 5014 le32_encode_bits(option->scan_mode, 5015 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 5016 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 5017 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 5018 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 5019 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 5020 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 5021 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 5022 5023 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 5024 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 5025 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 5026 5027 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 5028 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 5029 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 5030 5031 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 5032 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 5033 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 5034 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 5035 5036 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 5037 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 5038 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 5039 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 5040 le32_encode_bits(option->delay, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 5041 5042 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 5043 5044 h2c->w6 = le32_encode_bits(option->prohib_chan, 5045 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 5046 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 5047 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 5048 if (!wowlan && req->no_cck) { 5049 h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); 5050 h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, 5051 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | 5052 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5053 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | 5054 le32_encode_bits(RTW89_HW_RATE_OFDM6, 5055 RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); 5056 } 5057 5058 if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD_BE_V0, &rtwdev->fw)) { 5059 cfg_len = offsetofend(typeof(*h2c), w8); 5060 goto flex_member; 5061 } 5062 5063 h2c->w9 = le32_encode_bits(sizeof(*h2c) / sizeof(h2c->w0), 5064 RTW89_H2C_SCANOFLD_BE_W9_SIZE_CFG) | 5065 le32_encode_bits(sizeof(*macc_role) / sizeof(macc_role->w0), 5066 RTW89_H2C_SCANOFLD_BE_W9_SIZE_MACC) | 5067 le32_encode_bits(sizeof(*opch) / sizeof(opch->w0), 5068 RTW89_H2C_SCANOFLD_BE_W9_SIZE_OP); 5069 5070 flex_member: 5071 ptr += cfg_len; 5072 5073 for (i = 0; i < option->num_macc_role; i++) { 5074 macc_role = ptr; 5075 macc_role->w0 = 5076 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 5077 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 5078 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 5079 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 5080 ptr += sizeof(*macc_role); 5081 } 5082 5083 for (i = 0; i < option->num_opch; i++) { 5084 opch = ptr; 5085 opch->w0 = le32_encode_bits(rtwvif->mac_id, 5086 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 5087 le32_encode_bits(option->band, 5088 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 5089 le32_encode_bits(rtwvif->port, 5090 
RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 5091 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 5092 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 5093 le32_encode_bits(true, 5094 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 5095 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 5096 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 5097 5098 opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME, 5099 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 5100 le32_encode_bits(op->band_type, 5101 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 5102 le32_encode_bits(op->band_width, 5103 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 5104 le32_encode_bits(0x3, 5105 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 5106 le32_encode_bits(op->primary_channel, 5107 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 5108 le32_encode_bits(op->channel, 5109 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 5110 5111 opch->w2 = le32_encode_bits(0, 5112 RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 5113 le32_encode_bits(0, 5114 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 5115 le32_encode_bits(2, 5116 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 5117 5118 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5119 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 5120 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5121 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 5122 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5123 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 5124 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 5125 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 5126 ptr += sizeof(*opch); 5127 } 5128 5129 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5130 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 5131 H2C_FUNC_SCANOFLD_BE, 1, 1, 5132 len); 5133 5134 if (option->enable) 5135 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 5136 else 5137 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 5138 5139 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 5140 if (ret) { 5141 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 5142 return ret; 5143 } 5144 5145 return 0; 5146 } 5147 5148 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 5149 struct rtw89_fw_h2c_rf_reg_info *info, 5150 u16 len, u8 page) 5151 { 5152 struct sk_buff *skb; 5153 u8 class = info->rf_path == RF_PATH_A ? 
5154 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 5155 int ret; 5156 5157 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5158 if (!skb) { 5159 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 5160 return -ENOMEM; 5161 } 5162 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 5163 5164 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5165 H2C_CAT_OUTSRC, class, page, 0, 0, 5166 len); 5167 5168 ret = rtw89_h2c_tx(rtwdev, skb, false); 5169 if (ret) { 5170 rtw89_err(rtwdev, "failed to send h2c\n"); 5171 goto fail; 5172 } 5173 5174 return 0; 5175 fail: 5176 dev_kfree_skb_any(skb); 5177 5178 return ret; 5179 } 5180 5181 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 5182 { 5183 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5184 struct rtw89_fw_h2c_rf_get_mccch *mccch; 5185 struct sk_buff *skb; 5186 int ret; 5187 u8 idx; 5188 5189 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 5190 if (!skb) { 5191 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 5192 return -ENOMEM; 5193 } 5194 skb_put(skb, sizeof(*mccch)); 5195 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 5196 5197 idx = rfk_mcc->table_idx; 5198 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 5199 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 5200 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 5201 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 5202 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 5203 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 5204 5205 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5206 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 5207 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 5208 sizeof(*mccch)); 5209 5210 ret = rtw89_h2c_tx(rtwdev, skb, false); 5211 if (ret) { 5212 rtw89_err(rtwdev, "failed to send h2c\n"); 5213 goto fail; 5214 } 5215 5216 return 0; 5217 fail: 5218 dev_kfree_skb_any(skb); 5219 5220 return ret; 5221 } 5222 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 5223 5224 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 5225 enum rtw89_phy_idx phy_idx) 5226 { 5227 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 5228 struct rtw89_fw_h2c_rfk_pre_info_v0 *h2c_v0; 5229 struct rtw89_fw_h2c_rfk_pre_info *h2c; 5230 u8 tbl_sel = rfk_mcc->table_idx; 5231 u32 len = sizeof(*h2c); 5232 struct sk_buff *skb; 5233 u8 ver = U8_MAX; 5234 u8 tbl, path; 5235 u32 val32; 5236 int ret; 5237 5238 if (RTW89_CHK_FW_FEATURE(RFK_PRE_NOTIFY_V0, &rtwdev->fw)) { 5239 len = sizeof(*h2c_v0); 5240 ver = 0; 5241 } 5242 5243 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5244 if (!skb) { 5245 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 5246 return -ENOMEM; 5247 } 5248 skb_put(skb, len); 5249 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 5250 5251 h2c->common.mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 5252 5253 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 5254 5255 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 5256 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5257 h2c->common.dbcc.ch[path][tbl] = 5258 cpu_to_le32(rfk_mcc->ch[tbl]); 5259 h2c->common.dbcc.band[path][tbl] = 5260 cpu_to_le32(rfk_mcc->band[tbl]); 5261 } 5262 } 5263 5264 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 5265 h2c->common.tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5266 h2c->common.tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5267 } 5268 5269 h2c->common.phy_idx = cpu_to_le32(phy_idx); 5270 5271 if (ver == 0) { /* RFK_PRE_NOTIFY_V0 */ 5272 h2c_v0 = (struct 
rtw89_fw_h2c_rfk_pre_info_v0 *)skb->data; 5273 5274 h2c_v0->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); 5275 h2c_v0->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); 5276 h2c_v0->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 5277 5278 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 5279 h2c_v0->ktbl_sel0 = cpu_to_le32(val32); 5280 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 5281 h2c_v0->ktbl_sel1 = cpu_to_le32(val32); 5282 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 5283 h2c_v0->rfmod0 = cpu_to_le32(val32); 5284 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 5285 h2c_v0->rfmod1 = cpu_to_le32(val32); 5286 5287 if (rtw89_is_mlo_1_1(rtwdev)) 5288 h2c_v0->mlo_1_1 = cpu_to_le32(1); 5289 5290 h2c_v0->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 5291 5292 goto done; 5293 } 5294 5295 if (rtw89_is_mlo_1_1(rtwdev)) 5296 h2c->mlo_1_1 = cpu_to_le32(1); 5297 done: 5298 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5299 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5300 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 5301 len); 5302 5303 ret = rtw89_h2c_tx(rtwdev, skb, false); 5304 if (ret) { 5305 rtw89_err(rtwdev, "failed to send h2c\n"); 5306 goto fail; 5307 } 5308 5309 return 0; 5310 fail: 5311 dev_kfree_skb_any(skb); 5312 5313 return ret; 5314 } 5315 5316 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5317 const struct rtw89_chan *chan, enum rtw89_tssi_mode tssi_mode) 5318 { 5319 struct rtw89_hal *hal = &rtwdev->hal; 5320 struct rtw89_h2c_rf_tssi *h2c; 5321 u32 len = sizeof(*h2c); 5322 struct sk_buff *skb; 5323 int ret; 5324 5325 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5326 if (!skb) { 5327 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5328 return -ENOMEM; 5329 } 5330 skb_put(skb, len); 5331 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5332 5333 h2c->len = cpu_to_le16(len); 5334 h2c->phy = phy_idx; 5335 h2c->ch = chan->channel; 5336 h2c->bw = chan->band_width; 5337 h2c->band = chan->band_type; 5338 h2c->hwtx_en = true; 5339 h2c->cv = hal->cv; 5340 h2c->tssi_mode = tssi_mode; 5341 5342 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5343 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5344 5345 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5346 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5347 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5348 5349 ret = rtw89_h2c_tx(rtwdev, skb, false); 5350 if (ret) { 5351 rtw89_err(rtwdev, "failed to send h2c\n"); 5352 goto fail; 5353 } 5354 5355 return 0; 5356 fail: 5357 dev_kfree_skb_any(skb); 5358 5359 return ret; 5360 } 5361 5362 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5363 const struct rtw89_chan *chan) 5364 { 5365 struct rtw89_h2c_rf_iqk *h2c; 5366 u32 len = sizeof(*h2c); 5367 struct sk_buff *skb; 5368 int ret; 5369 5370 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5371 if (!skb) { 5372 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5373 return -ENOMEM; 5374 } 5375 skb_put(skb, len); 5376 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5377 5378 h2c->phy_idx = cpu_to_le32(phy_idx); 5379 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5380 5381 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5382 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5383 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5384 5385 ret = rtw89_h2c_tx(rtwdev, skb, false); 5386 if (ret) { 5387 rtw89_err(rtwdev, "failed to send h2c\n"); 5388 goto fail; 5389 } 5390 5391 return 0; 5392 
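	/* rtw89_h2c_tx() does not consume the skb on failure; free it here. */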
fail: 5393 dev_kfree_skb_any(skb); 5394 5395 return ret; 5396 } 5397 5398 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5399 const struct rtw89_chan *chan) 5400 { 5401 struct rtw89_h2c_rf_dpk *h2c; 5402 u32 len = sizeof(*h2c); 5403 struct sk_buff *skb; 5404 int ret; 5405 5406 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5407 if (!skb) { 5408 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5409 return -ENOMEM; 5410 } 5411 skb_put(skb, len); 5412 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5413 5414 h2c->len = len; 5415 h2c->phy = phy_idx; 5416 h2c->dpk_enable = true; 5417 h2c->kpath = RF_AB; 5418 h2c->cur_band = chan->band_type; 5419 h2c->cur_bw = chan->band_width; 5420 h2c->cur_ch = chan->channel; 5421 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5422 5423 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5424 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5425 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5426 5427 ret = rtw89_h2c_tx(rtwdev, skb, false); 5428 if (ret) { 5429 rtw89_err(rtwdev, "failed to send h2c\n"); 5430 goto fail; 5431 } 5432 5433 return 0; 5434 fail: 5435 dev_kfree_skb_any(skb); 5436 5437 return ret; 5438 } 5439 5440 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5441 const struct rtw89_chan *chan) 5442 { 5443 struct rtw89_hal *hal = &rtwdev->hal; 5444 struct rtw89_h2c_rf_txgapk *h2c; 5445 u32 len = sizeof(*h2c); 5446 struct sk_buff *skb; 5447 int ret; 5448 5449 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5450 if (!skb) { 5451 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5452 return -ENOMEM; 5453 } 5454 skb_put(skb, len); 5455 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5456 5457 h2c->len = len; 5458 h2c->ktype = 2; 5459 h2c->phy = phy_idx; 5460 h2c->kpath = RF_AB; 5461 h2c->band = chan->band_type; 5462 h2c->bw = chan->band_width; 5463 h2c->ch = chan->channel; 5464 h2c->cv = hal->cv; 5465 5466 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5467 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5468 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5469 5470 ret = rtw89_h2c_tx(rtwdev, skb, false); 5471 if (ret) { 5472 rtw89_err(rtwdev, "failed to send h2c\n"); 5473 goto fail; 5474 } 5475 5476 return 0; 5477 fail: 5478 dev_kfree_skb_any(skb); 5479 5480 return ret; 5481 } 5482 5483 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5484 const struct rtw89_chan *chan) 5485 { 5486 struct rtw89_h2c_rf_dack *h2c; 5487 u32 len = sizeof(*h2c); 5488 struct sk_buff *skb; 5489 int ret; 5490 5491 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5492 if (!skb) { 5493 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DACK\n"); 5494 return -ENOMEM; 5495 } 5496 skb_put(skb, len); 5497 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5498 5499 h2c->len = cpu_to_le32(len); 5500 h2c->phy = cpu_to_le32(phy_idx); 5501 h2c->type = cpu_to_le32(0); 5502 5503 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5504 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5505 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5506 5507 ret = rtw89_h2c_tx(rtwdev, skb, false); 5508 if (ret) { 5509 rtw89_err(rtwdev, "failed to send h2c\n"); 5510 goto fail; 5511 } 5512 5513 return 0; 5514 fail: 5515 dev_kfree_skb_any(skb); 5516 5517 return ret; 5518 } 5519 5520 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5521 const struct rtw89_chan *chan) 5522 { 5523 struct rtw89_h2c_rf_rxdck *h2c; 5524 u32 len = sizeof(*h2c); 5525 struct sk_buff *skb; 5526 int ret; 5527 5528 
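	/* Fill the RX DCK offload request with the current band, bandwidth and
	 * channel, then hand it to the firmware RFK handler over the OUTSRC class.
	 */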
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5529 if (!skb) { 5530 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5531 return -ENOMEM; 5532 } 5533 skb_put(skb, len); 5534 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5535 5536 h2c->len = len; 5537 h2c->phy = phy_idx; 5538 h2c->is_afe = false; 5539 h2c->kpath = RF_AB; 5540 h2c->cur_band = chan->band_type; 5541 h2c->cur_bw = chan->band_width; 5542 h2c->cur_ch = chan->channel; 5543 h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5544 5545 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5546 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5547 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5548 5549 ret = rtw89_h2c_tx(rtwdev, skb, false); 5550 if (ret) { 5551 rtw89_err(rtwdev, "failed to send h2c\n"); 5552 goto fail; 5553 } 5554 5555 return 0; 5556 fail: 5557 dev_kfree_skb_any(skb); 5558 5559 return ret; 5560 } 5561 5562 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5563 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5564 bool rack, bool dack) 5565 { 5566 struct sk_buff *skb; 5567 int ret; 5568 5569 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5570 if (!skb) { 5571 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5572 return -ENOMEM; 5573 } 5574 skb_put_data(skb, buf, len); 5575 5576 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5577 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5578 len); 5579 5580 ret = rtw89_h2c_tx(rtwdev, skb, false); 5581 if (ret) { 5582 rtw89_err(rtwdev, "failed to send h2c\n"); 5583 goto fail; 5584 } 5585 5586 return 0; 5587 fail: 5588 dev_kfree_skb_any(skb); 5589 5590 return ret; 5591 } 5592 5593 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5594 { 5595 struct sk_buff *skb; 5596 int ret; 5597 5598 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5599 if (!skb) { 5600 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5601 return -ENOMEM; 5602 } 5603 skb_put_data(skb, buf, len); 5604 5605 ret = rtw89_h2c_tx(rtwdev, skb, false); 5606 if (ret) { 5607 rtw89_err(rtwdev, "failed to send h2c\n"); 5608 goto fail; 5609 } 5610 5611 return 0; 5612 fail: 5613 dev_kfree_skb_any(skb); 5614 5615 return ret; 5616 } 5617 5618 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5619 { 5620 struct rtw89_early_h2c *early_h2c; 5621 5622 lockdep_assert_held(&rtwdev->mutex); 5623 5624 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5625 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5626 } 5627 } 5628 5629 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5630 { 5631 struct rtw89_early_h2c *early_h2c, *tmp; 5632 5633 mutex_lock(&rtwdev->mutex); 5634 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5635 list_del(&early_h2c->list); 5636 kfree(early_h2c->h2c); 5637 kfree(early_h2c); 5638 } 5639 mutex_unlock(&rtwdev->mutex); 5640 } 5641 5642 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5643 { 5644 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5645 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5646 5647 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5648 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5649 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5650 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 5651 } 5652 5653 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 5654 struct sk_buff *c2h) 5655 { 5656 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 
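	/* Descriptive note: decide whether this C2H must be handled right away
	 * in the atomic (IRQ-safe) path rather than deferred to the c2h_work
	 * queue.
	 */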
5657 u8 category = attr->category; 5658 u8 class = attr->class; 5659 u8 func = attr->func; 5660 5661 switch (category) { 5662 default: 5663 return false; 5664 case RTW89_C2H_CAT_MAC: 5665 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 5666 case RTW89_C2H_CAT_OUTSRC: 5667 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 5668 } 5669 } 5670 5671 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 5672 { 5673 rtw89_fw_c2h_parse_attr(c2h); 5674 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 5675 goto enqueue; 5676 5677 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 5678 dev_kfree_skb_any(c2h); 5679 return; 5680 5681 enqueue: 5682 skb_queue_tail(&rtwdev->c2h_queue, c2h); 5683 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 5684 } 5685 5686 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 5687 struct sk_buff *skb) 5688 { 5689 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 5690 u8 category = attr->category; 5691 u8 class = attr->class; 5692 u8 func = attr->func; 5693 u16 len = attr->len; 5694 bool dump = true; 5695 5696 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5697 return; 5698 5699 switch (category) { 5700 case RTW89_C2H_CAT_TEST: 5701 break; 5702 case RTW89_C2H_CAT_MAC: 5703 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 5704 if (class == RTW89_MAC_C2H_CLASS_INFO && 5705 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 5706 dump = false; 5707 break; 5708 case RTW89_C2H_CAT_OUTSRC: 5709 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 5710 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 5711 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 5712 else 5713 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 5714 break; 5715 } 5716 5717 if (dump) 5718 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 5719 } 5720 5721 void rtw89_fw_c2h_work(struct work_struct *work) 5722 { 5723 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5724 c2h_work); 5725 struct sk_buff *skb, *tmp; 5726 5727 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 5728 skb_unlink(skb, &rtwdev->c2h_queue); 5729 mutex_lock(&rtwdev->mutex); 5730 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 5731 mutex_unlock(&rtwdev->mutex); 5732 dev_kfree_skb_any(skb); 5733 } 5734 } 5735 5736 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 5737 struct rtw89_mac_h2c_info *info) 5738 { 5739 const struct rtw89_chip_info *chip = rtwdev->chip; 5740 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5741 const u32 *h2c_reg = chip->h2c_regs; 5742 u8 i, val, len; 5743 int ret; 5744 5745 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 5746 rtwdev, chip->h2c_ctrl_reg); 5747 if (ret) { 5748 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 5749 return ret; 5750 } 5751 5752 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 5753 sizeof(info->u.h2creg[0])); 5754 5755 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 5756 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 5757 5758 for (i = 0; i < RTW89_H2CREG_MAX; i++) 5759 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 5760 5761 fw_info->h2c_counter++; 5762 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 5763 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 5764 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 5765 5766 return 0; 5767 } 5768 5769 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 5770 struct rtw89_mac_c2h_info *info) 5771 { 5772 const struct rtw89_chip_info *chip = rtwdev->chip; 5773 struct 
rtw89_fw_info *fw_info = &rtwdev->fw; 5774 const u32 *c2h_reg = chip->c2h_regs; 5775 u32 ret; 5776 u8 i, val; 5777 5778 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 5779 5780 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 5781 RTW89_C2H_TIMEOUT, false, rtwdev, 5782 chip->c2h_ctrl_reg); 5783 if (ret) { 5784 rtw89_warn(rtwdev, "c2h reg timeout\n"); 5785 return ret; 5786 } 5787 5788 for (i = 0; i < RTW89_C2HREG_MAX; i++) 5789 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 5790 5791 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 5792 5793 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 5794 info->content_len = 5795 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 5796 RTW89_C2HREG_HDR_LEN; 5797 5798 fw_info->c2h_counter++; 5799 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 5800 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 5801 5802 return 0; 5803 } 5804 5805 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 5806 struct rtw89_mac_h2c_info *h2c_info, 5807 struct rtw89_mac_c2h_info *c2h_info) 5808 { 5809 u32 ret; 5810 5811 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 5812 lockdep_assert_held(&rtwdev->mutex); 5813 5814 if (!h2c_info && !c2h_info) 5815 return -EINVAL; 5816 5817 if (!h2c_info) 5818 goto recv_c2h; 5819 5820 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 5821 if (ret) 5822 return ret; 5823 5824 recv_c2h: 5825 if (!c2h_info) 5826 return 0; 5827 5828 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 5829 if (ret) 5830 return ret; 5831 5832 return 0; 5833 } 5834 5835 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 5836 { 5837 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 5838 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 5839 return; 5840 } 5841 5842 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 5843 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 5844 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 5845 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 5846 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 5847 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 5848 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 5849 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 5850 5851 rtw89_fw_prog_cnt_dump(rtwdev); 5852 } 5853 5854 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 5855 { 5856 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5857 struct rtw89_pktofld_info *info, *tmp; 5858 u8 idx; 5859 5860 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 5861 if (!(rtwdev->chip->support_bands & BIT(idx))) 5862 continue; 5863 5864 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 5865 if (test_bit(info->id, rtwdev->pkt_offload)) 5866 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 5867 list_del(&info->list); 5868 kfree(info); 5869 } 5870 } 5871 } 5872 5873 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 5874 struct rtw89_vif *rtwvif, 5875 struct rtw89_pktofld_info *info, 5876 enum nl80211_band band, u8 ssid_idx) 5877 { 5878 struct cfg80211_scan_request *req = rtwvif->scan_req; 5879 5880 if (band != NL80211_BAND_6GHZ) 5881 return false; 5882 5883 if (req->ssids[ssid_idx].ssid_len) { 5884 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 5885 req->ssids[ssid_idx].ssid_len); 5886 info->ssid_len = req->ssids[ssid_idx].ssid_len; 5887 return false; 5888 } else { 5889 info->wildcard_6ghz = true; 5890 return true; 5891 } 5892 } 5893 5894 static int 
rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 5895 struct rtw89_vif *rtwvif, 5896 struct sk_buff *skb, u8 ssid_idx) 5897 { 5898 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5899 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5900 struct rtw89_pktofld_info *info; 5901 struct sk_buff *new; 5902 int ret = 0; 5903 u8 band; 5904 5905 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 5906 if (!(rtwdev->chip->support_bands & BIT(band))) 5907 continue; 5908 5909 new = skb_copy(skb, GFP_KERNEL); 5910 if (!new) { 5911 ret = -ENOMEM; 5912 goto out; 5913 } 5914 skb_put_data(new, ies->ies[band], ies->len[band]); 5915 skb_put_data(new, ies->common_ies, ies->common_ie_len); 5916 5917 info = kzalloc(sizeof(*info), GFP_KERNEL); 5918 if (!info) { 5919 ret = -ENOMEM; 5920 kfree_skb(new); 5921 goto out; 5922 } 5923 5924 rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 5925 ssid_idx); 5926 5927 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 5928 if (ret) { 5929 kfree_skb(new); 5930 kfree(info); 5931 goto out; 5932 } 5933 5934 list_add_tail(&info->list, &scan_info->pkt_list[band]); 5935 kfree_skb(new); 5936 } 5937 out: 5938 return ret; 5939 } 5940 5941 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 5942 struct rtw89_vif *rtwvif) 5943 { 5944 struct cfg80211_scan_request *req = rtwvif->scan_req; 5945 struct sk_buff *skb; 5946 u8 num = req->n_ssids, i; 5947 int ret; 5948 5949 for (i = 0; i < num; i++) { 5950 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5951 req->ssids[i].ssid, 5952 req->ssids[i].ssid_len, 5953 req->ie_len); 5954 if (!skb) 5955 return -ENOMEM; 5956 5957 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 5958 kfree_skb(skb); 5959 5960 if (ret) 5961 return ret; 5962 } 5963 5964 return 0; 5965 } 5966 5967 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 5968 struct cfg80211_scan_request *req, 5969 struct rtw89_mac_chinfo *ch_info) 5970 { 5971 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5972 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5973 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 5974 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5975 struct cfg80211_scan_6ghz_params *params; 5976 struct rtw89_pktofld_info *info, *tmp; 5977 struct ieee80211_hdr *hdr; 5978 struct sk_buff *skb; 5979 bool found; 5980 int ret = 0; 5981 u8 i; 5982 5983 if (!req->n_6ghz_params) 5984 return 0; 5985 5986 for (i = 0; i < req->n_6ghz_params; i++) { 5987 params = &req->scan_6ghz_params[i]; 5988 5989 if (req->channels[params->channel_idx]->hw_value != 5990 ch_info->pri_ch) 5991 continue; 5992 5993 found = false; 5994 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 5995 if (ether_addr_equal(tmp->bssid, params->bssid)) { 5996 found = true; 5997 break; 5998 } 5999 } 6000 if (found) 6001 continue; 6002 6003 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 6004 NULL, 0, req->ie_len); 6005 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 6006 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 6007 hdr = (struct ieee80211_hdr *)skb->data; 6008 ether_addr_copy(hdr->addr3, params->bssid); 6009 6010 info = kzalloc(sizeof(*info), GFP_KERNEL); 6011 if (!info) { 6012 ret = -ENOMEM; 6013 kfree_skb(skb); 6014 goto out; 6015 } 6016 6017 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 6018 if (ret) { 6019 kfree_skb(skb); 6020 kfree(info); 6021 goto out; 6022 } 6023 6024 ether_addr_copy(info->bssid, params->bssid); 
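		/* Descriptive note: record the BSSID and 6 GHz channel of this
		 * directed probe so duplicate RNR entries for the same BSSID
		 * are skipped by the lookup above.
		 */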
6025 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 6026 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 6027 6028 ch_info->tx_pkt = true; 6029 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6030 6031 kfree_skb(skb); 6032 } 6033 6034 out: 6035 return ret; 6036 } 6037 6038 static void rtw89_pno_scan_add_chan_ax(struct rtw89_dev *rtwdev, 6039 int chan_type, int ssid_num, 6040 struct rtw89_mac_chinfo *ch_info) 6041 { 6042 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6043 struct rtw89_pktofld_info *info; 6044 u8 probe_count = 0; 6045 6046 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6047 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6048 ch_info->bw = RTW89_SCAN_WIDTH; 6049 ch_info->tx_pkt = true; 6050 ch_info->cfg_tx_pwr = false; 6051 ch_info->tx_pwr_idx = 0; 6052 ch_info->tx_null = false; 6053 ch_info->pause_data = false; 6054 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6055 6056 if (ssid_num) { 6057 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6058 if (info->channel_6ghz && 6059 ch_info->pri_ch != info->channel_6ghz) 6060 continue; 6061 else if (info->channel_6ghz && probe_count != 0) 6062 ch_info->period += RTW89_CHANNEL_TIME_6G; 6063 6064 if (info->wildcard_6ghz) 6065 continue; 6066 6067 ch_info->pkt_id[probe_count++] = info->id; 6068 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6069 break; 6070 } 6071 ch_info->num_pkt = probe_count; 6072 } 6073 6074 switch (chan_type) { 6075 case RTW89_CHAN_DFS: 6076 if (ch_info->ch_band != RTW89_BAND_6G) 6077 ch_info->period = max_t(u8, ch_info->period, 6078 RTW89_DFS_CHAN_TIME); 6079 ch_info->dwell_time = RTW89_DWELL_TIME; 6080 break; 6081 case RTW89_CHAN_ACTIVE: 6082 break; 6083 default: 6084 rtw89_err(rtwdev, "Channel type out of bound\n"); 6085 } 6086 } 6087 6088 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 6089 int ssid_num, 6090 struct rtw89_mac_chinfo *ch_info) 6091 { 6092 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6093 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 6094 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6095 struct cfg80211_scan_request *req = rtwvif->scan_req; 6096 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 6097 struct rtw89_pktofld_info *info; 6098 u8 band, probe_count = 0; 6099 int ret; 6100 6101 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6102 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6103 ch_info->bw = RTW89_SCAN_WIDTH; 6104 ch_info->tx_pkt = true; 6105 ch_info->cfg_tx_pwr = false; 6106 ch_info->tx_pwr_idx = 0; 6107 ch_info->tx_null = false; 6108 ch_info->pause_data = false; 6109 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6110 6111 if (ch_info->ch_band == RTW89_BAND_6G) { 6112 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6113 !ch_info->is_psc) { 6114 ch_info->tx_pkt = false; 6115 if (!req->duration_mandatory) 6116 ch_info->period -= RTW89_DWELL_TIME_6G; 6117 } 6118 } 6119 6120 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 6121 if (ret) 6122 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 6123 6124 if (ssid_num) { 6125 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6126 6127 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6128 if (info->channel_6ghz && 6129 ch_info->pri_ch != info->channel_6ghz) 6130 continue; 6131 else if (info->channel_6ghz && probe_count != 0) 6132 ch_info->period += RTW89_CHANNEL_TIME_6G; 6133 6134 if (info->wildcard_6ghz) 6135 continue; 6136 6137 ch_info->pkt_id[probe_count++] = info->id; 
6138 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6139 break; 6140 } 6141 ch_info->num_pkt = probe_count; 6142 } 6143 6144 switch (chan_type) { 6145 case RTW89_CHAN_OPERATE: 6146 ch_info->central_ch = op->channel; 6147 ch_info->pri_ch = op->primary_channel; 6148 ch_info->ch_band = op->band_type; 6149 ch_info->bw = op->band_width; 6150 ch_info->tx_null = true; 6151 ch_info->num_pkt = 0; 6152 break; 6153 case RTW89_CHAN_DFS: 6154 if (ch_info->ch_band != RTW89_BAND_6G) 6155 ch_info->period = max_t(u8, ch_info->period, 6156 RTW89_DFS_CHAN_TIME); 6157 ch_info->dwell_time = RTW89_DWELL_TIME; 6158 break; 6159 case RTW89_CHAN_ACTIVE: 6160 break; 6161 default: 6162 rtw89_err(rtwdev, "Channel type out of bound\n"); 6163 } 6164 } 6165 6166 static void rtw89_pno_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6167 int ssid_num, 6168 struct rtw89_mac_chinfo_be *ch_info) 6169 { 6170 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6171 struct rtw89_pktofld_info *info; 6172 u8 probe_count = 0, i; 6173 6174 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6175 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6176 ch_info->bw = RTW89_SCAN_WIDTH; 6177 ch_info->tx_null = false; 6178 ch_info->pause_data = false; 6179 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6180 6181 if (ssid_num) { 6182 list_for_each_entry(info, &rtw_wow->pno_pkt_list, list) { 6183 ch_info->pkt_id[probe_count++] = info->id; 6184 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6185 break; 6186 } 6187 } 6188 6189 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6190 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6191 6192 switch (chan_type) { 6193 case RTW89_CHAN_DFS: 6194 ch_info->period = max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6195 ch_info->dwell_time = RTW89_DWELL_TIME; 6196 break; 6197 case RTW89_CHAN_ACTIVE: 6198 break; 6199 default: 6200 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6201 break; 6202 } 6203 } 6204 6205 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 6206 int ssid_num, 6207 struct rtw89_mac_chinfo_be *ch_info) 6208 { 6209 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6210 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 6211 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6212 struct cfg80211_scan_request *req = rtwvif->scan_req; 6213 struct rtw89_pktofld_info *info; 6214 u8 band, probe_count = 0, i; 6215 6216 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 6217 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 6218 ch_info->bw = RTW89_SCAN_WIDTH; 6219 ch_info->tx_null = false; 6220 ch_info->pause_data = false; 6221 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6222 6223 if (ssid_num) { 6224 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 6225 6226 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 6227 if (info->channel_6ghz && 6228 ch_info->pri_ch != info->channel_6ghz) 6229 continue; 6230 6231 if (info->wildcard_6ghz) 6232 continue; 6233 6234 ch_info->pkt_id[probe_count++] = info->id; 6235 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 6236 break; 6237 } 6238 } 6239 6240 if (ch_info->ch_band == RTW89_BAND_6G) { 6241 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 6242 !ch_info->is_psc) { 6243 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 6244 if (!req->duration_mandatory) 6245 ch_info->period -= RTW89_DWELL_TIME_6G; 6246 } 6247 } 6248 6249 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 6250 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 6251 6252 switch (chan_type) { 6253 case RTW89_CHAN_DFS: 6254 
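		/* Descriptive note: for radar / no-IR channels, keep the stay
		 * on channel at least RTW89_DFS_CHAN_TIME (outside 6 GHz) and
		 * set a dwell time.
		 */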
if (ch_info->ch_band != RTW89_BAND_6G) 6255 ch_info->period = 6256 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 6257 ch_info->dwell_time = RTW89_DWELL_TIME; 6258 break; 6259 case RTW89_CHAN_ACTIVE: 6260 break; 6261 default: 6262 rtw89_warn(rtwdev, "Channel type out of bound\n"); 6263 break; 6264 } 6265 } 6266 6267 int rtw89_pno_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6268 struct rtw89_vif *rtwvif) 6269 { 6270 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6271 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6272 struct rtw89_mac_chinfo *ch_info, *tmp; 6273 struct ieee80211_channel *channel; 6274 struct list_head chan_list; 6275 int list_len; 6276 enum rtw89_chan_type type; 6277 int ret = 0; 6278 u32 idx; 6279 6280 INIT_LIST_HEAD(&chan_list); 6281 for (idx = 0, list_len = 0; 6282 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6283 idx++, list_len++) { 6284 channel = nd_config->channels[idx]; 6285 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6286 if (!ch_info) { 6287 ret = -ENOMEM; 6288 goto out; 6289 } 6290 6291 ch_info->period = RTW89_CHANNEL_TIME; 6292 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6293 ch_info->central_ch = channel->hw_value; 6294 ch_info->pri_ch = channel->hw_value; 6295 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6296 6297 if (channel->flags & 6298 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6299 type = RTW89_CHAN_DFS; 6300 else 6301 type = RTW89_CHAN_ACTIVE; 6302 6303 rtw89_pno_scan_add_chan_ax(rtwdev, type, nd_config->n_match_sets, ch_info); 6304 list_add_tail(&ch_info->list, &chan_list); 6305 } 6306 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6307 6308 out: 6309 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6310 list_del(&ch_info->list); 6311 kfree(ch_info); 6312 } 6313 6314 return ret; 6315 } 6316 6317 int rtw89_hw_scan_add_chan_list_ax(struct rtw89_dev *rtwdev, 6318 struct rtw89_vif *rtwvif, bool connected) 6319 { 6320 struct cfg80211_scan_request *req = rtwvif->scan_req; 6321 struct rtw89_mac_chinfo *ch_info, *tmp; 6322 struct ieee80211_channel *channel; 6323 struct list_head chan_list; 6324 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 6325 int list_len, off_chan_time = 0; 6326 enum rtw89_chan_type type; 6327 int ret = 0; 6328 u32 idx; 6329 6330 INIT_LIST_HEAD(&chan_list); 6331 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6332 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6333 idx++, list_len++) { 6334 channel = req->channels[idx]; 6335 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6336 if (!ch_info) { 6337 ret = -ENOMEM; 6338 goto out; 6339 } 6340 6341 if (req->duration) 6342 ch_info->period = req->duration; 6343 else if (channel->band == NL80211_BAND_6GHZ) 6344 ch_info->period = RTW89_CHANNEL_TIME_6G + 6345 RTW89_DWELL_TIME_6G; 6346 else 6347 ch_info->period = RTW89_CHANNEL_TIME; 6348 6349 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6350 ch_info->central_ch = channel->hw_value; 6351 ch_info->pri_ch = channel->hw_value; 6352 ch_info->rand_seq_num = random_seq; 6353 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6354 6355 if (channel->flags & 6356 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6357 type = RTW89_CHAN_DFS; 6358 else 6359 type = RTW89_CHAN_ACTIVE; 6360 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 6361 6362 if (connected && 6363 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 6364 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 6365 if (!tmp) { 6366 ret = 
-ENOMEM; 6367 kfree(ch_info); 6368 goto out; 6369 } 6370 6371 type = RTW89_CHAN_OPERATE; 6372 tmp->period = req->duration_mandatory ? 6373 req->duration : RTW89_CHANNEL_TIME; 6374 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 6375 list_add_tail(&tmp->list, &chan_list); 6376 off_chan_time = 0; 6377 list_len++; 6378 } 6379 list_add_tail(&ch_info->list, &chan_list); 6380 off_chan_time += ch_info->period; 6381 } 6382 rtwdev->scan_info.last_chan_idx = idx; 6383 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 6384 6385 out: 6386 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6387 list_del(&ch_info->list); 6388 kfree(ch_info); 6389 } 6390 6391 return ret; 6392 } 6393 6394 int rtw89_pno_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6395 struct rtw89_vif *rtwvif) 6396 { 6397 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6398 struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6399 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6400 struct ieee80211_channel *channel; 6401 struct list_head chan_list; 6402 enum rtw89_chan_type type; 6403 int list_len, ret; 6404 u32 idx; 6405 6406 INIT_LIST_HEAD(&chan_list); 6407 6408 for (idx = 0, list_len = 0; 6409 idx < nd_config->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6410 idx++, list_len++) { 6411 channel = nd_config->channels[idx]; 6412 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6413 if (!ch_info) { 6414 ret = -ENOMEM; 6415 goto out; 6416 } 6417 6418 ch_info->period = RTW89_CHANNEL_TIME; 6419 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6420 ch_info->central_ch = channel->hw_value; 6421 ch_info->pri_ch = channel->hw_value; 6422 ch_info->is_psc = cfg80211_channel_is_psc(channel); 6423 6424 if (channel->flags & 6425 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6426 type = RTW89_CHAN_DFS; 6427 else 6428 type = RTW89_CHAN_ACTIVE; 6429 6430 rtw89_pno_scan_add_chan_be(rtwdev, type, 6431 nd_config->n_match_sets, ch_info); 6432 list_add_tail(&ch_info->list, &chan_list); 6433 } 6434 6435 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 6436 6437 out: 6438 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6439 list_del(&ch_info->list); 6440 kfree(ch_info); 6441 } 6442 6443 return ret; 6444 } 6445 6446 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 6447 struct rtw89_vif *rtwvif, bool connected) 6448 { 6449 struct cfg80211_scan_request *req = rtwvif->scan_req; 6450 struct rtw89_mac_chinfo_be *ch_info, *tmp; 6451 struct ieee80211_channel *channel; 6452 struct list_head chan_list; 6453 enum rtw89_chan_type type; 6454 int list_len, ret; 6455 bool random_seq; 6456 u32 idx; 6457 6458 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 6459 INIT_LIST_HEAD(&chan_list); 6460 6461 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 6462 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 6463 idx++, list_len++) { 6464 channel = req->channels[idx]; 6465 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 6466 if (!ch_info) { 6467 ret = -ENOMEM; 6468 goto out; 6469 } 6470 6471 if (req->duration) 6472 ch_info->period = req->duration; 6473 else if (channel->band == NL80211_BAND_6GHZ) 6474 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 6475 else 6476 ch_info->period = RTW89_CHANNEL_TIME; 6477 6478 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 6479 ch_info->central_ch = channel->hw_value; 6480 ch_info->pri_ch = channel->hw_value; 6481 ch_info->rand_seq_num = random_seq; 6482 ch_info->is_psc = 
cfg80211_channel_is_psc(channel); 6483 6484 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 6485 type = RTW89_CHAN_DFS; 6486 else 6487 type = RTW89_CHAN_ACTIVE; 6488 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 6489 6490 list_add_tail(&ch_info->list, &chan_list); 6491 } 6492 6493 rtwdev->scan_info.last_chan_idx = idx; 6494 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 6495 6496 out: 6497 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6498 list_del(&ch_info->list); 6499 kfree(ch_info); 6500 } 6501 6502 return ret; 6503 } 6504 6505 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6506 struct rtw89_vif *rtwvif, bool connected) 6507 { 6508 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6509 int ret; 6510 6511 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 6512 if (ret) { 6513 rtw89_err(rtwdev, "Update probe request failed\n"); 6514 goto out; 6515 } 6516 ret = mac->add_chan_list(rtwdev, rtwvif, connected); 6517 out: 6518 return ret; 6519 } 6520 6521 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6522 struct ieee80211_scan_request *scan_req) 6523 { 6524 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6525 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6526 struct cfg80211_scan_request *req = &scan_req->req; 6527 u32 rx_fltr = rtwdev->hal.rx_fltr; 6528 u8 mac_addr[ETH_ALEN]; 6529 6530 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 6531 rtwdev->scan_info.scanning_vif = vif; 6532 rtwdev->scan_info.last_chan_idx = 0; 6533 rtwdev->scan_info.abort = false; 6534 rtwvif->scan_ies = &scan_req->ies; 6535 rtwvif->scan_req = req; 6536 ieee80211_stop_queues(rtwdev->hw); 6537 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false); 6538 6539 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6540 get_random_mask_addr(mac_addr, req->mac_addr, 6541 req->mac_addr_mask); 6542 else 6543 ether_addr_copy(mac_addr, vif->addr); 6544 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 6545 6546 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6547 rx_fltr &= ~B_AX_A_BC; 6548 rx_fltr &= ~B_AX_A_A1_MATCH; 6549 rtw89_write32_mask(rtwdev, 6550 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6551 B_AX_RX_FLTR_CFG_MASK, 6552 rx_fltr); 6553 6554 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6555 } 6556 6557 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6558 bool aborted) 6559 { 6560 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6561 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6562 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 6563 struct cfg80211_scan_info info = { 6564 .aborted = aborted, 6565 }; 6566 6567 if (!vif) 6568 return; 6569 6570 rtw89_write32_mask(rtwdev, 6571 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6572 B_AX_RX_FLTR_CFG_MASK, 6573 rtwdev->hal.rx_fltr); 6574 6575 rtw89_core_scan_complete(rtwdev, vif, true); 6576 ieee80211_scan_completed(rtwdev->hw, &info); 6577 ieee80211_wake_queues(rtwdev->hw); 6578 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true); 6579 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6580 6581 rtw89_release_pkt_list(rtwdev); 6582 rtwvif->scan_req = NULL; 6583 rtwvif->scan_ies = NULL; 6584 scan_info->last_chan_idx = 0; 6585 scan_info->scanning_vif = NULL; 6586 scan_info->abort = false; 6587 6588 rtw89_chanctx_proceed(rtwdev); 6589 } 6590 6591 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 6592 
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	int ret;

	scan_info->abort = true;

	ret = rtw89_hw_scan_offload(rtwdev, vif, false);
	if (ret)
		rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);

	/* Indicate ieee80211_scan_completed() before returning, which is safe
	 * because the scan abort command always waits for completion of
	 * RTW89_SCAN_END_SCAN_NOTIFY, so ieee80211_stop() can flush the scan
	 * work properly.
	 */
	rtw89_hw_scan_complete(rtwdev, vif, true);
}

static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif) {
		/* A non-zero BSSID implies the vif is connected or is
		 * attempting to connect.
		 */
		if (!is_zero_ether_addr(rtwvif->bssid))
			return true;
	}

	return false;
}

int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			  bool enable)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_scan_option opt = {0};
	struct rtw89_vif *rtwvif;
	bool connected;
	int ret = 0;

	rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL;
	if (!rtwvif)
		return -EINVAL;

	connected = rtw89_is_any_vif_connected_or_connecting(rtwdev);
	opt.enable = enable;
	opt.target_ch_mode = connected;
	if (enable) {
		ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected);
		if (ret)
			goto out;
	}

	if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) {
		opt.operation = enable ? RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP;
		opt.scan_mode = RTW89_SCAN_MODE_SA;
		opt.band = RTW89_PHY_0;
		opt.num_macc_role = 0;
		opt.mlo_mode = rtwdev->mlo_dbcc_mode;
		opt.num_opch = connected ? 1 : 0;
		opt.opch_end = connected ?
0 : RTW89_CHAN_INVALID; 6653 } 6654 6655 ret = mac->scan_offload(rtwdev, &opt, rtwvif, false); 6656 out: 6657 return ret; 6658 } 6659 6660 #define H2C_FW_CPU_EXCEPTION_LEN 4 6661 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 6662 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 6663 { 6664 struct sk_buff *skb; 6665 int ret; 6666 6667 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 6668 if (!skb) { 6669 rtw89_err(rtwdev, 6670 "failed to alloc skb for fw cpu exception\n"); 6671 return -ENOMEM; 6672 } 6673 6674 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 6675 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 6676 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 6677 6678 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6679 H2C_CAT_TEST, 6680 H2C_CL_FW_STATUS_TEST, 6681 H2C_FUNC_CPU_EXCEPTION, 0, 0, 6682 H2C_FW_CPU_EXCEPTION_LEN); 6683 6684 ret = rtw89_h2c_tx(rtwdev, skb, false); 6685 if (ret) { 6686 rtw89_err(rtwdev, "failed to send h2c\n"); 6687 goto fail; 6688 } 6689 6690 return 0; 6691 6692 fail: 6693 dev_kfree_skb_any(skb); 6694 return ret; 6695 } 6696 6697 #define H2C_PKT_DROP_LEN 24 6698 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 6699 const struct rtw89_pkt_drop_params *params) 6700 { 6701 struct sk_buff *skb; 6702 int ret; 6703 6704 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 6705 if (!skb) { 6706 rtw89_err(rtwdev, 6707 "failed to alloc skb for packet drop\n"); 6708 return -ENOMEM; 6709 } 6710 6711 switch (params->sel) { 6712 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 6713 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 6714 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 6715 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 6716 case RTW89_PKT_DROP_SEL_BAND_ONCE: 6717 break; 6718 default: 6719 rtw89_debug(rtwdev, RTW89_DBG_FW, 6720 "H2C of pkt drop might not fully support sel: %d yet\n", 6721 params->sel); 6722 break; 6723 } 6724 6725 skb_put(skb, H2C_PKT_DROP_LEN); 6726 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 6727 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 6728 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 6729 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 6730 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 6731 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 6732 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 6733 params->macid_band_sel[0]); 6734 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 6735 params->macid_band_sel[1]); 6736 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 6737 params->macid_band_sel[2]); 6738 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 6739 params->macid_band_sel[3]); 6740 6741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6742 H2C_CAT_MAC, 6743 H2C_CL_MAC_FW_OFLD, 6744 H2C_FUNC_PKT_DROP, 0, 0, 6745 H2C_PKT_DROP_LEN); 6746 6747 ret = rtw89_h2c_tx(rtwdev, skb, false); 6748 if (ret) { 6749 rtw89_err(rtwdev, "failed to send h2c\n"); 6750 goto fail; 6751 } 6752 6753 return 0; 6754 6755 fail: 6756 dev_kfree_skb_any(skb); 6757 return ret; 6758 } 6759 6760 #define H2C_KEEP_ALIVE_LEN 4 6761 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6762 bool enable) 6763 { 6764 struct sk_buff *skb; 6765 u8 pkt_id = 0; 6766 int ret; 6767 6768 if (enable) { 6769 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6770 RTW89_PKT_OFLD_TYPE_NULL_DATA, 6771 &pkt_id); 6772 if (ret) 6773 return -EPERM; 6774 } 6775 6776 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 6777 if (!skb) { 6778 
rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6779 return -ENOMEM; 6780 } 6781 6782 skb_put(skb, H2C_KEEP_ALIVE_LEN); 6783 6784 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 6785 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 6786 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 6787 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 6788 6789 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6790 H2C_CAT_MAC, 6791 H2C_CL_MAC_WOW, 6792 H2C_FUNC_KEEP_ALIVE, 0, 1, 6793 H2C_KEEP_ALIVE_LEN); 6794 6795 ret = rtw89_h2c_tx(rtwdev, skb, false); 6796 if (ret) { 6797 rtw89_err(rtwdev, "failed to send h2c\n"); 6798 goto fail; 6799 } 6800 6801 return 0; 6802 6803 fail: 6804 dev_kfree_skb_any(skb); 6805 6806 return ret; 6807 } 6808 6809 int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6810 bool enable) 6811 { 6812 struct rtw89_h2c_arp_offload *h2c; 6813 u32 len = sizeof(*h2c); 6814 struct sk_buff *skb; 6815 u8 pkt_id = 0; 6816 int ret; 6817 6818 if (enable) { 6819 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6820 RTW89_PKT_OFLD_TYPE_ARP_RSP, 6821 &pkt_id); 6822 if (ret) 6823 return ret; 6824 } 6825 6826 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6827 if (!skb) { 6828 rtw89_err(rtwdev, "failed to alloc skb for arp offload\n"); 6829 return -ENOMEM; 6830 } 6831 6832 skb_put(skb, len); 6833 h2c = (struct rtw89_h2c_arp_offload *)skb->data; 6834 6835 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) | 6836 le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) | 6837 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) | 6838 le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID); 6839 6840 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6841 H2C_CAT_MAC, 6842 H2C_CL_MAC_WOW, 6843 H2C_FUNC_ARP_OFLD, 0, 1, 6844 len); 6845 6846 ret = rtw89_h2c_tx(rtwdev, skb, false); 6847 if (ret) { 6848 rtw89_err(rtwdev, "failed to send h2c\n"); 6849 goto fail; 6850 } 6851 6852 return 0; 6853 6854 fail: 6855 dev_kfree_skb_any(skb); 6856 6857 return ret; 6858 } 6859 6860 #define H2C_DISCONNECT_DETECT_LEN 8 6861 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 6862 struct rtw89_vif *rtwvif, bool enable) 6863 { 6864 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6865 struct sk_buff *skb; 6866 u8 macid = rtwvif->mac_id; 6867 int ret; 6868 6869 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 6870 if (!skb) { 6871 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6872 return -ENOMEM; 6873 } 6874 6875 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 6876 6877 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 6878 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 6879 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 6880 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 6881 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 6882 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 6883 } 6884 6885 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6886 H2C_CAT_MAC, 6887 H2C_CL_MAC_WOW, 6888 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 6889 H2C_DISCONNECT_DETECT_LEN); 6890 6891 ret = rtw89_h2c_tx(rtwdev, skb, false); 6892 if (ret) { 6893 rtw89_err(rtwdev, "failed to send h2c\n"); 6894 goto fail; 6895 } 6896 6897 return 0; 6898 6899 fail: 6900 dev_kfree_skb_any(skb); 6901 6902 return ret; 6903 } 6904 6905 int rtw89_fw_h2c_cfg_pno(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6906 bool enable) 6907 { 6908 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6909 
struct cfg80211_sched_scan_request *nd_config = rtw_wow->nd_config; 6910 struct rtw89_h2c_cfg_nlo *h2c; 6911 u32 len = sizeof(*h2c); 6912 struct sk_buff *skb; 6913 int ret, i; 6914 6915 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6916 if (!skb) { 6917 rtw89_err(rtwdev, "failed to alloc skb for nlo\n"); 6918 return -ENOMEM; 6919 } 6920 6921 skb_put(skb, len); 6922 h2c = (struct rtw89_h2c_cfg_nlo *)skb->data; 6923 6924 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_NLO_W0_ENABLE) | 6925 le32_encode_bits(enable, RTW89_H2C_NLO_W0_IGNORE_CIPHER) | 6926 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_NLO_W0_MACID); 6927 6928 if (enable) { 6929 h2c->nlo_cnt = nd_config->n_match_sets; 6930 for (i = 0 ; i < nd_config->n_match_sets; i++) { 6931 h2c->ssid_len[i] = nd_config->match_sets[i].ssid.ssid_len; 6932 memcpy(h2c->ssid[i], nd_config->match_sets[i].ssid.ssid, 6933 nd_config->match_sets[i].ssid.ssid_len); 6934 } 6935 } 6936 6937 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6938 H2C_CAT_MAC, 6939 H2C_CL_MAC_WOW, 6940 H2C_FUNC_NLO, 0, 1, 6941 len); 6942 6943 ret = rtw89_h2c_tx(rtwdev, skb, false); 6944 if (ret) { 6945 rtw89_err(rtwdev, "failed to send h2c\n"); 6946 goto fail; 6947 } 6948 6949 return 0; 6950 6951 fail: 6952 dev_kfree_skb_any(skb); 6953 return ret; 6954 } 6955 6956 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6957 bool enable) 6958 { 6959 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6960 struct rtw89_h2c_wow_global *h2c; 6961 u8 macid = rtwvif->mac_id; 6962 u32 len = sizeof(*h2c); 6963 struct sk_buff *skb; 6964 int ret; 6965 6966 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6967 if (!skb) { 6968 rtw89_err(rtwdev, "failed to alloc skb for wow global\n"); 6969 return -ENOMEM; 6970 } 6971 6972 skb_put(skb, len); 6973 h2c = (struct rtw89_h2c_wow_global *)skb->data; 6974 6975 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) | 6976 le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) | 6977 le32_encode_bits(rtw_wow->ptk_alg, 6978 RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) | 6979 le32_encode_bits(rtw_wow->gtk_alg, 6980 RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO); 6981 h2c->key_info = rtw_wow->key_info; 6982 6983 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6984 H2C_CAT_MAC, 6985 H2C_CL_MAC_WOW, 6986 H2C_FUNC_WOW_GLOBAL, 0, 1, 6987 len); 6988 6989 ret = rtw89_h2c_tx(rtwdev, skb, false); 6990 if (ret) { 6991 rtw89_err(rtwdev, "failed to send h2c\n"); 6992 goto fail; 6993 } 6994 6995 return 0; 6996 6997 fail: 6998 dev_kfree_skb_any(skb); 6999 7000 return ret; 7001 } 7002 7003 #define H2C_WAKEUP_CTRL_LEN 4 7004 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 7005 struct rtw89_vif *rtwvif, 7006 bool enable) 7007 { 7008 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7009 struct sk_buff *skb; 7010 u8 macid = rtwvif->mac_id; 7011 int ret; 7012 7013 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 7014 if (!skb) { 7015 rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n"); 7016 return -ENOMEM; 7017 } 7018 7019 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 7020 7021 if (rtw_wow->pattern_cnt) 7022 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 7023 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 7024 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 7025 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 7026 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 7027 7028 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 7029 7030 
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7031 H2C_CAT_MAC, 7032 H2C_CL_MAC_WOW, 7033 H2C_FUNC_WAKEUP_CTRL, 0, 1, 7034 H2C_WAKEUP_CTRL_LEN); 7035 7036 ret = rtw89_h2c_tx(rtwdev, skb, false); 7037 if (ret) { 7038 rtw89_err(rtwdev, "failed to send h2c\n"); 7039 goto fail; 7040 } 7041 7042 return 0; 7043 7044 fail: 7045 dev_kfree_skb_any(skb); 7046 7047 return ret; 7048 } 7049 7050 #define H2C_WOW_CAM_UPD_LEN 24 7051 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 7052 struct rtw89_wow_cam_info *cam_info) 7053 { 7054 struct sk_buff *skb; 7055 int ret; 7056 7057 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 7058 if (!skb) { 7059 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 7060 return -ENOMEM; 7061 } 7062 7063 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 7064 7065 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 7066 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 7067 if (cam_info->valid) { 7068 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 7069 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 7070 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 7071 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 7072 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 7073 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 7074 cam_info->negative_pattern_match); 7075 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 7076 cam_info->skip_mac_hdr); 7077 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 7078 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 7079 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 7080 } 7081 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 7082 7083 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7084 H2C_CAT_MAC, 7085 H2C_CL_MAC_WOW, 7086 H2C_FUNC_WOW_CAM_UPD, 0, 1, 7087 H2C_WOW_CAM_UPD_LEN); 7088 7089 ret = rtw89_h2c_tx(rtwdev, skb, false); 7090 if (ret) { 7091 rtw89_err(rtwdev, "failed to send h2c\n"); 7092 goto fail; 7093 } 7094 7095 return 0; 7096 fail: 7097 dev_kfree_skb_any(skb); 7098 7099 return ret; 7100 } 7101 7102 int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, 7103 struct rtw89_vif *rtwvif, 7104 bool enable) 7105 { 7106 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 7107 struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info; 7108 struct rtw89_h2c_wow_gtk_ofld *h2c; 7109 u8 macid = rtwvif->mac_id; 7110 u32 len = sizeof(*h2c); 7111 u8 pkt_id_sa_query = 0; 7112 struct sk_buff *skb; 7113 u8 pkt_id_eapol = 0; 7114 int ret; 7115 7116 if (!rtw_wow->gtk_alg) 7117 return 0; 7118 7119 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7120 if (!skb) { 7121 rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n"); 7122 return -ENOMEM; 7123 } 7124 7125 skb_put(skb, len); 7126 h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; 7127 7128 if (!enable) 7129 goto hdr; 7130 7131 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 7132 RTW89_PKT_OFLD_TYPE_EAPOL_KEY, 7133 &pkt_id_eapol); 7134 if (ret) 7135 goto fail; 7136 7137 if (gtk_info->igtk_keyid) { 7138 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 7139 RTW89_PKT_OFLD_TYPE_SA_QUERY, 7140 &pkt_id_sa_query); 7141 if (ret) 7142 goto fail; 7143 } 7144 7145 /* not support TKIP yet */ 7146 h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) | 7147 le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) | 7148 le32_encode_bits(gtk_info->igtk_keyid ? 
						     1 : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
		  le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
		  le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
	h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
				   RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
		  le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
	h2c->gtk_info = rtw_wow->gtk_info;

hdr:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_GTK_OFLD, 0, 1,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}
	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_fwips(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		       bool enable)
{
	struct rtw89_wait_info *wait = &rtwdev->mac.ps_wait;
	struct rtw89_h2c_fwips *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw ips\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	h2c = (struct rtw89_h2c_fwips *)skb->data;

	h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_FW_IPS_W0_MACID) |
		  le32_encode_bits(enable, RTW89_H2C_FW_IPS_W0_ENABLE);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_IPS_CFG, 0, 1,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_PS_WAIT_COND_IPS_CFG);
}

int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
{
	struct rtw89_wait_info *wait = &rtwdev->wow.wait;
	struct rtw89_h2c_wow_aoac *h2c;
	u32 len = sizeof(*h2c);
	struct sk_buff *skb;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
		return -ENOMEM;
	}

	skb_put(skb, len);

	/* This H2C only notifies the firmware to generate an AOAC report C2H;
	 * it carries no parameters.
	 */
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
			      len);

	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_WOW_WAIT_COND_AOAC);
}

/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
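 * On TX failure the skb is freed here before returning, so callers must not
 * free it again.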
7235 */ 7236 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 7237 struct rtw89_wait_info *wait, unsigned int cond) 7238 { 7239 int ret; 7240 7241 ret = rtw89_h2c_tx(rtwdev, skb, false); 7242 if (ret) { 7243 rtw89_err(rtwdev, "failed to send h2c\n"); 7244 dev_kfree_skb_any(skb); 7245 return -EBUSY; 7246 } 7247 7248 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 7249 return 1; 7250 7251 return rtw89_wait_for_cond(wait, cond); 7252 } 7253 7254 #define H2C_ADD_MCC_LEN 16 7255 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 7256 const struct rtw89_fw_mcc_add_req *p) 7257 { 7258 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7259 struct sk_buff *skb; 7260 unsigned int cond; 7261 7262 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 7263 if (!skb) { 7264 rtw89_err(rtwdev, 7265 "failed to alloc skb for add mcc\n"); 7266 return -ENOMEM; 7267 } 7268 7269 skb_put(skb, H2C_ADD_MCC_LEN); 7270 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 7271 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 7272 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 7273 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 7274 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 7275 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 7276 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 7277 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 7278 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 7279 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 7280 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 7281 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 7282 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 7283 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 7284 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 7285 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 7286 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 7287 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 7288 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 7289 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 7290 7291 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7292 H2C_CAT_MAC, 7293 H2C_CL_MCC, 7294 H2C_FUNC_ADD_MCC, 0, 0, 7295 H2C_ADD_MCC_LEN); 7296 7297 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 7298 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7299 } 7300 7301 #define H2C_START_MCC_LEN 12 7302 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 7303 const struct rtw89_fw_mcc_start_req *p) 7304 { 7305 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7306 struct sk_buff *skb; 7307 unsigned int cond; 7308 7309 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 7310 if (!skb) { 7311 rtw89_err(rtwdev, 7312 "failed to alloc skb for start mcc\n"); 7313 return -ENOMEM; 7314 } 7315 7316 skb_put(skb, H2C_START_MCC_LEN); 7317 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 7318 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 7319 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 7320 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 7321 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 7322 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 7323 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 7324 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 7325 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 7326 7327 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7328 H2C_CAT_MAC, 7329 H2C_CL_MCC, 7330 H2C_FUNC_START_MCC, 0, 0, 7331 H2C_START_MCC_LEN); 7332 7333 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 7334 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7335 } 7336 7337 #define H2C_STOP_MCC_LEN 4 7338 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7339 bool prev_groups) 7340 { 7341 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7342 struct sk_buff *skb; 7343 unsigned int cond; 7344 7345 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 7346 if (!skb) { 7347 rtw89_err(rtwdev, 7348 "failed to alloc skb for stop mcc\n"); 7349 return -ENOMEM; 7350 } 7351 7352 skb_put(skb, H2C_STOP_MCC_LEN); 7353 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 7354 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 7355 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 7356 7357 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7358 H2C_CAT_MAC, 7359 H2C_CL_MCC, 7360 H2C_FUNC_STOP_MCC, 0, 0, 7361 H2C_STOP_MCC_LEN); 7362 7363 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 7364 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7365 } 7366 7367 #define H2C_DEL_MCC_GROUP_LEN 4 7368 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 7369 bool prev_groups) 7370 { 7371 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7372 struct sk_buff *skb; 7373 unsigned int cond; 7374 7375 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 7376 if (!skb) { 7377 rtw89_err(rtwdev, 7378 "failed to alloc skb for del mcc group\n"); 7379 return -ENOMEM; 7380 } 7381 7382 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 7383 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 7384 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 7385 7386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7387 H2C_CAT_MAC, 7388 H2C_CL_MCC, 7389 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 7390 H2C_DEL_MCC_GROUP_LEN); 7391 7392 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 7393 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7394 } 7395 7396 #define H2C_RESET_MCC_GROUP_LEN 4 7397 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 7398 { 7399 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7400 struct sk_buff *skb; 7401 unsigned int cond; 7402 7403 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 7404 if (!skb) { 7405 rtw89_err(rtwdev, 7406 "failed to alloc skb for reset mcc group\n"); 7407 return -ENOMEM; 7408 } 7409 7410 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 7411 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 7412 7413 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7414 H2C_CAT_MAC, 7415 H2C_CL_MCC, 7416 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 7417 H2C_RESET_MCC_GROUP_LEN); 7418 7419 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 7420 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7421 } 7422 7423 #define H2C_MCC_REQ_TSF_LEN 4 7424 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 7425 const struct rtw89_fw_mcc_tsf_req *req, 7426 struct rtw89_mac_mcc_tsf_rpt *rpt) 7427 { 7428 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7429 struct rtw89_mac_mcc_tsf_rpt *tmp; 7430 struct sk_buff *skb; 7431 unsigned int cond; 7432 int ret; 7433 7434 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 7435 if (!skb) { 7436 rtw89_err(rtwdev, 7437 "failed to alloc skb for mcc req tsf\n"); 7438 return -ENOMEM; 7439 } 7440 7441 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 7442 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 7443 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 7444 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 7445 7446 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7447 H2C_CAT_MAC, 7448 H2C_CL_MCC, 7449 H2C_FUNC_MCC_REQ_TSF, 0, 0, 7450 H2C_MCC_REQ_TSF_LEN); 7451 7452 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 7453 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7454 if (ret) 7455 return ret; 7456 7457 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 7458 *rpt = *tmp; 7459 7460 return 0; 7461 } 7462 7463 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 7464 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 7465 u8 *bitmap) 7466 { 7467 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7468 struct sk_buff *skb; 7469 unsigned int cond; 7470 u8 map_len; 7471 u8 h2c_len; 7472 7473 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 7474 map_len = RTW89_MAX_MAC_ID_NUM / 8; 7475 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 7476 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 7477 if (!skb) { 7478 rtw89_err(rtwdev, 7479 "failed to alloc skb for mcc macid bitmap\n"); 7480 return -ENOMEM; 7481 } 7482 7483 skb_put(skb, h2c_len); 7484 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 7485 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 7486 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 7487 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 7488 7489 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7490 H2C_CAT_MAC, 7491 H2C_CL_MCC, 7492 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 7493 h2c_len); 7494 7495 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 7496 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7497 } 7498 7499 #define H2C_MCC_SYNC_LEN 4 7500 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 7501 u8 target, u8 offset) 7502 { 7503 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7504 struct sk_buff *skb; 7505 unsigned int cond; 7506 7507 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 7508 if (!skb) { 7509 rtw89_err(rtwdev, 7510 "failed to alloc skb for mcc sync\n"); 7511 return -ENOMEM; 7512 } 7513 7514 skb_put(skb, H2C_MCC_SYNC_LEN); 7515 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 7516 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 7517 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 7518 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 7519 7520 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7521 H2C_CAT_MAC, 7522 H2C_CL_MCC, 7523 H2C_FUNC_MCC_SYNC, 0, 0, 7524 H2C_MCC_SYNC_LEN); 7525 7526 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 7527 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7528 } 7529 7530 #define H2C_MCC_SET_DURATION_LEN 20 7531 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 7532 const struct rtw89_fw_mcc_duration *p) 7533 { 7534 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7535 struct sk_buff *skb; 7536 unsigned int cond; 7537 7538 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 7539 if (!skb) { 7540 rtw89_err(rtwdev, 7541 "failed to alloc skb for mcc set duration\n"); 7542 return -ENOMEM; 7543 } 7544 7545 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
7546 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 7547 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 7548 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 7549 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 7550 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 7551 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 7552 p->start_tsf_low); 7553 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 7554 p->start_tsf_high); 7555 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 7556 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 7557 7558 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7559 H2C_CAT_MAC, 7560 H2C_CL_MCC, 7561 H2C_FUNC_MCC_SET_DURATION, 0, 0, 7562 H2C_MCC_SET_DURATION_LEN); 7563 7564 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 7565 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7566 } 7567 7568 static 7569 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 7570 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 7571 struct rtw89_h2c_mrc_add_slot *slot_h2c) 7572 { 7573 bool fill_h2c = !!slot_h2c; 7574 unsigned int i; 7575 7576 if (!fill_h2c) 7577 goto calc_len; 7578 7579 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 7580 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 7581 le32_encode_bits(slot_arg->courtesy_en, 7582 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 7583 le32_encode_bits(slot_arg->role_num, 7584 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 7585 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 7586 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 7587 le32_encode_bits(slot_arg->courtesy_target, 7588 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 7589 7590 for (i = 0; i < slot_arg->role_num; i++) { 7591 slot_h2c->roles[i].w0 = 7592 le32_encode_bits(slot_arg->roles[i].macid, 7593 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 7594 le32_encode_bits(slot_arg->roles[i].role_type, 7595 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 7596 le32_encode_bits(slot_arg->roles[i].is_master, 7597 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 7598 le32_encode_bits(slot_arg->roles[i].en_tx_null, 7599 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 7600 le32_encode_bits(false, 7601 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 7602 le32_encode_bits(false, 7603 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 7604 slot_h2c->roles[i].w1 = 7605 le32_encode_bits(slot_arg->roles[i].central_ch, 7606 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 7607 le32_encode_bits(slot_arg->roles[i].primary_ch, 7608 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 7609 le32_encode_bits(slot_arg->roles[i].bw, 7610 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 7611 le32_encode_bits(slot_arg->roles[i].band, 7612 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 7613 le32_encode_bits(slot_arg->roles[i].null_early, 7614 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 7615 le32_encode_bits(false, 7616 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 7617 le32_encode_bits(true, 7618 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 7619 slot_h2c->roles[i].macid_main_bitmap = 7620 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 7621 slot_h2c->roles[i].macid_paired_bitmap = 7622 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 7623 } 7624 7625 calc_len: 7626 return struct_size(slot_h2c, roles, slot_arg->role_num); 7627 } 7628 7629 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 7630 const struct rtw89_fw_mrc_add_arg *arg) 7631 { 7632 struct rtw89_h2c_mrc_add *h2c_head; 7633 struct sk_buff *skb; 7634 unsigned int i; 7635 void *tmp; 7636 u32 
len; 7637 int ret; 7638 7639 len = sizeof(*h2c_head); 7640 for (i = 0; i < arg->slot_num; i++) 7641 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 7642 7643 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7644 if (!skb) { 7645 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 7646 return -ENOMEM; 7647 } 7648 7649 skb_put(skb, len); 7650 tmp = skb->data; 7651 7652 h2c_head = tmp; 7653 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 7654 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 7655 le32_encode_bits(arg->sch_type, 7656 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 7657 le32_encode_bits(arg->slot_num, 7658 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 7659 le32_encode_bits(arg->btc_in_sch, 7660 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 7661 7662 tmp += sizeof(*h2c_head); 7663 for (i = 0; i < arg->slot_num; i++) 7664 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 7665 7666 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7667 H2C_CAT_MAC, 7668 H2C_CL_MRC, 7669 H2C_FUNC_ADD_MRC, 0, 0, 7670 len); 7671 7672 ret = rtw89_h2c_tx(rtwdev, skb, false); 7673 if (ret) { 7674 rtw89_err(rtwdev, "failed to send h2c\n"); 7675 dev_kfree_skb_any(skb); 7676 return -EBUSY; 7677 } 7678 7679 return 0; 7680 } 7681 7682 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 7683 const struct rtw89_fw_mrc_start_arg *arg) 7684 { 7685 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7686 struct rtw89_h2c_mrc_start *h2c; 7687 u32 len = sizeof(*h2c); 7688 struct sk_buff *skb; 7689 unsigned int cond; 7690 7691 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7692 if (!skb) { 7693 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 7694 return -ENOMEM; 7695 } 7696 7697 skb_put(skb, len); 7698 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 7699 7700 h2c->w0 = le32_encode_bits(arg->sch_idx, 7701 RTW89_H2C_MRC_START_W0_SCH_IDX) | 7702 le32_encode_bits(arg->old_sch_idx, 7703 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 7704 le32_encode_bits(arg->action, 7705 RTW89_H2C_MRC_START_W0_ACTION); 7706 7707 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 7708 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 7709 7710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7711 H2C_CAT_MAC, 7712 H2C_CL_MRC, 7713 H2C_FUNC_START_MRC, 0, 0, 7714 len); 7715 7716 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 7717 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7718 } 7719 7720 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx, u8 slot_idx) 7721 { 7722 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7723 struct rtw89_h2c_mrc_del *h2c; 7724 u32 len = sizeof(*h2c); 7725 struct sk_buff *skb; 7726 unsigned int cond; 7727 7728 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7729 if (!skb) { 7730 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 7731 return -ENOMEM; 7732 } 7733 7734 skb_put(skb, len); 7735 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 7736 7737 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX) | 7738 le32_encode_bits(slot_idx, RTW89_H2C_MRC_DEL_W0_STOP_SLOT_IDX); 7739 7740 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7741 H2C_CAT_MAC, 7742 H2C_CL_MRC, 7743 H2C_FUNC_DEL_MRC, 0, 0, 7744 len); 7745 7746 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 7747 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7748 } 7749 7750 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 7751 const struct rtw89_fw_mrc_req_tsf_arg *arg, 7752 struct rtw89_mac_mrc_tsf_rpt *rpt) 7753 { 7754 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7755 struct 
rtw89_h2c_mrc_req_tsf *h2c; 7756 struct rtw89_mac_mrc_tsf_rpt *tmp; 7757 struct sk_buff *skb; 7758 unsigned int i; 7759 u32 len; 7760 int ret; 7761 7762 len = struct_size(h2c, infos, arg->num); 7763 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7764 if (!skb) { 7765 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 7766 return -ENOMEM; 7767 } 7768 7769 skb_put(skb, len); 7770 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 7771 7772 h2c->req_tsf_num = arg->num; 7773 for (i = 0; i < arg->num; i++) 7774 h2c->infos[i] = 7775 u8_encode_bits(arg->infos[i].band, 7776 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 7777 u8_encode_bits(arg->infos[i].port, 7778 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 7779 7780 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7781 H2C_CAT_MAC, 7782 H2C_CL_MRC, 7783 H2C_FUNC_MRC_REQ_TSF, 0, 0, 7784 len); 7785 7786 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 7787 if (ret) 7788 return ret; 7789 7790 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 7791 *rpt = *tmp; 7792 7793 return 0; 7794 } 7795 7796 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 7797 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 7798 { 7799 struct rtw89_h2c_mrc_upd_bitmap *h2c; 7800 u32 len = sizeof(*h2c); 7801 struct sk_buff *skb; 7802 int ret; 7803 7804 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7805 if (!skb) { 7806 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 7807 return -ENOMEM; 7808 } 7809 7810 skb_put(skb, len); 7811 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 7812 7813 h2c->w0 = le32_encode_bits(arg->sch_idx, 7814 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 7815 le32_encode_bits(arg->action, 7816 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 7817 le32_encode_bits(arg->macid, 7818 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 7819 h2c->w1 = le32_encode_bits(arg->client_macid, 7820 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 7821 7822 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7823 H2C_CAT_MAC, 7824 H2C_CL_MRC, 7825 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 7826 len); 7827 7828 ret = rtw89_h2c_tx(rtwdev, skb, false); 7829 if (ret) { 7830 rtw89_err(rtwdev, "failed to send h2c\n"); 7831 dev_kfree_skb_any(skb); 7832 return -EBUSY; 7833 } 7834 7835 return 0; 7836 } 7837 7838 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 7839 const struct rtw89_fw_mrc_sync_arg *arg) 7840 { 7841 struct rtw89_h2c_mrc_sync *h2c; 7842 u32 len = sizeof(*h2c); 7843 struct sk_buff *skb; 7844 int ret; 7845 7846 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7847 if (!skb) { 7848 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 7849 return -ENOMEM; 7850 } 7851 7852 skb_put(skb, len); 7853 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 7854 7855 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 7856 le32_encode_bits(arg->src.port, 7857 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 7858 le32_encode_bits(arg->src.band, 7859 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 7860 le32_encode_bits(arg->dest.port, 7861 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 7862 le32_encode_bits(arg->dest.band, 7863 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 7864 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 7865 7866 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7867 H2C_CAT_MAC, 7868 H2C_CL_MRC, 7869 H2C_FUNC_MRC_SYNC, 0, 0, 7870 len); 7871 7872 ret = rtw89_h2c_tx(rtwdev, skb, false); 7873 if (ret) { 7874 rtw89_err(rtwdev, "failed to send h2c\n"); 7875 dev_kfree_skb_any(skb); 7876 return -EBUSY; 7877 } 7878 7879 return 0; 7880 } 7881 7882 int 
rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 7883 const struct rtw89_fw_mrc_upd_duration_arg *arg) 7884 { 7885 struct rtw89_h2c_mrc_upd_duration *h2c; 7886 struct sk_buff *skb; 7887 unsigned int i; 7888 u32 len; 7889 int ret; 7890 7891 len = struct_size(h2c, slots, arg->slot_num); 7892 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7893 if (!skb) { 7894 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 7895 return -ENOMEM; 7896 } 7897 7898 skb_put(skb, len); 7899 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 7900 7901 h2c->w0 = le32_encode_bits(arg->sch_idx, 7902 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 7903 le32_encode_bits(arg->slot_num, 7904 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 7905 le32_encode_bits(false, 7906 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 7907 7908 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 7909 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 7910 7911 for (i = 0; i < arg->slot_num; i++) { 7912 h2c->slots[i] = 7913 le32_encode_bits(arg->slots[i].slot_idx, 7914 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 7915 le32_encode_bits(arg->slots[i].duration, 7916 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 7917 } 7918 7919 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7920 H2C_CAT_MAC, 7921 H2C_CL_MRC, 7922 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 7923 len); 7924 7925 ret = rtw89_h2c_tx(rtwdev, skb, false); 7926 if (ret) { 7927 rtw89_err(rtwdev, "failed to send h2c\n"); 7928 dev_kfree_skb_any(skb); 7929 return -EBUSY; 7930 } 7931 7932 return 0; 7933 } 7934 7935 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 7936 { 7937 static const u8 zeros[U8_MAX] = {}; 7938 7939 return memcmp(ext_ptr, zeros, ext_len) == 0; 7940 } 7941 7942 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 7943 ({ \ 7944 u8 __var_sz = sizeof(*(e)); \ 7945 bool __accept; \ 7946 if (__var_sz >= (ent_sz)) \ 7947 __accept = true; \ 7948 else \ 7949 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 7950 (ent_sz) - __var_sz);\ 7951 __accept; \ 7952 }) 7953 7954 static bool 7955 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 7956 const void *cursor, 7957 const struct rtw89_txpwr_conf *conf) 7958 { 7959 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7960 return false; 7961 7962 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 7963 return false; 7964 7965 switch (e->rs) { 7966 case RTW89_RS_CCK: 7967 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 7968 return false; 7969 break; 7970 case RTW89_RS_OFDM: 7971 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 7972 return false; 7973 break; 7974 case RTW89_RS_MCS: 7975 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 7976 e->nss >= RTW89_NSS_NUM || 7977 e->ofdma >= RTW89_OFDMA_NUM) 7978 return false; 7979 break; 7980 case RTW89_RS_HEDCM: 7981 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 7982 e->nss >= RTW89_NSS_HEDCM_NUM || 7983 e->ofdma >= RTW89_OFDMA_NUM) 7984 return false; 7985 break; 7986 case RTW89_RS_OFFSET: 7987 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 7988 return false; 7989 break; 7990 default: 7991 return false; 7992 } 7993 7994 return true; 7995 } 7996 7997 static 7998 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 7999 const struct rtw89_txpwr_table *tbl) 8000 { 8001 const struct rtw89_txpwr_conf *conf = tbl->data; 8002 struct rtw89_fw_txpwr_byrate_entry entry = {}; 8003 struct rtw89_txpwr_byrate *byr_head; 8004 struct rtw89_rate_desc desc = {}; 8005 const void *cursor; 8006 u32 data; 8007 s8 *byr; 8008 int i; 8009 8010 
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8011 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 8012 continue; 8013 8014 byr_head = &rtwdev->byr[entry.band][entry.bw]; 8015 data = le32_to_cpu(entry.data); 8016 desc.ofdma = entry.ofdma; 8017 desc.nss = entry.nss; 8018 desc.rs = entry.rs; 8019 8020 for (i = 0; i < entry.len; i++, data >>= 8) { 8021 desc.idx = entry.shf + i; 8022 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 8023 *byr = data & 0xff; 8024 } 8025 } 8026 } 8027 8028 static bool 8029 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 8030 const void *cursor, 8031 const struct rtw89_txpwr_conf *conf) 8032 { 8033 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8034 return false; 8035 8036 if (e->bw >= RTW89_2G_BW_NUM) 8037 return false; 8038 if (e->nt >= RTW89_NTX_NUM) 8039 return false; 8040 if (e->rs >= RTW89_RS_LMT_NUM) 8041 return false; 8042 if (e->bf >= RTW89_BF_NUM) 8043 return false; 8044 if (e->regd >= RTW89_REGD_NUM) 8045 return false; 8046 if (e->ch_idx >= RTW89_2G_CH_NUM) 8047 return false; 8048 8049 return true; 8050 } 8051 8052 static 8053 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 8054 { 8055 const struct rtw89_txpwr_conf *conf = &data->conf; 8056 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 8057 const void *cursor; 8058 8059 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8060 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 8061 continue; 8062 8063 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8064 [entry.ch_idx] = entry.v; 8065 } 8066 } 8067 8068 static bool 8069 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 8070 const void *cursor, 8071 const struct rtw89_txpwr_conf *conf) 8072 { 8073 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8074 return false; 8075 8076 if (e->bw >= RTW89_5G_BW_NUM) 8077 return false; 8078 if (e->nt >= RTW89_NTX_NUM) 8079 return false; 8080 if (e->rs >= RTW89_RS_LMT_NUM) 8081 return false; 8082 if (e->bf >= RTW89_BF_NUM) 8083 return false; 8084 if (e->regd >= RTW89_REGD_NUM) 8085 return false; 8086 if (e->ch_idx >= RTW89_5G_CH_NUM) 8087 return false; 8088 8089 return true; 8090 } 8091 8092 static 8093 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 8094 { 8095 const struct rtw89_txpwr_conf *conf = &data->conf; 8096 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 8097 const void *cursor; 8098 8099 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8100 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 8101 continue; 8102 8103 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8104 [entry.ch_idx] = entry.v; 8105 } 8106 } 8107 8108 static bool 8109 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 8110 const void *cursor, 8111 const struct rtw89_txpwr_conf *conf) 8112 { 8113 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8114 return false; 8115 8116 if (e->bw >= RTW89_6G_BW_NUM) 8117 return false; 8118 if (e->nt >= RTW89_NTX_NUM) 8119 return false; 8120 if (e->rs >= RTW89_RS_LMT_NUM) 8121 return false; 8122 if (e->bf >= RTW89_BF_NUM) 8123 return false; 8124 if (e->regd >= RTW89_REGD_NUM) 8125 return false; 8126 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8127 return false; 8128 if (e->ch_idx >= RTW89_6G_CH_NUM) 8129 return false; 8130 8131 return true; 8132 } 8133 8134 static 8135 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 8136 { 8137 const struct 
rtw89_txpwr_conf *conf = &data->conf; 8138 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 8139 const void *cursor; 8140 8141 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8142 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 8143 continue; 8144 8145 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 8146 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 8147 } 8148 } 8149 8150 static bool 8151 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 8152 const void *cursor, 8153 const struct rtw89_txpwr_conf *conf) 8154 { 8155 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8156 return false; 8157 8158 if (e->ru >= RTW89_RU_NUM) 8159 return false; 8160 if (e->nt >= RTW89_NTX_NUM) 8161 return false; 8162 if (e->regd >= RTW89_REGD_NUM) 8163 return false; 8164 if (e->ch_idx >= RTW89_2G_CH_NUM) 8165 return false; 8166 8167 return true; 8168 } 8169 8170 static 8171 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 8172 { 8173 const struct rtw89_txpwr_conf *conf = &data->conf; 8174 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 8175 const void *cursor; 8176 8177 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8178 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 8179 continue; 8180 8181 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8182 } 8183 } 8184 8185 static bool 8186 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 8187 const void *cursor, 8188 const struct rtw89_txpwr_conf *conf) 8189 { 8190 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8191 return false; 8192 8193 if (e->ru >= RTW89_RU_NUM) 8194 return false; 8195 if (e->nt >= RTW89_NTX_NUM) 8196 return false; 8197 if (e->regd >= RTW89_REGD_NUM) 8198 return false; 8199 if (e->ch_idx >= RTW89_5G_CH_NUM) 8200 return false; 8201 8202 return true; 8203 } 8204 8205 static 8206 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 8207 { 8208 const struct rtw89_txpwr_conf *conf = &data->conf; 8209 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 8210 const void *cursor; 8211 8212 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8213 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 8214 continue; 8215 8216 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 8217 } 8218 } 8219 8220 static bool 8221 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 8222 const void *cursor, 8223 const struct rtw89_txpwr_conf *conf) 8224 { 8225 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8226 return false; 8227 8228 if (e->ru >= RTW89_RU_NUM) 8229 return false; 8230 if (e->nt >= RTW89_NTX_NUM) 8231 return false; 8232 if (e->regd >= RTW89_REGD_NUM) 8233 return false; 8234 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 8235 return false; 8236 if (e->ch_idx >= RTW89_6G_CH_NUM) 8237 return false; 8238 8239 return true; 8240 } 8241 8242 static 8243 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 8244 { 8245 const struct rtw89_txpwr_conf *conf = &data->conf; 8246 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 8247 const void *cursor; 8248 8249 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8250 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 8251 continue; 8252 8253 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 8254 [entry.ch_idx] = entry.v; 8255 } 8256 } 8257 8258 static bool 8259 fw_tx_shape_lmt_entry_valid(const 
struct rtw89_fw_tx_shape_lmt_entry *e, 8260 const void *cursor, 8261 const struct rtw89_txpwr_conf *conf) 8262 { 8263 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8264 return false; 8265 8266 if (e->band >= RTW89_BAND_NUM) 8267 return false; 8268 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 8269 return false; 8270 if (e->regd >= RTW89_REGD_NUM) 8271 return false; 8272 8273 return true; 8274 } 8275 8276 static 8277 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 8278 { 8279 const struct rtw89_txpwr_conf *conf = &data->conf; 8280 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 8281 const void *cursor; 8282 8283 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8284 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 8285 continue; 8286 8287 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 8288 } 8289 } 8290 8291 static bool 8292 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 8293 const void *cursor, 8294 const struct rtw89_txpwr_conf *conf) 8295 { 8296 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 8297 return false; 8298 8299 if (e->band >= RTW89_BAND_NUM) 8300 return false; 8301 if (e->regd >= RTW89_REGD_NUM) 8302 return false; 8303 8304 return true; 8305 } 8306 8307 static 8308 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 8309 { 8310 const struct rtw89_txpwr_conf *conf = &data->conf; 8311 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 8312 const void *cursor; 8313 8314 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 8315 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 8316 continue; 8317 8318 data->v[entry.band][entry.regd] = entry.v; 8319 } 8320 } 8321 8322 const struct rtw89_rfe_parms * 8323 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 8324 const struct rtw89_rfe_parms *init) 8325 { 8326 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 8327 struct rtw89_rfe_parms *parms; 8328 8329 if (!rfe_data) 8330 return init; 8331 8332 parms = &rfe_data->rfe_parms; 8333 if (init) 8334 *parms = *init; 8335 8336 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 8337 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 8338 rfe_data->byrate.tbl.size = 0; /* don't care here */ 8339 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 8340 parms->byr_tbl = &rfe_data->byrate.tbl; 8341 } 8342 8343 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 8344 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 8345 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 8346 } 8347 8348 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 8349 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 8350 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 8351 } 8352 8353 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 8354 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 8355 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 8356 } 8357 8358 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 8359 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 8360 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 8361 } 8362 8363 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 8364 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 8365 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 8366 } 8367 8368 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 8369 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 8370 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 8371 } 8372 8373 if 
(rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 8374 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 8375 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 8376 } 8377 8378 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 8379 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 8380 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 8381 } 8382 8383 return parms; 8384 } 8385