// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "reg.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
{
	u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);

	return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
}

#define FWDL_WAIT_CNT 400000
int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
{
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(_fw_get_rdy, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
			       struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info;
	const u8 *fw_end = fw + len;
	const u8 *fwdynhdr;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = GET_FW_HDR_SEC_NUM(fw);
	base_hdr_len = RTW89_FW_HDR_SIZE +
		       info->section_num * RTW89_FW_SECTION_HDR_SIZE;
	info->dynamic_hdr_en = GET_FW_HDR_DYN_HDR(fw);

	if (info->dynamic_hdr_en) {
		info->hdr_len = GET_FW_HDR_LEN(fw);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = fw + base_hdr_len;
		if (GET_FW_DYNHDR_LEN(fwdynhdr) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	fw += RTW89_FW_HDR_SIZE;
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
		if (GET_FWSECTION_HDR_CHECKSUM(fw))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = GET_FWSECTION_HDR_REDL(fw);
		section_info->dladdr =
			GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		fw += RTW89_FW_SECTION_HDR_SIZE;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const u8 *mfw = fw_info->firmware->data;
	u32 mfw_len = fw_info->firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->cv != rtwdev->hal.cv ||
		    mfw_info->type != type ||
		    mfw_info->mp)
			continue;

		fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
		fw_suit->size = le32_to_cpu(mfw_info->size);
		return 0;
	}

	rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;
}

static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
				enum rtw89_fw_type type,
				struct rtw89_fw_suit *fw_suit)
{
	const u8 *hdr = fw_suit->data;

	fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
	fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
	fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
	fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
	fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
	fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
	fw_suit->build_date = GET_FW_HDR_DATE(hdr);
	fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
	fw_suit->build_min = GET_FW_HDR_MIN(hdr);
	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSERION(hdr);

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->cmd_ver, type);
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
	if (ret)
		return ret;

	rtw89_fw_update_ver(rtwdev, type, fw_suit);

	return 0;
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}
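/*
 * Feature table: each entry gates an RTW89_FW_FEATURE_* flag on the firmware
 * version of a given chip, using the comparison selected by the _cond
 * argument ("ge"/"le" above). For example,
 *
 *	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD)
 *
 * enables SCAN_OFFLOAD on 8852A once the normal firmware is v0.13.35.0 or
 * newer. Entries are evaluated by rtw89_fw_recognize_features() and
 * rtw89_early_fw_feature_recognize() below.
 */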
static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
};

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct __fw_feat_cfg *ent;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;
	int i;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		ent = &fw_feat_tbl[i];
		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(suit_ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, &rtwdev->fw);
	}
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 u32 *early_feat_map)
{
	union rtw89_compat_fw_hdr buf = {};
	const struct firmware *firmware;
	bool full_req = false;
	u32 ver_code;
	int ret;
	int i;

	/* If SECURITY_LOADPIN_ENFORCE is enabled, reading partial files will
	 * be denied (-EPERM), and we would not get the firmware contents we
	 * expect. In that case, request the full firmware here.
	 */
	if (IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE))
		full_req = true;

	if (full_req)
		ret = request_firmware(&firmware, chip->fw_name, device);
	else
		ret = request_partial_firmware_into_buf(&firmware, chip->fw_name,
							device, &buf, sizeof(buf),
							0);

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	if (full_req)
		ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);
	else
		ver_code = rtw89_compat_fw_hdr_ver_code(&buf);

	if (!ver_code)
		goto out;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			*early_feat_map |= BIT(ent->feature);
	}

out:
	if (full_req)
		return firmware;

	release_firmware(firmware);
	return NULL;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	int ret;

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
	if (ret)
		return ret;

	/* It still works even if the WoWLAN firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}
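/*
 * Every H2C (host-to-card) command carries an 8-byte fwcmd header in front of
 * its payload: hdr0 holds the delivery type, category, class, function and a
 * rolling sequence number; hdr1 holds the total length (payload plus header)
 * and the REC_ACK/DONE_ACK flags. rtw89_h2c_pkt_set_hdr() additionally forces
 * a receive-ack request whenever the sequence number is a multiple of four.
 */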
void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	u8 val;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	struct rtw89_fw_bin_info info;
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;
	u8 val;
	int ret;

	rtw89_mac_disable_cpu(rtwdev);
	ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
	if (ret)
		return ret;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
		return -ENOENT;
	}

	ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		goto fwdl_err;
	}

	ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
				       1, FWDL_WAIT_CNT, false,
				       rtwdev, R_AX_WCPU_FW_CTRL);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		goto fwdl_err;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len - info.dynamic_hdr_len);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	ret = rtw89_fw_download_main(rtwdev, fw, &info);
	if (ret) {
		ret = -EBUSY;
		goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->completion);
	if (!fw->firmware)
		return -EINVAL;

	return 0;
}

static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
{
	struct rtw89_fw_info *fw = context;
	struct rtw89_dev *rtwdev = fw->rtwdev;

	if (!firmware || !firmware->data) {
		rtw89_err(rtwdev, "failed to request firmware\n");
		complete_all(&fw->completion);
		return;
	}

	fw->firmware = firmware;
	complete_all(&fw->completion);
}

int rtw89_load_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;
	const char *fw_name = rtwdev->chip->fw_name;
	int ret;

	fw->rtwdev = rtwdev;
	init_completion(&fw->completion);

	if (fw->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&fw->completion);
		return 0;
	}

	ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
				      GFP_KERNEL, fw, rtw89_load_firmware_cb);
	if (ret) {
		rtw89_err(rtwdev, "failed to async firmware request\n");
		return ret;
	}

	return 0;
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	rtw89_wait_firmware_completion(rtwdev);

	if (fw->firmware) {
		release_firmware(fw->firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->firmware = NULL;
	}
}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);
#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_v1)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_v1) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_dynamic_ba_cam_v1(struct rtw89_dev *rtwdev,
					       u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_dynamic_ba_cam_v1(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	list_add_tail(&info->list, &rtw_wow->pkt_list);
	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		return ret;

	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
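/*
 * Leisure power-save (LPS) parameters. The driver currently uses a fixed
 * policy: RLBM 1, smart-PS 1, awake interval 1 and all uAPSD ACs disabled;
 * only the macid, PS mode and last RPWM value vary per request.
 */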
#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
	u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}
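/*
 * Association-time CMAC table update. The nominal packet padding fields are
 * derived from the peer's HE PPE thresholds by __get_sta_he_pkt_padding()
 * above: for each bandwidth, the PPET16/PPET8 pair is mapped to a padding
 * code (0, 1 or 2); when no PPE threshold is present, the nominal padding
 * from HE PHY capability byte 9 is used for every bandwidth.
 */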
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (rtwsta->cctl_tx_time) {
		SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
		SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
	}
	if (rtwsta->cctl_tx_retry_limit) {
		SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
		SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
				 struct rtw89_sta *rtwsta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	int ret;

	if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
		return 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);

	__rtw89_fw_h2c_set_tx_path(rtwdev, skb);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_BCN_BASE_LEN 12
int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
	struct sk_buff *skb;
	struct sk_buff *skb_beacon;
	u16 tim_offset;
	int bcn_total_len;
	u16 beacon_rate;
	int ret;

	if (vif->p2p)
		beacon_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		beacon_rate = RTW89_HW_RATE_CCK1;
	else
		beacon_rate = RTW89_HW_RATE_OFDM6;

	skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
					      NULL, 0);
	if (!skb_beacon) {
		rtw89_err(rtwdev, "failed to get beacon skb\n");
		return -ENOMEM;
	}

	bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		dev_kfree_skb_any(skb_beacon);
		return -ENOMEM;
	}
	skb_put(skb, H2C_BCN_BASE_LEN);

	SET_BCN_UPD_PORT(skb->data, rtwvif->port);
	SET_BCN_UPD_MBSSID(skb->data, 0);
	SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx);
	SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset);
	SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id);
	SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL);
	SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE);
	SET_BCN_UPD_RATE(skb->data, beacon_rate);

	skb_put_data(skb, skb_beacon->data, skb_beacon->len);
	dev_kfree_skb_any(skb_beacon);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_BCN_UPD, 0, 1,
			      bcn_total_len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

#define H2C_ROLE_MAINTAIN_LEN 4
int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev,
			       struct rtw89_vif *rtwvif,
			       struct rtw89_sta *rtwsta,
			       enum rtw89_upd_mode upd_mode)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role;
	int ret;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) {
		if (rtwsta)
			self_role = RTW89_SELF_ROLE_AP_CLIENT;
		else
			self_role = rtwvif->self_role;
	} else {
		self_role = rtwvif->self_role;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_ROLE_MAINTAIN_LEN);
	SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id);
	SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role);
	SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
	SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
			      H2C_ROLE_MAINTAIN_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_JOIN_INFO_LEN 4
int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			   struct rtw89_sta *rtwsta, bool dis_conn)
{
	struct sk_buff *skb;
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u8 self_role = rtwvif->self_role;
	u8 net_type = rtwvif->net_type;
	int ret;

	if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) {
		self_role = RTW89_SELF_ROLE_AP_CLIENT;
		net_type = dis_conn ? RTW89_NET_TYPE_NO_LINK : net_type;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_JOIN_INFO_LEN);
	SET_JOININFO_MACID(skb->data, mac_id);
	SET_JOININFO_OP(skb->data, dis_conn);
	SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
	SET_JOININFO_WMM(skb->data, rtwvif->wmm);
	SET_JOININFO_TGR(skb->data, rtwvif->trigger);
	SET_JOININFO_ISHESTA(skb->data, 0);
	SET_JOININFO_DLBW(skb->data, 0);
	SET_JOININFO_TF_MAC_PAD(skb->data, 0);
	SET_JOININFO_DL_T_PE(skb->data, 0);
	SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
	SET_JOININFO_NET_TYPE(skb->data, net_type);
	SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
	SET_JOININFO_SELF_ROLE(skb->data, self_role);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
			      H2C_FUNC_MAC_JOININFO, 0, 1,
			      H2C_JOIN_INFO_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
			     bool pause)
{
	struct rtw89_fw_macid_pause_grp h2c = {{0}};
	u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
	if (pause)
		h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
	skb_put_data(skb, &h2c, len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_EDCA_LEN 12
int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			  u8 ac, u32 val)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_EDCA_LEN);
	RTW89_SET_EDCA_SEL(skb->data, 0);
	RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
	RTW89_SET_EDCA_WMM(skb->data, 0);
	RTW89_SET_EDCA_AC(skb->data, ac);
	RTW89_SET_EDCA_PARAM(skb->data, val);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_USR_EDCA, 0, 1,
			      H2C_EDCA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_TSF32_TOGL_LEN 4
int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			      bool en)
{
	struct sk_buff *skb;
	u16 early_us = en ? 2000 : 0;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_TSF32_TOGL_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TSF32_TOGL, 0, 0,
			      H2C_TSF32_TOGL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
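/*
 * Rate adaptation config. The 64-bit ra_mask is split byte-wise into the
 * RA_MASK_0..4 fields (only the low five bytes are forwarded); the remaining
 * fields mirror struct rtw89_ra_info. When @csi is true, the same command
 * also programs the fixed/auto CSI rate selection for the beamformee path.
 */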
#define H2C_RA_LEN 16
int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
{
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RA_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_RA_LEN);
	cmd = skb->data;
	rtw89_debug(rtwdev, RTW89_DBG_RA,
		    "ra cmd msk: %llx ", ra->ra_mask);

	RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
	RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
	RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
	RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
	RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
	RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
	RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
	RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
	RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
	RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
	RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
	RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
	RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
	RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
	RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
	RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
	RTW89_SET_FWCMD_RA_FIX_GILTF_EN(cmd, ra->fix_giltf_en);
	RTW89_SET_FWCMD_RA_FIX_GILTF(cmd, ra->fix_giltf);

	if (csi) {
		RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
		RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
		RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
		RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
		RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
		RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
		RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
		RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
			      H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
			      H2C_RA_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVHDR 2
#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_dm *dm = &btc->dm;
	struct rtw89_btc_init_info *init_info = &dm->init_info;
	struct rtw89_btc_module *module = &init_info->module;
	struct rtw89_btc_ant_info *ant = &module->ant;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_INIT);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
	RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
	RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
	RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
	RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);

	RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
	RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
	RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
	RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);

	RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
	RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
	RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
	RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
	RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
	RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_INIT);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
	(4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)

int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role *active = role_info->active_role;
	struct sk_buff *skb;
	u32 len;
	u8 offset = 0;
	u8 *cmd;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)

int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
	struct sk_buff *skb;
	u32 len;
	u8 *cmd, offset;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	offset = PORT_DATA_OFFSET;
	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
	}

	offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
	RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
	RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset);
	RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
	RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
	RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
	if (ver->fcxctrl == 0)
		RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_CTRL);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
	RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
	RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
	RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
	RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      H2C_LEN_CXDRVINFO_RFK);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 2063 2064 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2065 H2C_CAT_OUTSRC, BTFC_SET, 2066 SET_DRV_INFO, 0, 0, 2067 H2C_LEN_CXDRVINFO_RFK); 2068 2069 ret = rtw89_h2c_tx(rtwdev, skb, false); 2070 if (ret) { 2071 rtw89_err(rtwdev, "failed to send h2c\n"); 2072 goto fail; 2073 } 2074 2075 return 0; 2076 fail: 2077 dev_kfree_skb_any(skb); 2078 2079 return ret; 2080 } 2081 2082 #define H2C_LEN_PKT_OFLD 4 2083 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 2084 { 2085 struct sk_buff *skb; 2086 u8 *cmd; 2087 int ret; 2088 2089 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 2090 if (!skb) { 2091 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2092 return -ENOMEM; 2093 } 2094 skb_put(skb, H2C_LEN_PKT_OFLD); 2095 cmd = skb->data; 2096 2097 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 2098 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 2099 2100 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2101 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2102 H2C_FUNC_PACKET_OFLD, 1, 1, 2103 H2C_LEN_PKT_OFLD); 2104 2105 ret = rtw89_h2c_tx(rtwdev, skb, false); 2106 if (ret) { 2107 rtw89_err(rtwdev, "failed to send h2c\n"); 2108 goto fail; 2109 } 2110 2111 return 0; 2112 fail: 2113 dev_kfree_skb_any(skb); 2114 2115 return ret; 2116 } 2117 2118 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 2119 struct sk_buff *skb_ofld) 2120 { 2121 struct sk_buff *skb; 2122 u8 *cmd; 2123 u8 alloc_id; 2124 int ret; 2125 2126 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 2127 RTW89_MAX_PKT_OFLD_NUM); 2128 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 2129 return -ENOSPC; 2130 2131 *id = alloc_id; 2132 2133 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 2134 if (!skb) { 2135 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 2136 return -ENOMEM; 2137 } 2138 skb_put(skb, H2C_LEN_PKT_OFLD); 2139 cmd = skb->data; 2140 2141 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 2142 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 2143 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 2144 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 2145 2146 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2147 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2148 H2C_FUNC_PACKET_OFLD, 1, 1, 2149 H2C_LEN_PKT_OFLD + skb_ofld->len); 2150 2151 ret = rtw89_h2c_tx(rtwdev, skb, false); 2152 if (ret) { 2153 rtw89_err(rtwdev, "failed to send h2c\n"); 2154 goto fail; 2155 } 2156 2157 return 0; 2158 fail: 2159 dev_kfree_skb_any(skb); 2160 2161 return ret; 2162 } 2163 2164 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 2165 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 2166 struct list_head *chan_list) 2167 { 2168 struct rtw89_mac_chinfo *ch_info; 2169 struct sk_buff *skb; 2170 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 2171 u8 *cmd; 2172 int ret; 2173 2174 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 2175 if (!skb) { 2176 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 2177 return -ENOMEM; 2178 } 2179 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 2180 cmd = skb->data; 2181 2182 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 2183 /* in unit of 4 bytes */ 2184 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 2185 2186 list_for_each_entry(ch_info, chan_list, list) { 2187 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 2188 2189 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 2190 
RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 2191 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 2192 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 2193 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 2194 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 2195 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 2196 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 2197 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 2198 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 2199 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 2200 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 2201 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 2202 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 2203 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 2204 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 2205 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 2206 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 2207 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 2208 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 2209 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 2210 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 2211 } 2212 2213 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2214 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2215 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 2216 2217 ret = rtw89_h2c_tx(rtwdev, skb, false); 2218 if (ret) { 2219 rtw89_err(rtwdev, "failed to send h2c\n"); 2220 goto fail; 2221 } 2222 2223 return 0; 2224 fail: 2225 dev_kfree_skb_any(skb); 2226 2227 return ret; 2228 } 2229 2230 #define H2C_LEN_SCAN_OFFLOAD 28 2231 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 2232 struct rtw89_scan_option *option, 2233 struct rtw89_vif *rtwvif) 2234 { 2235 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2236 struct sk_buff *skb; 2237 u8 *cmd; 2238 int ret; 2239 2240 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_SCAN_OFFLOAD); 2241 if (!skb) { 2242 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 2243 return -ENOMEM; 2244 } 2245 skb_put(skb, H2C_LEN_SCAN_OFFLOAD); 2246 cmd = skb->data; 2247 2248 RTW89_SET_FWCMD_SCANOFLD_MACID(cmd, rtwvif->mac_id); 2249 RTW89_SET_FWCMD_SCANOFLD_PORT_ID(cmd, rtwvif->port); 2250 RTW89_SET_FWCMD_SCANOFLD_BAND(cmd, RTW89_PHY_0); 2251 RTW89_SET_FWCMD_SCANOFLD_OPERATION(cmd, option->enable); 2252 RTW89_SET_FWCMD_SCANOFLD_NOTIFY_END(cmd, true); 2253 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_MODE(cmd, option->target_ch_mode); 2254 RTW89_SET_FWCMD_SCANOFLD_START_MODE(cmd, RTW89_SCAN_IMMEDIATE); 2255 RTW89_SET_FWCMD_SCANOFLD_SCAN_TYPE(cmd, RTW89_SCAN_ONCE); 2256 if (option->target_ch_mode) { 2257 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BW(cmd, scan_info->op_bw); 2258 RTW89_SET_FWCMD_SCANOFLD_TARGET_PRI_CH(cmd, 2259 scan_info->op_pri_ch); 2260 RTW89_SET_FWCMD_SCANOFLD_TARGET_CENTRAL_CH(cmd, 2261 scan_info->op_chan); 2262 RTW89_SET_FWCMD_SCANOFLD_TARGET_CH_BAND(cmd, 2263 scan_info->op_band); 2264 } 2265 2266 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2267 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2268 H2C_FUNC_SCANOFLD, 1, 1, 2269 H2C_LEN_SCAN_OFFLOAD); 2270 2271 ret = rtw89_h2c_tx(rtwdev, skb, false); 2272 if (ret) { 2273 rtw89_err(rtwdev, "failed to send h2c\n"); 2274 goto fail; 2275 } 2276 2277 return 0; 2278 fail: 2279 dev_kfree_skb_any(skb); 2280 2281 return ret; 2282 } 2283 2284 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 2285 struct rtw89_fw_h2c_rf_reg_info *info, 2286 u16 len, u8 page) 2287 { 2288 struct sk_buff 
*skb; 2289 u8 class = info->rf_path == RF_PATH_A ? 2290 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 2291 int ret; 2292 2293 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2294 if (!skb) { 2295 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 2296 return -ENOMEM; 2297 } 2298 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 2299 2300 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2301 H2C_CAT_OUTSRC, class, page, 0, 0, 2302 len); 2303 2304 ret = rtw89_h2c_tx(rtwdev, skb, false); 2305 if (ret) { 2306 rtw89_err(rtwdev, "failed to send h2c\n"); 2307 goto fail; 2308 } 2309 2310 return 0; 2311 fail: 2312 dev_kfree_skb_any(skb); 2313 2314 return ret; 2315 } 2316 2317 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 2318 { 2319 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2320 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 2321 struct rtw89_fw_h2c_rf_get_mccch *mccch; 2322 struct sk_buff *skb; 2323 int ret; 2324 2325 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 2326 if (!skb) { 2327 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2328 return -ENOMEM; 2329 } 2330 skb_put(skb, sizeof(*mccch)); 2331 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 2332 2333 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 2334 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 2335 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 2336 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 2337 mccch->current_channel = cpu_to_le32(chan->channel); 2338 mccch->current_band_type = cpu_to_le32(chan->band_type); 2339 2340 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2341 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 2342 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 2343 sizeof(*mccch)); 2344 2345 ret = rtw89_h2c_tx(rtwdev, skb, false); 2346 if (ret) { 2347 rtw89_err(rtwdev, "failed to send h2c\n"); 2348 goto fail; 2349 } 2350 2351 return 0; 2352 fail: 2353 dev_kfree_skb_any(skb); 2354 2355 return ret; 2356 } 2357 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 2358 2359 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 2360 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 2361 bool rack, bool dack) 2362 { 2363 struct sk_buff *skb; 2364 int ret; 2365 2366 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2367 if (!skb) { 2368 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 2369 return -ENOMEM; 2370 } 2371 skb_put_data(skb, buf, len); 2372 2373 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2374 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 2375 len); 2376 2377 ret = rtw89_h2c_tx(rtwdev, skb, false); 2378 if (ret) { 2379 rtw89_err(rtwdev, "failed to send h2c\n"); 2380 goto fail; 2381 } 2382 2383 return 0; 2384 fail: 2385 dev_kfree_skb_any(skb); 2386 2387 return ret; 2388 } 2389 2390 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 2391 { 2392 struct sk_buff *skb; 2393 int ret; 2394 2395 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 2396 if (!skb) { 2397 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 2398 return -ENOMEM; 2399 } 2400 skb_put_data(skb, buf, len); 2401 2402 ret = rtw89_h2c_tx(rtwdev, skb, false); 2403 if (ret) { 2404 rtw89_err(rtwdev, "failed to send h2c\n"); 2405 goto fail; 2406 } 2407 2408 return 0; 2409 fail: 2410 dev_kfree_skb_any(skb); 2411 2412 return ret; 2413 } 2414 2415 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 2416 { 2417 struct rtw89_early_h2c *early_h2c; 2418 2419 lockdep_assert_held(&rtwdev->mutex); 2420 2421 list_for_each_entry(early_h2c, 
&rtwdev->early_h2c_list, list) { 2422 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 2423 } 2424 } 2425 2426 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 2427 { 2428 struct rtw89_early_h2c *early_h2c, *tmp; 2429 2430 mutex_lock(&rtwdev->mutex); 2431 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 2432 list_del(&early_h2c->list); 2433 kfree(early_h2c->h2c); 2434 kfree(early_h2c); 2435 } 2436 mutex_unlock(&rtwdev->mutex); 2437 } 2438 2439 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 2440 { 2441 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2442 2443 attr->category = RTW89_GET_C2H_CATEGORY(c2h->data); 2444 attr->class = RTW89_GET_C2H_CLASS(c2h->data); 2445 attr->func = RTW89_GET_C2H_FUNC(c2h->data); 2446 attr->len = RTW89_GET_C2H_LEN(c2h->data); 2447 } 2448 2449 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 2450 struct sk_buff *c2h) 2451 { 2452 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 2453 u8 category = attr->category; 2454 u8 class = attr->class; 2455 u8 func = attr->func; 2456 2457 switch (category) { 2458 default: 2459 return false; 2460 case RTW89_C2H_CAT_MAC: 2461 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 2462 } 2463 } 2464 2465 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 2466 { 2467 rtw89_fw_c2h_parse_attr(c2h); 2468 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 2469 goto enqueue; 2470 2471 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 2472 dev_kfree_skb_any(c2h); 2473 return; 2474 2475 enqueue: 2476 skb_queue_tail(&rtwdev->c2h_queue, c2h); 2477 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 2478 } 2479 2480 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 2481 struct sk_buff *skb) 2482 { 2483 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 2484 u8 category = attr->category; 2485 u8 class = attr->class; 2486 u8 func = attr->func; 2487 u16 len = attr->len; 2488 bool dump = true; 2489 2490 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 2491 return; 2492 2493 switch (category) { 2494 case RTW89_C2H_CAT_TEST: 2495 break; 2496 case RTW89_C2H_CAT_MAC: 2497 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 2498 if (class == RTW89_MAC_C2H_CLASS_INFO && 2499 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 2500 dump = false; 2501 break; 2502 case RTW89_C2H_CAT_OUTSRC: 2503 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 2504 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 2505 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 2506 else 2507 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 2508 break; 2509 } 2510 2511 if (dump) 2512 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 2513 } 2514 2515 void rtw89_fw_c2h_work(struct work_struct *work) 2516 { 2517 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 2518 c2h_work); 2519 struct sk_buff *skb, *tmp; 2520 2521 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 2522 skb_unlink(skb, &rtwdev->c2h_queue); 2523 mutex_lock(&rtwdev->mutex); 2524 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 2525 mutex_unlock(&rtwdev->mutex); 2526 dev_kfree_skb_any(skb); 2527 } 2528 } 2529 2530 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 2531 struct rtw89_mac_h2c_info *info) 2532 { 2533 const struct rtw89_chip_info *chip = rtwdev->chip; 2534 const u32 *h2c_reg = chip->h2c_regs; 2535 u8 i, val, len; 2536 int ret; 2537 2538 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 2539 rtwdev, chip->h2c_ctrl_reg); 2540 if (ret) { 2541 rtw89_warn(rtwdev, "FW does not 
process h2c registers\n"); 2542 return ret; 2543 } 2544 2545 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 2546 sizeof(info->h2creg[0])); 2547 2548 RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id); 2549 RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len); 2550 for (i = 0; i < RTW89_H2CREG_MAX; i++) 2551 rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]); 2552 2553 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 2554 2555 return 0; 2556 } 2557 2558 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 2559 struct rtw89_mac_c2h_info *info) 2560 { 2561 const struct rtw89_chip_info *chip = rtwdev->chip; 2562 const u32 *c2h_reg = chip->c2h_regs; 2563 u32 ret; 2564 u8 i, val; 2565 2566 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 2567 2568 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 2569 RTW89_C2H_TIMEOUT, false, rtwdev, 2570 chip->c2h_ctrl_reg); 2571 if (ret) { 2572 rtw89_warn(rtwdev, "c2h reg timeout\n"); 2573 return ret; 2574 } 2575 2576 for (i = 0; i < RTW89_C2HREG_MAX; i++) 2577 info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 2578 2579 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 2580 2581 info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg); 2582 info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) - 2583 RTW89_C2HREG_HDR_LEN; 2584 2585 return 0; 2586 } 2587 2588 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 2589 struct rtw89_mac_h2c_info *h2c_info, 2590 struct rtw89_mac_c2h_info *c2h_info) 2591 { 2592 u32 ret; 2593 2594 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 2595 lockdep_assert_held(&rtwdev->mutex); 2596 2597 if (!h2c_info && !c2h_info) 2598 return -EINVAL; 2599 2600 if (!h2c_info) 2601 goto recv_c2h; 2602 2603 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 2604 if (ret) 2605 return ret; 2606 2607 recv_c2h: 2608 if (!c2h_info) 2609 return 0; 2610 2611 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 2612 if (ret) 2613 return ret; 2614 2615 return 0; 2616 } 2617 2618 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 2619 { 2620 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 2621 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 2622 return; 2623 } 2624 2625 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 2626 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 2627 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 2628 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 2629 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 2630 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 2631 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 2632 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 2633 2634 rtw89_fw_prog_cnt_dump(rtwdev); 2635 } 2636 2637 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 2638 { 2639 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 2640 struct rtw89_pktofld_info *info, *tmp; 2641 u8 idx; 2642 2643 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 2644 if (!(rtwdev->chip->support_bands & BIT(idx))) 2645 continue; 2646 2647 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 2648 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2649 rtw89_core_release_bit_map(rtwdev->pkt_offload, 2650 info->id); 2651 list_del(&info->list); 2652 kfree(info); 2653 } 2654 } 2655 } 2656 2657 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 2658 struct rtw89_vif *rtwvif, 2659 struct sk_buff *skb) 2660 { 2661 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2662 struct 
ieee80211_scan_ies *ies = rtwvif->scan_ies; 2663 struct rtw89_pktofld_info *info; 2664 struct sk_buff *new; 2665 int ret = 0; 2666 u8 band; 2667 2668 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 2669 if (!(rtwdev->chip->support_bands & BIT(band))) 2670 continue; 2671 2672 new = skb_copy(skb, GFP_KERNEL); 2673 if (!new) { 2674 ret = -ENOMEM; 2675 goto out; 2676 } 2677 skb_put_data(new, ies->ies[band], ies->len[band]); 2678 skb_put_data(new, ies->common_ies, ies->common_ie_len); 2679 2680 info = kzalloc(sizeof(*info), GFP_KERNEL); 2681 if (!info) { 2682 ret = -ENOMEM; 2683 kfree_skb(new); 2684 goto out; 2685 } 2686 2687 list_add_tail(&info->list, &scan_info->pkt_list[band]); 2688 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 2689 if (ret) { 2690 kfree_skb(new); 2691 goto out; 2692 } 2693 2694 kfree_skb(new); 2695 } 2696 out: 2697 return ret; 2698 } 2699 2700 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 2701 struct rtw89_vif *rtwvif) 2702 { 2703 struct cfg80211_scan_request *req = rtwvif->scan_req; 2704 struct sk_buff *skb; 2705 u8 num = req->n_ssids, i; 2706 int ret; 2707 2708 for (i = 0; i < num; i++) { 2709 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 2710 req->ssids[i].ssid, 2711 req->ssids[i].ssid_len, 2712 req->ie_len); 2713 if (!skb) 2714 return -ENOMEM; 2715 2716 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb); 2717 kfree_skb(skb); 2718 2719 if (ret) 2720 return ret; 2721 } 2722 2723 return 0; 2724 } 2725 2726 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 2727 int ssid_num, 2728 struct rtw89_mac_chinfo *ch_info) 2729 { 2730 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2731 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 2732 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2733 struct cfg80211_scan_request *req = rtwvif->scan_req; 2734 struct rtw89_pktofld_info *info; 2735 u8 band, probe_count = 0; 2736 2737 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 2738 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 2739 ch_info->bw = RTW89_SCAN_WIDTH; 2740 ch_info->tx_pkt = true; 2741 ch_info->cfg_tx_pwr = false; 2742 ch_info->tx_pwr_idx = 0; 2743 ch_info->tx_null = false; 2744 ch_info->pause_data = false; 2745 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 2746 2747 if (ssid_num) { 2748 ch_info->num_pkt = ssid_num; 2749 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 2750 2751 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 2752 ch_info->pkt_id[probe_count] = info->id; 2753 if (++probe_count >= ssid_num) 2754 break; 2755 } 2756 if (probe_count != ssid_num) 2757 rtw89_err(rtwdev, "SSID num differs from list len\n"); 2758 } 2759 2760 if (ch_info->ch_band == RTW89_BAND_6G) { 2761 if (ssid_num == 1 && req->ssids[0].ssid_len == 0) { 2762 ch_info->tx_pkt = false; 2763 if (!req->duration_mandatory) 2764 ch_info->period -= RTW89_DWELL_TIME_6G; 2765 } 2766 } 2767 2768 switch (chan_type) { 2769 case RTW89_CHAN_OPERATE: 2770 ch_info->central_ch = scan_info->op_chan; 2771 ch_info->pri_ch = scan_info->op_pri_ch; 2772 ch_info->ch_band = scan_info->op_band; 2773 ch_info->bw = scan_info->op_bw; 2774 ch_info->tx_null = true; 2775 ch_info->num_pkt = 0; 2776 break; 2777 case RTW89_CHAN_DFS: 2778 if (ch_info->ch_band != RTW89_BAND_6G) 2779 ch_info->period = max_t(u8, ch_info->period, 2780 RTW89_DFS_CHAN_TIME); 2781 ch_info->dwell_time = RTW89_DWELL_TIME; 2782 break; 2783 case RTW89_CHAN_ACTIVE: 2784 break; 2785 default: 2786 rtw89_err(rtwdev, 
"Channel type out of bound\n"); 2787 } 2788 } 2789 2790 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 2791 struct rtw89_vif *rtwvif) 2792 { 2793 struct cfg80211_scan_request *req = rtwvif->scan_req; 2794 struct rtw89_mac_chinfo *ch_info, *tmp; 2795 struct ieee80211_channel *channel; 2796 struct list_head chan_list; 2797 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 2798 int list_len, off_chan_time = 0; 2799 enum rtw89_chan_type type; 2800 int ret = 0; 2801 u32 idx; 2802 2803 INIT_LIST_HEAD(&chan_list); 2804 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 2805 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 2806 idx++, list_len++) { 2807 channel = req->channels[idx]; 2808 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 2809 if (!ch_info) { 2810 ret = -ENOMEM; 2811 goto out; 2812 } 2813 2814 if (req->duration_mandatory) 2815 ch_info->period = req->duration; 2816 else if (channel->band == NL80211_BAND_6GHZ) 2817 ch_info->period = RTW89_CHANNEL_TIME_6G + 2818 RTW89_DWELL_TIME_6G; 2819 else 2820 ch_info->period = RTW89_CHANNEL_TIME; 2821 2822 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 2823 ch_info->central_ch = channel->hw_value; 2824 ch_info->pri_ch = channel->hw_value; 2825 ch_info->rand_seq_num = random_seq; 2826 2827 if (channel->flags & 2828 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 2829 type = RTW89_CHAN_DFS; 2830 else 2831 type = RTW89_CHAN_ACTIVE; 2832 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 2833 2834 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK && 2835 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 2836 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 2837 if (!tmp) { 2838 ret = -ENOMEM; 2839 kfree(ch_info); 2840 goto out; 2841 } 2842 2843 type = RTW89_CHAN_OPERATE; 2844 tmp->period = req->duration_mandatory ? 
2845 req->duration : RTW89_CHANNEL_TIME; 2846 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 2847 list_add_tail(&tmp->list, &chan_list); 2848 off_chan_time = 0; 2849 list_len++; 2850 } 2851 list_add_tail(&ch_info->list, &chan_list); 2852 off_chan_time += ch_info->period; 2853 } 2854 rtwdev->scan_info.last_chan_idx = idx; 2855 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 2856 2857 out: 2858 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 2859 list_del(&ch_info->list); 2860 kfree(ch_info); 2861 } 2862 2863 return ret; 2864 } 2865 2866 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 2867 struct rtw89_vif *rtwvif) 2868 { 2869 int ret; 2870 2871 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 2872 if (ret) { 2873 rtw89_err(rtwdev, "Update probe request failed\n"); 2874 goto out; 2875 } 2876 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif); 2877 out: 2878 return ret; 2879 } 2880 2881 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2882 struct ieee80211_scan_request *scan_req) 2883 { 2884 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2885 struct cfg80211_scan_request *req = &scan_req->req; 2886 u32 rx_fltr = rtwdev->hal.rx_fltr; 2887 u8 mac_addr[ETH_ALEN]; 2888 2889 rtwdev->scan_info.scanning_vif = vif; 2890 rtwdev->scan_info.last_chan_idx = 0; 2891 rtwvif->scan_ies = &scan_req->ies; 2892 rtwvif->scan_req = req; 2893 ieee80211_stop_queues(rtwdev->hw); 2894 2895 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 2896 get_random_mask_addr(mac_addr, req->mac_addr, 2897 req->mac_addr_mask); 2898 else 2899 ether_addr_copy(mac_addr, vif->addr); 2900 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 2901 2902 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 2903 rx_fltr &= ~B_AX_A_BC; 2904 rx_fltr &= ~B_AX_A_A1_MATCH; 2905 rtw89_write32_mask(rtwdev, 2906 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2907 B_AX_RX_FLTR_CFG_MASK, 2908 rx_fltr); 2909 } 2910 2911 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2912 bool aborted) 2913 { 2914 struct cfg80211_scan_info info = { 2915 .aborted = aborted, 2916 }; 2917 struct rtw89_vif *rtwvif; 2918 2919 if (!vif) 2920 return; 2921 2922 rtw89_write32_mask(rtwdev, 2923 rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0), 2924 B_AX_RX_FLTR_CFG_MASK, 2925 rtwdev->hal.rx_fltr); 2926 2927 rtw89_core_scan_complete(rtwdev, vif, true); 2928 ieee80211_scan_completed(rtwdev->hw, &info); 2929 ieee80211_wake_queues(rtwdev->hw); 2930 2931 rtw89_release_pkt_list(rtwdev); 2932 rtwvif = (struct rtw89_vif *)vif->drv_priv; 2933 rtwvif->scan_req = NULL; 2934 rtwvif->scan_ies = NULL; 2935 rtwdev->scan_info.last_chan_idx = 0; 2936 rtwdev->scan_info.scanning_vif = NULL; 2937 2938 if (rtwvif->net_type != RTW89_NET_TYPE_NO_LINK) 2939 rtw89_store_op_chan(rtwdev, false); 2940 rtw89_set_channel(rtwdev); 2941 } 2942 2943 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 2944 { 2945 rtw89_hw_scan_offload(rtwdev, vif, false); 2946 rtw89_hw_scan_complete(rtwdev, vif, true); 2947 } 2948 2949 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2950 bool enable) 2951 { 2952 struct rtw89_scan_option opt = {0}; 2953 struct rtw89_vif *rtwvif; 2954 int ret = 0; 2955 2956 rtwvif = vif ? 
(struct rtw89_vif *)vif->drv_priv : NULL; 2957 if (!rtwvif) 2958 return -EINVAL; 2959 2960 opt.enable = enable; 2961 opt.target_ch_mode = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK; 2962 if (enable) { 2963 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif); 2964 if (ret) 2965 goto out; 2966 } 2967 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 2968 out: 2969 return ret; 2970 } 2971 2972 void rtw89_store_op_chan(struct rtw89_dev *rtwdev, bool backup) 2973 { 2974 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 2975 const struct rtw89_chan *cur = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2976 struct rtw89_chan new; 2977 2978 if (backup) { 2979 scan_info->op_pri_ch = cur->primary_channel; 2980 scan_info->op_chan = cur->channel; 2981 scan_info->op_bw = cur->band_width; 2982 scan_info->op_band = cur->band_type; 2983 } else { 2984 rtw89_chan_create(&new, scan_info->op_chan, scan_info->op_pri_ch, 2985 scan_info->op_band, scan_info->op_bw); 2986 rtw89_assign_entity_chan(rtwdev, RTW89_SUB_ENTITY_0, &new); 2987 } 2988 } 2989 2990 #define H2C_FW_CPU_EXCEPTION_LEN 4 2991 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 2992 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 2993 { 2994 struct sk_buff *skb; 2995 int ret; 2996 2997 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 2998 if (!skb) { 2999 rtw89_err(rtwdev, 3000 "failed to alloc skb for fw cpu exception\n"); 3001 return -ENOMEM; 3002 } 3003 3004 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 3005 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 3006 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 3007 3008 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3009 H2C_CAT_TEST, 3010 H2C_CL_FW_STATUS_TEST, 3011 H2C_FUNC_CPU_EXCEPTION, 0, 0, 3012 H2C_FW_CPU_EXCEPTION_LEN); 3013 3014 ret = rtw89_h2c_tx(rtwdev, skb, false); 3015 if (ret) { 3016 rtw89_err(rtwdev, "failed to send h2c\n"); 3017 goto fail; 3018 } 3019 3020 return 0; 3021 3022 fail: 3023 dev_kfree_skb_any(skb); 3024 return ret; 3025 } 3026 3027 #define H2C_PKT_DROP_LEN 24 3028 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 3029 const struct rtw89_pkt_drop_params *params) 3030 { 3031 struct sk_buff *skb; 3032 int ret; 3033 3034 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 3035 if (!skb) { 3036 rtw89_err(rtwdev, 3037 "failed to alloc skb for packet drop\n"); 3038 return -ENOMEM; 3039 } 3040 3041 switch (params->sel) { 3042 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 3043 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 3044 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 3045 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 3046 case RTW89_PKT_DROP_SEL_BAND_ONCE: 3047 break; 3048 default: 3049 rtw89_debug(rtwdev, RTW89_DBG_FW, 3050 "H2C of pkt drop might not fully support sel: %d yet\n", 3051 params->sel); 3052 break; 3053 } 3054 3055 skb_put(skb, H2C_PKT_DROP_LEN); 3056 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 3057 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 3058 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 3059 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 3060 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 3061 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 3062 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 3063 params->macid_band_sel[0]); 3064 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 3065 params->macid_band_sel[1]); 3066 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 3067 params->macid_band_sel[2]); 3068 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 
3069 params->macid_band_sel[3]); 3070 3071 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3072 H2C_CAT_MAC, 3073 H2C_CL_MAC_FW_OFLD, 3074 H2C_FUNC_PKT_DROP, 0, 0, 3075 H2C_PKT_DROP_LEN); 3076 3077 ret = rtw89_h2c_tx(rtwdev, skb, false); 3078 if (ret) { 3079 rtw89_err(rtwdev, "failed to send h2c\n"); 3080 goto fail; 3081 } 3082 3083 return 0; 3084 3085 fail: 3086 dev_kfree_skb_any(skb); 3087 return ret; 3088 } 3089 3090 #define H2C_KEEP_ALIVE_LEN 4 3091 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3092 bool enable) 3093 { 3094 struct sk_buff *skb; 3095 u8 pkt_id = 0; 3096 int ret; 3097 3098 if (enable) { 3099 ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif, 3100 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id); 3101 if (ret) 3102 return -EPERM; 3103 } 3104 3105 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 3106 if (!skb) { 3107 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3108 return -ENOMEM; 3109 } 3110 3111 skb_put(skb, H2C_KEEP_ALIVE_LEN); 3112 3113 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 3114 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 3115 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 3116 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 3117 3118 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3119 H2C_CAT_MAC, 3120 H2C_CL_MAC_WOW, 3121 H2C_FUNC_KEEP_ALIVE, 0, 1, 3122 H2C_KEEP_ALIVE_LEN); 3123 3124 ret = rtw89_h2c_tx(rtwdev, skb, false); 3125 if (ret) { 3126 rtw89_err(rtwdev, "failed to send h2c\n"); 3127 goto fail; 3128 } 3129 3130 return 0; 3131 3132 fail: 3133 dev_kfree_skb_any(skb); 3134 3135 return ret; 3136 } 3137 3138 #define H2C_DISCONNECT_DETECT_LEN 8 3139 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 3140 struct rtw89_vif *rtwvif, bool enable) 3141 { 3142 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3143 struct sk_buff *skb; 3144 u8 macid = rtwvif->mac_id; 3145 int ret; 3146 3147 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 3148 if (!skb) { 3149 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3150 return -ENOMEM; 3151 } 3152 3153 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 3154 3155 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 3156 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 3157 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 3158 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 3159 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 3160 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 3161 } 3162 3163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3164 H2C_CAT_MAC, 3165 H2C_CL_MAC_WOW, 3166 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 3167 H2C_DISCONNECT_DETECT_LEN); 3168 3169 ret = rtw89_h2c_tx(rtwdev, skb, false); 3170 if (ret) { 3171 rtw89_err(rtwdev, "failed to send h2c\n"); 3172 goto fail; 3173 } 3174 3175 return 0; 3176 3177 fail: 3178 dev_kfree_skb_any(skb); 3179 3180 return ret; 3181 } 3182 3183 #define H2C_WOW_GLOBAL_LEN 8 3184 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3185 bool enable) 3186 { 3187 struct sk_buff *skb; 3188 u8 macid = rtwvif->mac_id; 3189 int ret; 3190 3191 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 3192 if (!skb) { 3193 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3194 return -ENOMEM; 3195 } 3196 3197 skb_put(skb, H2C_WOW_GLOBAL_LEN); 3198 3199 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 3200 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 3201 3202 
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3203 H2C_CAT_MAC, 3204 H2C_CL_MAC_WOW, 3205 H2C_FUNC_WOW_GLOBAL, 0, 1, 3206 H2C_WOW_GLOBAL_LEN); 3207 3208 ret = rtw89_h2c_tx(rtwdev, skb, false); 3209 if (ret) { 3210 rtw89_err(rtwdev, "failed to send h2c\n"); 3211 goto fail; 3212 } 3213 3214 return 0; 3215 3216 fail: 3217 dev_kfree_skb_any(skb); 3218 3219 return ret; 3220 } 3221 3222 #define H2C_WAKEUP_CTRL_LEN 4 3223 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 3224 struct rtw89_vif *rtwvif, 3225 bool enable) 3226 { 3227 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 3228 struct sk_buff *skb; 3229 u8 macid = rtwvif->mac_id; 3230 int ret; 3231 3232 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 3233 if (!skb) { 3234 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3235 return -ENOMEM; 3236 } 3237 3238 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 3239 3240 if (rtw_wow->pattern_cnt) 3241 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 3242 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 3243 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 3244 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 3245 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 3246 3247 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 3248 3249 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3250 H2C_CAT_MAC, 3251 H2C_CL_MAC_WOW, 3252 H2C_FUNC_WAKEUP_CTRL, 0, 1, 3253 H2C_WAKEUP_CTRL_LEN); 3254 3255 ret = rtw89_h2c_tx(rtwdev, skb, false); 3256 if (ret) { 3257 rtw89_err(rtwdev, "failed to send h2c\n"); 3258 goto fail; 3259 } 3260 3261 return 0; 3262 3263 fail: 3264 dev_kfree_skb_any(skb); 3265 3266 return ret; 3267 } 3268 3269 #define H2C_WOW_CAM_UPD_LEN 24 3270 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 3271 struct rtw89_wow_cam_info *cam_info) 3272 { 3273 struct sk_buff *skb; 3274 int ret; 3275 3276 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 3277 if (!skb) { 3278 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 3279 return -ENOMEM; 3280 } 3281 3282 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 3283 3284 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 3285 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 3286 if (cam_info->valid) { 3287 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 3288 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 3289 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 3290 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 3291 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 3292 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 3293 cam_info->negative_pattern_match); 3294 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 3295 cam_info->skip_mac_hdr); 3296 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 3297 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 3298 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 3299 } 3300 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 3301 3302 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3303 H2C_CAT_MAC, 3304 H2C_CL_MAC_WOW, 3305 H2C_FUNC_WOW_CAM_UPD, 0, 1, 3306 H2C_WOW_CAM_UPD_LEN); 3307 3308 ret = rtw89_h2c_tx(rtwdev, skb, false); 3309 if (ret) { 3310 rtw89_err(rtwdev, "failed to send h2c\n"); 3311 goto fail; 3312 } 3313 3314 return 0; 3315 fail: 3316 dev_kfree_skb_any(skb); 3317 3318 return ret; 3319 } 3320 3321 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 3322 struct rtw89_wait_info *wait, unsigned int cond) 3323 { 
3324 int ret; 3325 3326 ret = rtw89_h2c_tx(rtwdev, skb, false); 3327 if (ret) { 3328 rtw89_err(rtwdev, "failed to send h2c\n"); 3329 dev_kfree_skb_any(skb); 3330 return -EBUSY; 3331 } 3332 3333 return rtw89_wait_for_cond(wait, cond); 3334 } 3335 3336 #define H2C_ADD_MCC_LEN 16 3337 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 3338 const struct rtw89_fw_mcc_add_req *p) 3339 { 3340 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3341 struct sk_buff *skb; 3342 unsigned int cond; 3343 3344 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 3345 if (!skb) { 3346 rtw89_err(rtwdev, 3347 "failed to alloc skb for add mcc\n"); 3348 return -ENOMEM; 3349 } 3350 3351 skb_put(skb, H2C_ADD_MCC_LEN); 3352 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 3353 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 3354 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 3355 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 3356 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 3357 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 3358 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 3359 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 3360 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 3361 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 3362 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 3363 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 3364 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 3365 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 3366 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 3367 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 3368 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 3369 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 3370 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 3371 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 3372 3373 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3374 H2C_CAT_MAC, 3375 H2C_CL_MCC, 3376 H2C_FUNC_ADD_MCC, 0, 0, 3377 H2C_ADD_MCC_LEN); 3378 3379 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 3380 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3381 } 3382 3383 #define H2C_START_MCC_LEN 12 3384 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 3385 const struct rtw89_fw_mcc_start_req *p) 3386 { 3387 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3388 struct sk_buff *skb; 3389 unsigned int cond; 3390 3391 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 3392 if (!skb) { 3393 rtw89_err(rtwdev, 3394 "failed to alloc skb for start mcc\n"); 3395 return -ENOMEM; 3396 } 3397 3398 skb_put(skb, H2C_START_MCC_LEN); 3399 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 3400 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 3401 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 3402 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 3403 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 3404 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 3405 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, p->macid); 3406 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 3407 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 3408 3409 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3410 H2C_CAT_MAC, 3411 H2C_CL_MCC, 
3412 H2C_FUNC_START_MCC, 0, 0, 3413 H2C_START_MCC_LEN); 3414 3415 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 3416 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3417 } 3418 3419 #define H2C_STOP_MCC_LEN 4 3420 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 3421 bool prev_groups) 3422 { 3423 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3424 struct sk_buff *skb; 3425 unsigned int cond; 3426 3427 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 3428 if (!skb) { 3429 rtw89_err(rtwdev, 3430 "failed to alloc skb for stop mcc\n"); 3431 return -ENOMEM; 3432 } 3433 3434 skb_put(skb, H2C_STOP_MCC_LEN); 3435 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 3436 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 3437 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 3438 3439 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3440 H2C_CAT_MAC, 3441 H2C_CL_MCC, 3442 H2C_FUNC_STOP_MCC, 0, 0, 3443 H2C_STOP_MCC_LEN); 3444 3445 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 3446 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3447 } 3448 3449 #define H2C_DEL_MCC_GROUP_LEN 4 3450 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 3451 bool prev_groups) 3452 { 3453 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3454 struct sk_buff *skb; 3455 unsigned int cond; 3456 3457 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 3458 if (!skb) { 3459 rtw89_err(rtwdev, 3460 "failed to alloc skb for del mcc group\n"); 3461 return -ENOMEM; 3462 } 3463 3464 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 3465 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 3466 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 3467 3468 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3469 H2C_CAT_MAC, 3470 H2C_CL_MCC, 3471 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 3472 H2C_DEL_MCC_GROUP_LEN); 3473 3474 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 3475 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3476 } 3477 3478 #define H2C_RESET_MCC_GROUP_LEN 4 3479 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 3480 { 3481 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3482 struct sk_buff *skb; 3483 unsigned int cond; 3484 3485 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 3486 if (!skb) { 3487 rtw89_err(rtwdev, 3488 "failed to alloc skb for reset mcc group\n"); 3489 return -ENOMEM; 3490 } 3491 3492 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 3493 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 3494 3495 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3496 H2C_CAT_MAC, 3497 H2C_CL_MCC, 3498 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 3499 H2C_RESET_MCC_GROUP_LEN); 3500 3501 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 3502 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3503 } 3504 3505 #define H2C_MCC_REQ_TSF_LEN 4 3506 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 3507 const struct rtw89_fw_mcc_tsf_req *req, 3508 struct rtw89_mac_mcc_tsf_rpt *rpt) 3509 { 3510 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 3511 struct rtw89_mac_mcc_tsf_rpt *tmp; 3512 struct sk_buff *skb; 3513 unsigned int cond; 3514 int ret; 3515 3516 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_REQ_TSF_LEN); 3517 if (!skb) { 3518 rtw89_err(rtwdev, 3519 "failed to alloc skb for mcc req tsf\n"); 3520 return -ENOMEM; 3521 } 3522 3523 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 3524 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 3525 
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x);
	RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_REQ_TSF, 0, 0,
			      H2C_MCC_REQ_TSF_LEN);

	cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF);
	ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
	if (ret)
		return ret;

	tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf;
	*rpt = *tmp;

	return 0;
}

#define H2C_MCC_MACID_BITMAP_DSC_LEN 4
int rtw89_fw_h2c_mcc_macid_bitamp(struct rtw89_dev *rtwdev, u8 group, u8 macid,
				  u8 *bitmap)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;
	u8 map_len;
	u8 h2c_len;

	BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8);
	map_len = RTW89_MAX_MAC_ID_NUM / 8;
	h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len;
	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc macid bitmap\n");
		return -ENOMEM;
	}

	skb_put(skb, h2c_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len);
	RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_MACID_BITMAP, 0, 0,
			      h2c_len);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SYNC_LEN 4
int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source,
			  u8 target, u8 offset)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc sync\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SYNC_LEN);
	RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source);
	RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target);
	RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SYNC, 0, 0,
			      H2C_MCC_SYNC_LEN);

	cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}

#define H2C_MCC_SET_DURATION_LEN 20
int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_mcc_duration *p)
{
	struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
	struct sk_buff *skb;
	unsigned int cond;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN);
	if (!skb) {
		rtw89_err(rtwdev,
			  "failed to alloc skb for mcc set duration\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_MCC_SET_DURATION_LEN);
	RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data,
						       p->start_tsf_low);
	RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data,
							p->start_tsf_high);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x);
	RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MCC,
			      H2C_FUNC_MCC_SET_DURATION, 0, 0,
			      H2C_MCC_SET_DURATION_LEN);

	cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION);
	return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
}
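
/*
 * Illustration only, not part of the driver: a minimal sketch of how a caller
 * might chain the MCC H2C helpers above -- add a group member, publish the
 * MACID bitmap, then start the group.  Each helper blocks on the firmware's
 * matching C2H report via rtw89_h2c_tx_and_wait().  The helper name
 * rtw89_mcc_example_start() and the mostly-zero request values are
 * hypothetical placeholders; the real MCC bring-up logic lives elsewhere in
 * the driver.
 */
#if 0
static int rtw89_mcc_example_start(struct rtw89_dev *rtwdev, u8 group, u8 macid)
{
	struct rtw89_fw_mcc_add_req add_req = {
		.macid = macid,
		.group = group,
		/* channel/bandwidth fields omitted; taken from the chanctx in practice */
	};
	struct rtw89_fw_mcc_start_req start_req = {
		.group = group,
		.macid = macid,
	};
	u8 bitmap[RTW89_MAX_MAC_ID_NUM / 8] = {};
	int ret;

	ret = rtw89_fw_h2c_add_mcc(rtwdev, &add_req);
	if (ret)
		return ret;

	/* mark the single participating MACID in the group bitmap */
	bitmap[macid / 8] |= BIT(macid % 8);
	ret = rtw89_fw_h2c_mcc_macid_bitamp(rtwdev, group, macid, bitmap);
	if (ret)
		return ret;

	return rtw89_fw_h2c_start_mcc(rtwdev, &start_req);
}
#endif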