// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

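/* Parse the firmware image header into rtw89_fw_bin_info. Each section entry
 * records its download address, length and flags; security-type sections
 * additionally carry MSS signatures, which are accumulated in mssc_len so the
 * final image-size sanity check still matches the file size.
 */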
static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

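/* A firmware file is either a legacy single image or an MFW (multi-firmware)
 * container. For the container case, look up the sub-image whose type and
 * chip version (cv) match; log-format blobs match on type alone.
 */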
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->type == type) {
			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
				goto found;
			if (type == RTW89_FW_LOGFMT)
				goto found;
		}
	}

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const void *data)
{
	enum rtw89_fw_type type = (enum rtw89_fw_type)data;
	struct rtw89_fw_suit *fw_suit;

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	fw_suit->data = elm->u.common.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
};

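/* Walk fw_feat_tbl and set a feature flag whenever the entry's chip matches
 * and its version condition (ge/le/lt against the loaded firmware version
 * code) holds, so driver behaviour can be gated on firmware capabilities.
 */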
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

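/* Build a rtw89_phy_table from a BB/RF firmware element: pick the slot in
 * elm_info that corresponds to the element ID, then copy the (addr, data)
 * register pairs out of the element payload.
 */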
static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const void *data)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = (enum rtw89_rf_path)data;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const void *data)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = (const unsigned long)data;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple entries match, the last one takes effect */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none is matched, accept the default */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

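/* Element dispatch table, indexed by element ID. Each handler receives the
 * per-ID opaque 'data' cookie: a firmware type for BBMCU images, an RF path
 * for radio tables, or an offsetof() into struct rtw89_rfe_data for TX-power
 * tables.
 */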
struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm, const void *data);
	const void *data;
	const char *name;
};

static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					(const void *)RTW89_FW_BBMCU0, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					(const void *)RTW89_FW_BBMCU1, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, NULL, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, NULL, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_A, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_B, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_C, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 (const void *)RF_PATH_D, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, NULL, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, byrate.conf), "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_2ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_5ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_6ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf), NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		(const void *)offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf), NULL,
	},
};

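/* Scan the optional, RTW89_FW_ELEMENT_ALIGN-aligned element blobs appended
 * after the multi-firmware image and dispatch each one to its handler. Fail
 * if any element the chip declares as required is still missing at the end
 * of the walk.
 */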
int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->data);
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

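/* Push one firmware section to the device, splitting it into H2C packets of
 * at most FWDL_SECTION_PER_PKT_LEN bytes; these download packets carry no H2C
 * command header.
 */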
static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info;
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
							   info.dynamic_hdr_len);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

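/* Full download sequence: stop the running CPU, enable the WCPU for firmware
 * download, push the requested firmware suit, then optionally the BB MCU
 * images, and finally poll rtw89_fw_check_rdy() until the firmware reports
 * it is up.
 */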
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}

static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 i;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (i = 0; i < fw_log->fmt_count; i++) {
		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
			return i;
	}
	return 0;
}

static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}

int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	struct rtw89_fw_suit *suit = &log->suit;

	if (!suit || !suit->data) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
		return -EINVAL;
	}
	if (log->fmts)
		return 0;

	return rtw89_fw_log_create_fmts_dict(rtwdev);
}

static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0 ; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}

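/* Entry point for firmware log C2H events. Messages carrying the
 * formatted-log signature are decoded against the format-string dictionary
 * loaded from the LOGFMT firmware file; everything else is printed as plain
 * text.
 */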
void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

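/* Program a block-ack CAM entry for an AMPDU session. A static entry is
 * reserved per TID when valid is true and released otherwise; if no static
 * entry is available the command is skipped, since the hardware can still
 * create a dynamic BA CAM entry on its own.
 */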
#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

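/* Tell the firmware which log components to emit and route them over C2H;
 * disabling simply clears the component mask.
 */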
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif *rtwvif, bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif->general_pkt_list;
	struct rtw89_pktofld_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, pkt_list, list) {
		if (notify_fw)
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
		else
			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
		list_del(&info->list);
		kfree(info);
	}
}

void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}

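/* Upload the "general" offload packets (PS-Poll, null and QoS-null frames)
 * for this vif and hand their packet-offload IDs to the firmware, so it can
 * transmit them on its own, for example while in power save.
 */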
#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

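/* Fill the CMAC table TX path fields from the current antenna setting: a
 * single-path chip is pinned to path A; otherwise the configured TX antenna
 * decides the enabled paths and whether path B is mapped in.
 */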
static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path;
	u8 map_b;

	if (chip->rf_path_num == 1) {
		ntx_path = RF_A;
		map_b = 0;
	} else {
		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
		map_b = hal->antenna_tx == RF_AB ? 1 : 0;
	}

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

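/* Update the CMAC table when a station associates: pick the lowest control
 * rate for the operating band, program HE nominal packet padding per
 * bandwidth from __get_sta_he_pkt_padding(), and mark the UL/DL role of the
 * link.
 */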
int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

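/* Per-station CMAC table update for TX time limits: enable the AMPDU
 * max-time and data retry-count fields only when the corresponding
 * cctl_tx_time / cctl_tx_retry_limit overrides are set.
 */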
rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1956 struct rtw89_sta *rtwsta) 1957 { 1958 const struct rtw89_chip_info *chip = rtwdev->chip; 1959 struct sk_buff *skb; 1960 int ret; 1961 1962 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1963 if (!skb) { 1964 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1965 return -ENOMEM; 1966 } 1967 skb_put(skb, H2C_CMC_TBL_LEN); 1968 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1969 SET_CTRL_INFO_OPERATION(skb->data, 1); 1970 if (rtwsta->cctl_tx_time) { 1971 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1972 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1973 } 1974 if (rtwsta->cctl_tx_retry_limit) { 1975 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1976 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1977 } 1978 1979 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1980 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1981 chip->h2c_cctl_func_id, 0, 1, 1982 H2C_CMC_TBL_LEN); 1983 1984 ret = rtw89_h2c_tx(rtwdev, skb, false); 1985 if (ret) { 1986 rtw89_err(rtwdev, "failed to send h2c\n"); 1987 goto fail; 1988 } 1989 1990 return 0; 1991 fail: 1992 dev_kfree_skb_any(skb); 1993 1994 return ret; 1995 } 1996 1997 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 1998 struct rtw89_sta *rtwsta) 1999 { 2000 const struct rtw89_chip_info *chip = rtwdev->chip; 2001 struct sk_buff *skb; 2002 int ret; 2003 2004 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 2005 return 0; 2006 2007 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2008 if (!skb) { 2009 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2010 return -ENOMEM; 2011 } 2012 skb_put(skb, H2C_CMC_TBL_LEN); 2013 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 2014 SET_CTRL_INFO_OPERATION(skb->data, 1); 2015 2016 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2017 2018 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2019 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2020 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 2021 H2C_CMC_TBL_LEN); 2022 2023 ret = rtw89_h2c_tx(rtwdev, skb, false); 2024 if (ret) { 2025 rtw89_err(rtwdev, "failed to send h2c\n"); 2026 goto fail; 2027 } 2028 2029 return 0; 2030 fail: 2031 dev_kfree_skb_any(skb); 2032 2033 return ret; 2034 } 2035 2036 #define H2C_BCN_BASE_LEN 12 2037 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 2038 struct rtw89_vif *rtwvif) 2039 { 2040 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2041 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2042 rtwvif->sub_entity_idx); 2043 struct sk_buff *skb; 2044 struct sk_buff *skb_beacon; 2045 u16 tim_offset; 2046 int bcn_total_len; 2047 u16 beacon_rate; 2048 void *noa_data; 2049 u8 noa_len; 2050 int ret; 2051 2052 if (vif->p2p) 2053 beacon_rate = RTW89_HW_RATE_OFDM6; 2054 else if (chan->band_type == RTW89_BAND_2G) 2055 beacon_rate = RTW89_HW_RATE_CCK1; 2056 else 2057 beacon_rate = RTW89_HW_RATE_OFDM6; 2058 2059 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 2060 NULL, 0); 2061 if (!skb_beacon) { 2062 rtw89_err(rtwdev, "failed to get beacon skb\n"); 2063 return -ENOMEM; 2064 } 2065 2066 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 2067 if (noa_len && 2068 (noa_len <= skb_tailroom(skb_beacon) || 2069 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 2070 skb_put_data(skb_beacon, noa_data, noa_len); 2071 } 2072 2073 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 2074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 2075 if (!skb) { 2076 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 
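		/* skb_beacon obtained from ieee80211_beacon_get_tim() above has
		 * not been consumed yet; it must be freed on this error path
		 * before returning.
		 */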
2077 dev_kfree_skb_any(skb_beacon); 2078 return -ENOMEM; 2079 } 2080 skb_put(skb, H2C_BCN_BASE_LEN); 2081 2082 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 2083 SET_BCN_UPD_MBSSID(skb->data, 0); 2084 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 2085 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 2086 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 2087 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 2088 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 2089 SET_BCN_UPD_RATE(skb->data, beacon_rate); 2090 2091 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 2092 dev_kfree_skb_any(skb_beacon); 2093 2094 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2095 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2096 H2C_FUNC_MAC_BCN_UPD, 0, 1, 2097 bcn_total_len); 2098 2099 ret = rtw89_h2c_tx(rtwdev, skb, false); 2100 if (ret) { 2101 rtw89_err(rtwdev, "failed to send h2c\n"); 2102 dev_kfree_skb_any(skb); 2103 return ret; 2104 } 2105 2106 return 0; 2107 } 2108 2109 #define H2C_ROLE_MAINTAIN_LEN 4 2110 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 2111 struct rtw89_vif *rtwvif, 2112 struct rtw89_sta *rtwsta, 2113 enum rtw89_upd_mode upd_mode) 2114 { 2115 struct sk_buff *skb; 2116 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2117 u8 self_role; 2118 int ret; 2119 2120 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 2121 if (rtwsta) 2122 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2123 else 2124 self_role = rtwvif->self_role; 2125 } else { 2126 self_role = rtwvif->self_role; 2127 } 2128 2129 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 2130 if (!skb) { 2131 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2132 return -ENOMEM; 2133 } 2134 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 2135 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 2136 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 2137 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 2138 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 2139 2140 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2141 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 2142 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 2143 H2C_ROLE_MAINTAIN_LEN); 2144 2145 ret = rtw89_h2c_tx(rtwdev, skb, false); 2146 if (ret) { 2147 rtw89_err(rtwdev, "failed to send h2c\n"); 2148 goto fail; 2149 } 2150 2151 return 0; 2152 fail: 2153 dev_kfree_skb_any(skb); 2154 2155 return ret; 2156 } 2157 2158 #define H2C_JOIN_INFO_LEN 4 2159 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2160 struct rtw89_sta *rtwsta, bool dis_conn) 2161 { 2162 struct sk_buff *skb; 2163 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2164 u8 self_role = rtwvif->self_role; 2165 u8 net_type = rtwvif->net_type; 2166 int ret; 2167 2168 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 2169 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2170 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 2171 } 2172 2173 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 2174 if (!skb) { 2175 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2176 return -ENOMEM; 2177 } 2178 skb_put(skb, H2C_JOIN_INFO_LEN); 2179 SET_JOININFO_MACID(skb->data, mac_id); 2180 SET_JOININFO_OP(skb->data, dis_conn); 2181 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 2182 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 2183 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 2184 SET_JOININFO_ISHESTA(skb->data, 0); 2185 SET_JOININFO_DLBW(skb->data, 0); 2186 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 2187 SET_JOININFO_DL_T_PE(skb->data, 0); 2188 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 2189 SET_JOININFO_NET_TYPE(skb->data, net_type); 2190 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 2191 SET_JOININFO_SELF_ROLE(skb->data, self_role); 2192 2193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2194 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 2195 H2C_FUNC_MAC_JOININFO, 0, 1, 2196 H2C_JOIN_INFO_LEN); 2197 2198 ret = rtw89_h2c_tx(rtwdev, skb, false); 2199 if (ret) { 2200 rtw89_err(rtwdev, "failed to send h2c\n"); 2201 goto fail; 2202 } 2203 2204 return 0; 2205 fail: 2206 dev_kfree_skb_any(skb); 2207 2208 return ret; 2209 } 2210 2211 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 2212 bool pause) 2213 { 2214 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 2215 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 2216 struct sk_buff *skb; 2217 int ret; 2218 2219 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 2220 if (!skb) { 2221 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2222 return -ENOMEM; 2223 } 2224 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 2225 if (pause) 2226 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 2227 skb_put_data(skb, &h2c, len); 2228 2229 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2230 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2231 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 2232 len); 2233 2234 ret = rtw89_h2c_tx(rtwdev, skb, false); 2235 if (ret) { 2236 rtw89_err(rtwdev, "failed to send h2c\n"); 2237 goto fail; 2238 } 2239 2240 return 0; 2241 fail: 2242 dev_kfree_skb_any(skb); 2243 2244 return ret; 2245 } 2246 2247 #define H2C_EDCA_LEN 12 2248 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2249 u8 ac, u32 val) 2250 { 2251 struct sk_buff *skb; 2252 int ret; 2253 2254 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 2255 if (!skb) { 2256 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 2257 return -ENOMEM; 2258 } 2259 skb_put(skb, H2C_EDCA_LEN); 2260 RTW89_SET_EDCA_SEL(skb->data, 0); 2261 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 2262 RTW89_SET_EDCA_WMM(skb->data, 0); 2263 RTW89_SET_EDCA_AC(skb->data, ac); 2264 RTW89_SET_EDCA_PARAM(skb->data, val); 2265 2266 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2267 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2268 H2C_FUNC_USR_EDCA, 0, 1, 2269 H2C_EDCA_LEN); 2270 2271 ret = rtw89_h2c_tx(rtwdev, skb, false); 2272 if (ret) { 2273 rtw89_err(rtwdev, "failed to send h2c\n"); 2274 goto fail; 2275 } 2276 2277 return 0; 2278 fail: 2279 dev_kfree_skb_any(skb); 2280 2281 return ret; 2282 } 2283 2284 #define H2C_TSF32_TOGL_LEN 4 2285 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2286 bool en) 2287 { 2288 struct sk_buff *skb; 2289 u16 early_us = en ? 
2000 : 0;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_TSF32_TOGL_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx);
	RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en);
	RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port);
	RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_TSF32_TOGL, 0, 0,
			      H2C_TSF32_TOGL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_OFLD_CFG_LEN 8
int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
{
	static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
		return -ENOMEM;
	}
	skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
			      H2C_FUNC_OFLD_CFG, 0, 1,
			      H2C_OFLD_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev,
				  struct ieee80211_vif *vif,
				  bool connect)
{
	struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif);
	struct ieee80211_bss_conf *bss_conf = vif ?
&vif->bss_conf : NULL; 2362 struct rtw89_h2c_bcnfltr *h2c; 2363 u32 len = sizeof(*h2c); 2364 struct sk_buff *skb; 2365 int ret; 2366 2367 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2368 return -EINVAL; 2369 2370 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2371 return -EINVAL; 2372 2373 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2374 if (!skb) { 2375 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 2376 return -ENOMEM; 2377 } 2378 2379 skb_put(skb, len); 2380 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 2381 2382 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 2383 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 2384 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 2385 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 2386 RTW89_H2C_BCNFLTR_W0_MODE) | 2387 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 2388 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 2389 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 2390 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 2391 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 2392 2393 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2394 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2395 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 2396 2397 ret = rtw89_h2c_tx(rtwdev, skb, false); 2398 if (ret) { 2399 rtw89_err(rtwdev, "failed to send h2c\n"); 2400 goto fail; 2401 } 2402 2403 return 0; 2404 fail: 2405 dev_kfree_skb_any(skb); 2406 2407 return ret; 2408 } 2409 2410 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 2411 struct rtw89_rx_phy_ppdu *phy_ppdu) 2412 { 2413 struct rtw89_h2c_ofld_rssi *h2c; 2414 u32 len = sizeof(*h2c); 2415 struct sk_buff *skb; 2416 s8 rssi; 2417 int ret; 2418 2419 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2420 return -EINVAL; 2421 2422 if (!phy_ppdu) 2423 return -EINVAL; 2424 2425 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2426 if (!skb) { 2427 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 2428 return -ENOMEM; 2429 } 2430 2431 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 2432 skb_put(skb, len); 2433 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 2434 2435 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 2436 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 2437 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 2438 2439 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2440 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2441 H2C_FUNC_OFLD_RSSI, 0, 1, len); 2442 2443 ret = rtw89_h2c_tx(rtwdev, skb, false); 2444 if (ret) { 2445 rtw89_err(rtwdev, "failed to send h2c\n"); 2446 goto fail; 2447 } 2448 2449 return 0; 2450 fail: 2451 dev_kfree_skb_any(skb); 2452 2453 return ret; 2454 } 2455 2456 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2457 { 2458 struct rtw89_traffic_stats *stats = &rtwvif->stats; 2459 struct rtw89_h2c_ofld *h2c; 2460 u32 len = sizeof(*h2c); 2461 struct sk_buff *skb; 2462 int ret; 2463 2464 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2465 return -EINVAL; 2466 2467 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2468 if (!skb) { 2469 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 2470 return -ENOMEM; 2471 } 2472 2473 skb_put(skb, len); 2474 h2c = (struct rtw89_h2c_ofld *)skb->data; 2475 2476 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 2477 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 2478 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 2479 2480 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2481 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2482 H2C_FUNC_OFLD_TP, 0, 1, len); 2483 2484 ret = rtw89_h2c_tx(rtwdev, skb, false); 2485 if (ret) { 2486 rtw89_err(rtwdev, "failed to send h2c\n"); 2487 goto fail; 2488 } 2489 2490 return 0; 2491 fail: 2492 dev_kfree_skb_any(skb); 2493 2494 return ret; 2495 } 2496 2497 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 2498 { 2499 const struct rtw89_chip_info *chip = rtwdev->chip; 2500 struct rtw89_h2c_ra_v1 *h2c_v1; 2501 struct rtw89_h2c_ra *h2c; 2502 u32 len = sizeof(*h2c); 2503 bool format_v1 = false; 2504 struct sk_buff *skb; 2505 int ret; 2506 2507 if (chip->chip_gen == RTW89_CHIP_BE) { 2508 len = sizeof(*h2c_v1); 2509 format_v1 = true; 2510 } 2511 2512 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2513 if (!skb) { 2514 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2515 return -ENOMEM; 2516 } 2517 skb_put(skb, len); 2518 h2c = (struct rtw89_h2c_ra *)skb->data; 2519 rtw89_debug(rtwdev, RTW89_DBG_RA, 2520 "ra cmd msk: %llx ", ra->ra_mask); 2521 2522 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 2523 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 2524 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 2525 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 2526 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 2527 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 2528 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 2529 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 2530 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 2531 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 2532 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 2533 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 2534 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 2535 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 2536 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 2537 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 2538 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 2539 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 2540 2541 if (!format_v1) 2542 goto csi; 2543 2544 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 2545 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 2546 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 2547 2548 csi: 2549 if (!csi) 2550 goto done; 2551 2552 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 2553 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 2554 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 2555 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 2556 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 2557 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 2558 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 2559 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 2560 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 2561 2562 done: 2563 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2564 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 2565 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 2566 len); 2567 2568 ret = rtw89_h2c_tx(rtwdev, skb, false); 2569 if (ret) { 2570 rtw89_err(rtwdev, "failed 
to send h2c\n"); 2571 goto fail; 2572 } 2573 2574 return 0; 2575 fail: 2576 dev_kfree_skb_any(skb); 2577 2578 return ret; 2579 } 2580 2581 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 2582 { 2583 struct rtw89_btc *btc = &rtwdev->btc; 2584 struct rtw89_btc_dm *dm = &btc->dm; 2585 struct rtw89_btc_init_info *init_info = &dm->init_info; 2586 struct rtw89_btc_module *module = &init_info->module; 2587 struct rtw89_btc_ant_info *ant = &module->ant; 2588 struct rtw89_h2c_cxinit *h2c; 2589 u32 len = sizeof(*h2c); 2590 struct sk_buff *skb; 2591 int ret; 2592 2593 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2594 if (!skb) { 2595 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 2596 return -ENOMEM; 2597 } 2598 skb_put(skb, len); 2599 h2c = (struct rtw89_h2c_cxinit *)skb->data; 2600 2601 h2c->hdr.type = CXDRVINFO_INIT; 2602 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 2603 2604 h2c->ant_type = ant->type; 2605 h2c->ant_num = ant->num; 2606 h2c->ant_iso = ant->isolation; 2607 h2c->ant_info = 2608 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 2609 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 2610 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2611 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2612 2613 h2c->mod_rfe = module->rfe_type; 2614 h2c->mod_cv = module->cv; 2615 h2c->mod_info = 2616 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2617 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2618 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2619 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2620 h2c->mod_adie_kt = module->kt_ver_adie; 2621 h2c->wl_gch = init_info->wl_guard_ch; 2622 2623 h2c->info = 2624 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2625 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2626 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2627 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2628 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2629 2630 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2631 H2C_CAT_OUTSRC, BTFC_SET, 2632 SET_DRV_INFO, 0, 0, 2633 len); 2634 2635 ret = rtw89_h2c_tx(rtwdev, skb, false); 2636 if (ret) { 2637 rtw89_err(rtwdev, "failed to send h2c\n"); 2638 goto fail; 2639 } 2640 2641 return 0; 2642 fail: 2643 dev_kfree_skb_any(skb); 2644 2645 return ret; 2646 } 2647 2648 #define PORT_DATA_OFFSET 4 2649 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2650 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2651 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2652 2653 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2654 { 2655 struct rtw89_btc *btc = &rtwdev->btc; 2656 const struct rtw89_btc_ver *ver = btc->ver; 2657 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2658 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2659 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2660 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2661 struct sk_buff *skb; 2662 u32 len; 2663 u8 offset = 0; 2664 u8 *cmd; 2665 int ret; 2666 int i; 2667 2668 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2669 2670 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2671 if (!skb) { 2672 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2673 return -ENOMEM; 2674 } 2675 skb_put(skb, len); 2676 cmd = skb->data; 2677 2678 
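	/* CXDRVINFO_ROLE payload built below: a short header (type, length),
	 * the connect count and link mode, a role bitmap with one flag per
	 * interface type, then one entry per hardware port describing the
	 * active role on that port. In this v0 format the per-port entries
	 * are packed back to back (offset stays 0); the v1/v2 variants
	 * further down shift them by PORT_DATA_OFFSET and append the DBCC
	 * fields at the tail.
	 */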
	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

	RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
	RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
	RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
	RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
	RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
	RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
	RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
	RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
	RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);

	for (i = 0; i < RTW89_PORT_NUM; i++, active++) {
		RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset);
		RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_OUTSRC, BTFC_SET,
			      SET_DRV_INFO, 0, 0,
			      len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
	(4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)

int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
	struct rtw89_btc *btc = &rtwdev->btc;
	const struct rtw89_btc_ver *ver = btc->ver;
	struct rtw89_btc_wl_info *wl = &btc->cx.wl;
	struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
	struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
	struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
	struct sk_buff *skb;
	u32 len;
	u8 *cmd, offset;
	int ret;
	int i;

	len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
		return -ENOMEM;
	}
	skb_put(skb, len);
	cmd = skb->data;

	RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
	RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);

	RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
	RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);

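	/* Same role bitmap layout as the v0 command above: one flag per
	 * interface type (station, AP, ad-hoc, mesh, monitor, P2P, NAN).
	 */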
RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2765 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2766 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2767 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2768 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2769 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2770 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2771 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2772 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2773 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2774 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2775 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2776 2777 offset = PORT_DATA_OFFSET; 2778 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2779 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2780 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2781 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2782 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2783 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2784 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2785 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2786 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2787 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2788 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2789 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2790 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2791 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2792 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2793 } 2794 2795 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2796 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2797 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2798 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2799 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2800 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2801 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2802 2803 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2804 H2C_CAT_OUTSRC, BTFC_SET, 2805 SET_DRV_INFO, 0, 0, 2806 len); 2807 2808 ret = rtw89_h2c_tx(rtwdev, skb, false); 2809 if (ret) { 2810 rtw89_err(rtwdev, "failed to send h2c\n"); 2811 goto fail; 2812 } 2813 2814 return 0; 2815 fail: 2816 dev_kfree_skb_any(skb); 2817 2818 return ret; 2819 } 2820 2821 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2822 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2823 2824 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2825 { 2826 struct rtw89_btc *btc = &rtwdev->btc; 2827 const struct rtw89_btc_ver *ver = btc->ver; 2828 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2829 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2830 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2831 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2832 struct sk_buff *skb; 2833 u32 len; 2834 u8 *cmd, offset; 2835 int ret; 2836 int i; 2837 2838 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2839 2840 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2841 if (!skb) { 2842 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2843 
return -ENOMEM; 2844 } 2845 skb_put(skb, len); 2846 cmd = skb->data; 2847 2848 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2849 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2850 2851 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2852 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2853 2854 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2855 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2856 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2857 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2858 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2859 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2860 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2861 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2862 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2863 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2864 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2865 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2866 2867 offset = PORT_DATA_OFFSET; 2868 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2869 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2870 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2871 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2872 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2873 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2874 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2875 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2876 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2877 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2878 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2879 } 2880 2881 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2882 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2883 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2884 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2885 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2886 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2887 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2888 2889 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2890 H2C_CAT_OUTSRC, BTFC_SET, 2891 SET_DRV_INFO, 0, 0, 2892 len); 2893 2894 ret = rtw89_h2c_tx(rtwdev, skb, false); 2895 if (ret) { 2896 rtw89_err(rtwdev, "failed to send h2c\n"); 2897 goto fail; 2898 } 2899 2900 return 0; 2901 fail: 2902 dev_kfree_skb_any(skb); 2903 2904 return ret; 2905 } 2906 2907 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2908 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2909 { 2910 struct rtw89_btc *btc = &rtwdev->btc; 2911 const struct rtw89_btc_ver *ver = btc->ver; 2912 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2913 struct sk_buff *skb; 2914 u8 *cmd; 2915 int ret; 2916 2917 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2918 if (!skb) { 2919 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2920 return -ENOMEM; 2921 } 2922 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2923 cmd = skb->data; 2924 2925 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2926 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2927 2928 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2929 
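	/* trace_step is written only for the version-0 control format
	 * (ver->fcxctrl == 0); see the check below.
	 */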
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2930 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2931 if (ver->fcxctrl == 0) 2932 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2933 2934 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2935 H2C_CAT_OUTSRC, BTFC_SET, 2936 SET_DRV_INFO, 0, 0, 2937 H2C_LEN_CXDRVINFO_CTRL); 2938 2939 ret = rtw89_h2c_tx(rtwdev, skb, false); 2940 if (ret) { 2941 rtw89_err(rtwdev, "failed to send h2c\n"); 2942 goto fail; 2943 } 2944 2945 return 0; 2946 fail: 2947 dev_kfree_skb_any(skb); 2948 2949 return ret; 2950 } 2951 2952 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 2953 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev) 2954 { 2955 struct rtw89_btc *btc = &rtwdev->btc; 2956 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 2957 struct sk_buff *skb; 2958 u8 *cmd; 2959 int ret; 2960 2961 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 2962 if (!skb) { 2963 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 2964 return -ENOMEM; 2965 } 2966 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 2967 cmd = skb->data; 2968 2969 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX); 2970 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 2971 2972 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 2973 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 2974 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 2975 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 2976 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 2977 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 2978 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 2979 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 2980 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 2981 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 2982 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 2983 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 2984 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 2985 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 2986 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 2987 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 2988 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 2989 2990 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2991 H2C_CAT_OUTSRC, BTFC_SET, 2992 SET_DRV_INFO, 0, 0, 2993 H2C_LEN_CXDRVINFO_TRX); 2994 2995 ret = rtw89_h2c_tx(rtwdev, skb, false); 2996 if (ret) { 2997 rtw89_err(rtwdev, "failed to send h2c\n"); 2998 goto fail; 2999 } 3000 3001 return 0; 3002 fail: 3003 dev_kfree_skb_any(skb); 3004 3005 return ret; 3006 } 3007 3008 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 3009 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 3010 { 3011 struct rtw89_btc *btc = &rtwdev->btc; 3012 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 3013 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 3014 struct sk_buff *skb; 3015 u8 *cmd; 3016 int ret; 3017 3018 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 3019 if (!skb) { 3020 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 3021 return -ENOMEM; 3022 } 3023 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 3024 cmd = skb->data; 3025 3026 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 3027 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 3028 3029 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 3030 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 3031 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 3032 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 3033 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, 
rfk_info->type); 3034 3035 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3036 H2C_CAT_OUTSRC, BTFC_SET, 3037 SET_DRV_INFO, 0, 0, 3038 H2C_LEN_CXDRVINFO_RFK); 3039 3040 ret = rtw89_h2c_tx(rtwdev, skb, false); 3041 if (ret) { 3042 rtw89_err(rtwdev, "failed to send h2c\n"); 3043 goto fail; 3044 } 3045 3046 return 0; 3047 fail: 3048 dev_kfree_skb_any(skb); 3049 3050 return ret; 3051 } 3052 3053 #define H2C_LEN_PKT_OFLD 4 3054 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 3055 { 3056 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3057 struct sk_buff *skb; 3058 unsigned int cond; 3059 u8 *cmd; 3060 int ret; 3061 3062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 3063 if (!skb) { 3064 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 3065 return -ENOMEM; 3066 } 3067 skb_put(skb, H2C_LEN_PKT_OFLD); 3068 cmd = skb->data; 3069 3070 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 3071 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 3072 3073 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3074 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3075 H2C_FUNC_PACKET_OFLD, 1, 1, 3076 H2C_LEN_PKT_OFLD); 3077 3078 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 3079 3080 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3081 if (ret < 0) { 3082 rtw89_debug(rtwdev, RTW89_DBG_FW, 3083 "failed to del pkt ofld: id %d, ret %d\n", 3084 id, ret); 3085 return ret; 3086 } 3087 3088 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 3089 return 0; 3090 } 3091 3092 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 3093 struct sk_buff *skb_ofld) 3094 { 3095 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3096 struct sk_buff *skb; 3097 unsigned int cond; 3098 u8 *cmd; 3099 u8 alloc_id; 3100 int ret; 3101 3102 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 3103 RTW89_MAX_PKT_OFLD_NUM); 3104 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 3105 return -ENOSPC; 3106 3107 *id = alloc_id; 3108 3109 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 3110 if (!skb) { 3111 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 3112 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 3113 return -ENOMEM; 3114 } 3115 skb_put(skb, H2C_LEN_PKT_OFLD); 3116 cmd = skb->data; 3117 3118 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 3119 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 3120 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 3121 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 3122 3123 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3124 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3125 H2C_FUNC_PACKET_OFLD, 1, 1, 3126 H2C_LEN_PKT_OFLD + skb_ofld->len); 3127 3128 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 3129 3130 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3131 if (ret < 0) { 3132 rtw89_debug(rtwdev, RTW89_DBG_FW, 3133 "failed to add pkt ofld: id %d, ret %d\n", 3134 alloc_id, ret); 3135 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 3136 return ret; 3137 } 3138 3139 return 0; 3140 } 3141 3142 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 3143 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 3144 struct list_head *chan_list) 3145 { 3146 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3147 struct rtw89_mac_chinfo *ch_info; 3148 struct sk_buff *skb; 3149 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 3150 unsigned int cond; 3151 u8 
*cmd; 3152 int ret; 3153 3154 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 3155 if (!skb) { 3156 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 3157 return -ENOMEM; 3158 } 3159 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 3160 cmd = skb->data; 3161 3162 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 3163 /* in unit of 4 bytes */ 3164 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 3165 3166 list_for_each_entry(ch_info, chan_list, list) { 3167 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 3168 3169 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 3170 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 3171 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 3172 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 3173 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 3174 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 3175 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 3176 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 3177 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 3178 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 3179 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 3180 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 3181 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 3182 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 3183 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 3184 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 3185 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 3186 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 3187 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 3188 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 3189 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 3190 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 3191 } 3192 3193 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3194 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3195 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 3196 3197 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 3198 3199 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3200 if (ret) { 3201 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 3202 return ret; 3203 } 3204 3205 return 0; 3206 } 3207 3208 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 3209 struct rtw89_scan_option *option, 3210 struct rtw89_vif *rtwvif) 3211 { 3212 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3213 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3214 struct rtw89_h2c_scanofld *h2c; 3215 u32 len = sizeof(*h2c); 3216 struct sk_buff *skb; 3217 unsigned int cond; 3218 int ret; 3219 3220 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3221 if (!skb) { 3222 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 3223 return -ENOMEM; 3224 } 3225 skb_put(skb, len); 3226 h2c = (struct rtw89_h2c_scanofld *)skb->data; 3227 3228 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 3229 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 3230 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 3231 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 3232 3233 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 3234 le32_encode_bits(option->target_ch_mode, 3235 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 3236 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 3237 RTW89_H2C_SCANOFLD_W1_START_MODE) | 3238 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 3239 3240 if 
(option->target_ch_mode) { 3241 h2c->w1 |= le32_encode_bits(op->band_width, 3242 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 3243 le32_encode_bits(op->primary_channel, 3244 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 3245 le32_encode_bits(op->channel, 3246 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 3247 h2c->w0 |= le32_encode_bits(op->band_type, 3248 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 3249 } 3250 3251 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3252 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3253 H2C_FUNC_SCANOFLD, 1, 1, 3254 len); 3255 3256 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 3257 3258 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3259 if (ret) { 3260 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 3261 return ret; 3262 } 3263 3264 return 0; 3265 } 3266 3267 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 3268 struct rtw89_fw_h2c_rf_reg_info *info, 3269 u16 len, u8 page) 3270 { 3271 struct sk_buff *skb; 3272 u8 class = info->rf_path == RF_PATH_A ? 3273 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 3274 int ret; 3275 3276 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3277 if (!skb) { 3278 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 3279 return -ENOMEM; 3280 } 3281 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 3282 3283 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3284 H2C_CAT_OUTSRC, class, page, 0, 0, 3285 len); 3286 3287 ret = rtw89_h2c_tx(rtwdev, skb, false); 3288 if (ret) { 3289 rtw89_err(rtwdev, "failed to send h2c\n"); 3290 goto fail; 3291 } 3292 3293 return 0; 3294 fail: 3295 dev_kfree_skb_any(skb); 3296 3297 return ret; 3298 } 3299 3300 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 3301 { 3302 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 3303 struct rtw89_fw_h2c_rf_get_mccch *mccch; 3304 struct sk_buff *skb; 3305 int ret; 3306 u8 idx; 3307 3308 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 3309 if (!skb) { 3310 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 3311 return -ENOMEM; 3312 } 3313 skb_put(skb, sizeof(*mccch)); 3314 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 3315 3316 idx = rfk_mcc->table_idx; 3317 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 3318 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 3319 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 3320 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 3321 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 3322 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 3323 3324 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3325 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 3326 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 3327 sizeof(*mccch)); 3328 3329 ret = rtw89_h2c_tx(rtwdev, skb, false); 3330 if (ret) { 3331 rtw89_err(rtwdev, "failed to send h2c\n"); 3332 goto fail; 3333 } 3334 3335 return 0; 3336 fail: 3337 dev_kfree_skb_any(skb); 3338 3339 return ret; 3340 } 3341 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 3342 3343 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 3344 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 3345 bool rack, bool dack) 3346 { 3347 struct sk_buff *skb; 3348 int ret; 3349 3350 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3351 if (!skb) { 3352 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 3353 return -ENOMEM; 3354 } 3355 skb_put_data(skb, buf, len); 3356 3357 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3358 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 3359 len); 3360 3361 ret = rtw89_h2c_tx(rtwdev, skb, false); 3362 if (ret) { 3363 
rtw89_err(rtwdev, "failed to send h2c\n"); 3364 goto fail; 3365 } 3366 3367 return 0; 3368 fail: 3369 dev_kfree_skb_any(skb); 3370 3371 return ret; 3372 } 3373 3374 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 3375 { 3376 struct sk_buff *skb; 3377 int ret; 3378 3379 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 3380 if (!skb) { 3381 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 3382 return -ENOMEM; 3383 } 3384 skb_put_data(skb, buf, len); 3385 3386 ret = rtw89_h2c_tx(rtwdev, skb, false); 3387 if (ret) { 3388 rtw89_err(rtwdev, "failed to send h2c\n"); 3389 goto fail; 3390 } 3391 3392 return 0; 3393 fail: 3394 dev_kfree_skb_any(skb); 3395 3396 return ret; 3397 } 3398 3399 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 3400 { 3401 struct rtw89_early_h2c *early_h2c; 3402 3403 lockdep_assert_held(&rtwdev->mutex); 3404 3405 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 3406 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 3407 } 3408 } 3409 3410 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 3411 { 3412 struct rtw89_early_h2c *early_h2c, *tmp; 3413 3414 mutex_lock(&rtwdev->mutex); 3415 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 3416 list_del(&early_h2c->list); 3417 kfree(early_h2c->h2c); 3418 kfree(early_h2c); 3419 } 3420 mutex_unlock(&rtwdev->mutex); 3421 } 3422 3423 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 3424 { 3425 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 3426 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3427 3428 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 3429 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 3430 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 3431 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 3432 } 3433 3434 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 3435 struct sk_buff *c2h) 3436 { 3437 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3438 u8 category = attr->category; 3439 u8 class = attr->class; 3440 u8 func = attr->func; 3441 3442 switch (category) { 3443 default: 3444 return false; 3445 case RTW89_C2H_CAT_MAC: 3446 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 3447 } 3448 } 3449 3450 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 3451 { 3452 rtw89_fw_c2h_parse_attr(c2h); 3453 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 3454 goto enqueue; 3455 3456 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 3457 dev_kfree_skb_any(c2h); 3458 return; 3459 3460 enqueue: 3461 skb_queue_tail(&rtwdev->c2h_queue, c2h); 3462 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 3463 } 3464 3465 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 3466 struct sk_buff *skb) 3467 { 3468 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 3469 u8 category = attr->category; 3470 u8 class = attr->class; 3471 u8 func = attr->func; 3472 u16 len = attr->len; 3473 bool dump = true; 3474 3475 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 3476 return; 3477 3478 switch (category) { 3479 case RTW89_C2H_CAT_TEST: 3480 break; 3481 case RTW89_C2H_CAT_MAC: 3482 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 3483 if (class == RTW89_MAC_C2H_CLASS_INFO && 3484 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 3485 dump = false; 3486 break; 3487 case RTW89_C2H_CAT_OUTSRC: 3488 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 3489 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 3490 rtw89_btc_c2h_handle(rtwdev, skb, len, 
class, func); 3491 else 3492 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 3493 break; 3494 } 3495 3496 if (dump) 3497 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 3498 } 3499 3500 void rtw89_fw_c2h_work(struct work_struct *work) 3501 { 3502 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 3503 c2h_work); 3504 struct sk_buff *skb, *tmp; 3505 3506 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 3507 skb_unlink(skb, &rtwdev->c2h_queue); 3508 mutex_lock(&rtwdev->mutex); 3509 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 3510 mutex_unlock(&rtwdev->mutex); 3511 dev_kfree_skb_any(skb); 3512 } 3513 } 3514 3515 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 3516 struct rtw89_mac_h2c_info *info) 3517 { 3518 const struct rtw89_chip_info *chip = rtwdev->chip; 3519 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3520 const u32 *h2c_reg = chip->h2c_regs; 3521 u8 i, val, len; 3522 int ret; 3523 3524 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 3525 rtwdev, chip->h2c_ctrl_reg); 3526 if (ret) { 3527 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 3528 return ret; 3529 } 3530 3531 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 3532 sizeof(info->u.h2creg[0])); 3533 3534 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 3535 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 3536 3537 for (i = 0; i < RTW89_H2CREG_MAX; i++) 3538 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 3539 3540 fw_info->h2c_counter++; 3541 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 3542 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 3543 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 3544 3545 return 0; 3546 } 3547 3548 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 3549 struct rtw89_mac_c2h_info *info) 3550 { 3551 const struct rtw89_chip_info *chip = rtwdev->chip; 3552 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3553 const u32 *c2h_reg = chip->c2h_regs; 3554 u32 ret; 3555 u8 i, val; 3556 3557 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 3558 3559 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 3560 RTW89_C2H_TIMEOUT, false, rtwdev, 3561 chip->c2h_ctrl_reg); 3562 if (ret) { 3563 rtw89_warn(rtwdev, "c2h reg timeout\n"); 3564 return ret; 3565 } 3566 3567 for (i = 0; i < RTW89_C2HREG_MAX; i++) 3568 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 3569 3570 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 3571 3572 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 3573 info->content_len = 3574 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 3575 RTW89_C2HREG_HDR_LEN; 3576 3577 fw_info->c2h_counter++; 3578 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 3579 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 3580 3581 return 0; 3582 } 3583 3584 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 3585 struct rtw89_mac_h2c_info *h2c_info, 3586 struct rtw89_mac_c2h_info *c2h_info) 3587 { 3588 u32 ret; 3589 3590 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 3591 lockdep_assert_held(&rtwdev->mutex); 3592 3593 if (!h2c_info && !c2h_info) 3594 return -EINVAL; 3595 3596 if (!h2c_info) 3597 goto recv_c2h; 3598 3599 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 3600 if (ret) 3601 return ret; 3602 3603 recv_c2h: 3604 if (!c2h_info) 3605 return 0; 3606 3607 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 3608 if (ret) 3609 return ret; 3610 3611 return 0; 3612 } 3613 3614 void 
rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3615 { 3616 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3617 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3618 return; 3619 } 3620 3621 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3622 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3623 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3624 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3625 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3626 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3627 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3628 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3629 3630 rtw89_fw_prog_cnt_dump(rtwdev); 3631 } 3632 3633 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3634 { 3635 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3636 struct rtw89_pktofld_info *info, *tmp; 3637 u8 idx; 3638 3639 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3640 if (!(rtwdev->chip->support_bands & BIT(idx))) 3641 continue; 3642 3643 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3644 if (test_bit(info->id, rtwdev->pkt_offload)) 3645 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3646 list_del(&info->list); 3647 kfree(info); 3648 } 3649 } 3650 } 3651 3652 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3653 struct rtw89_vif *rtwvif, 3654 struct rtw89_pktofld_info *info, 3655 enum nl80211_band band, u8 ssid_idx) 3656 { 3657 struct cfg80211_scan_request *req = rtwvif->scan_req; 3658 3659 if (band != NL80211_BAND_6GHZ) 3660 return false; 3661 3662 if (req->ssids[ssid_idx].ssid_len) { 3663 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3664 req->ssids[ssid_idx].ssid_len); 3665 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3666 return false; 3667 } else { 3668 return true; 3669 } 3670 } 3671 3672 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3673 struct rtw89_vif *rtwvif, 3674 struct sk_buff *skb, u8 ssid_idx) 3675 { 3676 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3677 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3678 struct rtw89_pktofld_info *info; 3679 struct sk_buff *new; 3680 int ret = 0; 3681 u8 band; 3682 3683 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3684 if (!(rtwdev->chip->support_bands & BIT(band))) 3685 continue; 3686 3687 new = skb_copy(skb, GFP_KERNEL); 3688 if (!new) { 3689 ret = -ENOMEM; 3690 goto out; 3691 } 3692 skb_put_data(new, ies->ies[band], ies->len[band]); 3693 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3694 3695 info = kzalloc(sizeof(*info), GFP_KERNEL); 3696 if (!info) { 3697 ret = -ENOMEM; 3698 kfree_skb(new); 3699 goto out; 3700 } 3701 3702 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3703 ssid_idx)) { 3704 kfree_skb(new); 3705 kfree(info); 3706 goto out; 3707 } 3708 3709 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3710 if (ret) { 3711 kfree_skb(new); 3712 kfree(info); 3713 goto out; 3714 } 3715 3716 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3717 kfree_skb(new); 3718 } 3719 out: 3720 return ret; 3721 } 3722 3723 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3724 struct rtw89_vif *rtwvif) 3725 { 3726 struct cfg80211_scan_request *req = rtwvif->scan_req; 3727 struct sk_buff *skb; 3728 u8 num = req->n_ssids, i; 3729 int ret; 3730 3731 for (i = 0; i < num; i++) { 3732 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3733 
req->ssids[i].ssid, 3734 req->ssids[i].ssid_len, 3735 req->ie_len); 3736 if (!skb) 3737 return -ENOMEM; 3738 3739 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3740 kfree_skb(skb); 3741 3742 if (ret) 3743 return ret; 3744 } 3745 3746 return 0; 3747 } 3748 3749 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3750 struct cfg80211_scan_request *req, 3751 struct rtw89_mac_chinfo *ch_info) 3752 { 3753 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3754 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3755 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3756 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3757 struct cfg80211_scan_6ghz_params *params; 3758 struct rtw89_pktofld_info *info, *tmp; 3759 struct ieee80211_hdr *hdr; 3760 struct sk_buff *skb; 3761 bool found; 3762 int ret = 0; 3763 u8 i; 3764 3765 if (!req->n_6ghz_params) 3766 return 0; 3767 3768 for (i = 0; i < req->n_6ghz_params; i++) { 3769 params = &req->scan_6ghz_params[i]; 3770 3771 if (req->channels[params->channel_idx]->hw_value != 3772 ch_info->pri_ch) 3773 continue; 3774 3775 found = false; 3776 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3777 if (ether_addr_equal(tmp->bssid, params->bssid)) { 3778 found = true; 3779 break; 3780 } 3781 } 3782 if (found) 3783 continue; 3784 3785 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3786 NULL, 0, req->ie_len); 3787 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3788 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3789 hdr = (struct ieee80211_hdr *)skb->data; 3790 ether_addr_copy(hdr->addr3, params->bssid); 3791 3792 info = kzalloc(sizeof(*info), GFP_KERNEL); 3793 if (!info) { 3794 ret = -ENOMEM; 3795 kfree_skb(skb); 3796 goto out; 3797 } 3798 3799 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3800 if (ret) { 3801 kfree_skb(skb); 3802 kfree(info); 3803 goto out; 3804 } 3805 3806 ether_addr_copy(info->bssid, params->bssid); 3807 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3808 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3809 3810 ch_info->tx_pkt = true; 3811 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3812 3813 kfree_skb(skb); 3814 } 3815 3816 out: 3817 return ret; 3818 } 3819 3820 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3821 int ssid_num, 3822 struct rtw89_mac_chinfo *ch_info) 3823 { 3824 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3825 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3826 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3827 struct cfg80211_scan_request *req = rtwvif->scan_req; 3828 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3829 struct rtw89_pktofld_info *info; 3830 u8 band, probe_count = 0; 3831 int ret; 3832 3833 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3834 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3835 ch_info->bw = RTW89_SCAN_WIDTH; 3836 ch_info->tx_pkt = true; 3837 ch_info->cfg_tx_pwr = false; 3838 ch_info->tx_pwr_idx = 0; 3839 ch_info->tx_null = false; 3840 ch_info->pause_data = false; 3841 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3842 3843 if (ch_info->ch_band == RTW89_BAND_6G) { 3844 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3845 !ch_info->is_psc) { 3846 ch_info->tx_pkt = false; 3847 if (!req->duration_mandatory) 3848 ch_info->period -= RTW89_DWELL_TIME_6G; 3849 } 3850 } 3851 3852 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 
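	/* Appending RNR-derived probe requests is best-effort: a failure is
	 * only warned about below and scanning proceeds with the packets
	 * already offloaded.
	 */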
3853 if (ret) 3854 rtw89_warn(rtwdev, "RNR fails: %d\n", ret); 3855 3856 if (ssid_num) { 3857 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 3858 3859 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 3860 if (info->channel_6ghz && 3861 ch_info->pri_ch != info->channel_6ghz) 3862 continue; 3863 ch_info->pkt_id[probe_count++] = info->id; 3864 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 3865 break; 3866 } 3867 ch_info->num_pkt = probe_count; 3868 } 3869 3870 switch (chan_type) { 3871 case RTW89_CHAN_OPERATE: 3872 ch_info->central_ch = op->channel; 3873 ch_info->pri_ch = op->primary_channel; 3874 ch_info->ch_band = op->band_type; 3875 ch_info->bw = op->band_width; 3876 ch_info->tx_null = true; 3877 ch_info->num_pkt = 0; 3878 break; 3879 case RTW89_CHAN_DFS: 3880 if (ch_info->ch_band != RTW89_BAND_6G) 3881 ch_info->period = max_t(u8, ch_info->period, 3882 RTW89_DFS_CHAN_TIME); 3883 ch_info->dwell_time = RTW89_DWELL_TIME; 3884 break; 3885 case RTW89_CHAN_ACTIVE: 3886 break; 3887 default: 3888 rtw89_err(rtwdev, "Channel type out of bound\n"); 3889 } 3890 } 3891 3892 static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 3893 struct rtw89_vif *rtwvif, bool connected) 3894 { 3895 struct cfg80211_scan_request *req = rtwvif->scan_req; 3896 struct rtw89_mac_chinfo *ch_info, *tmp; 3897 struct ieee80211_channel *channel; 3898 struct list_head chan_list; 3899 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 3900 int list_len, off_chan_time = 0; 3901 enum rtw89_chan_type type; 3902 int ret = 0; 3903 u32 idx; 3904 3905 INIT_LIST_HEAD(&chan_list); 3906 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 3907 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 3908 idx++, list_len++) { 3909 channel = req->channels[idx]; 3910 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 3911 if (!ch_info) { 3912 ret = -ENOMEM; 3913 goto out; 3914 } 3915 3916 if (req->duration_mandatory) 3917 ch_info->period = req->duration; 3918 else if (channel->band == NL80211_BAND_6GHZ) 3919 ch_info->period = RTW89_CHANNEL_TIME_6G + 3920 RTW89_DWELL_TIME_6G; 3921 else 3922 ch_info->period = RTW89_CHANNEL_TIME; 3923 3924 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 3925 ch_info->central_ch = channel->hw_value; 3926 ch_info->pri_ch = channel->hw_value; 3927 ch_info->rand_seq_num = random_seq; 3928 ch_info->is_psc = cfg80211_channel_is_psc(channel); 3929 3930 if (channel->flags & 3931 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 3932 type = RTW89_CHAN_DFS; 3933 else 3934 type = RTW89_CHAN_ACTIVE; 3935 rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info); 3936 3937 if (connected && 3938 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 3939 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 3940 if (!tmp) { 3941 ret = -ENOMEM; 3942 kfree(ch_info); 3943 goto out; 3944 } 3945 3946 type = RTW89_CHAN_OPERATE; 3947 tmp->period = req->duration_mandatory ? 
3948 req->duration : RTW89_CHANNEL_TIME; 3949 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3950 list_add_tail(&tmp->list, &chan_list); 3951 off_chan_time = 0; 3952 list_len++; 3953 } 3954 list_add_tail(&ch_info->list, &chan_list); 3955 off_chan_time += ch_info->period; 3956 } 3957 rtwdev->scan_info.last_chan_idx = idx; 3958 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3959 3960 out: 3961 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3962 list_del(&ch_info->list); 3963 kfree(ch_info); 3964 } 3965 3966 return ret; 3967 } 3968 3969 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3970 struct rtw89_vif *rtwvif, bool connected) 3971 { 3972 int ret; 3973 3974 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3975 if (ret) { 3976 rtw89_err(rtwdev, "Update probe request failed\n"); 3977 goto out; 3978 } 3979 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3980 out: 3981 return ret; 3982 } 3983 3984 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3985 struct ieee80211_scan_request *scan_req) 3986 { 3987 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3988 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3989 struct cfg80211_scan_request *req = &scan_req->req; 3990 u32 rx_fltr = rtwdev->hal.rx_fltr; 3991 u8 mac_addr[ETH_ALEN]; 3992 3993 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 3994 rtwdev->scan_info.scanning_vif = vif; 3995 rtwdev->scan_info.last_chan_idx = 0; 3996 rtwvif->scan_ies = &scan_req->ies; 3997 rtwvif->scan_req = req; 3998 ieee80211_stop_queues(rtwdev->hw); 3999 4000 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 4001 get_random_mask_addr(mac_addr, req->mac_addr, 4002 req->mac_addr_mask); 4003 else 4004 ether_addr_copy(mac_addr, vif->addr); 4005 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 4006 4007 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 4008 rx_fltr &= ~B_AX_A_BC; 4009 rx_fltr &= ~B_AX_A_A1_MATCH; 4010 rtw89_write32_mask(rtwdev, 4011 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 4012 B_AX_RX_FLTR_CFG_MASK, 4013 rx_fltr); 4014 4015 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 4016 } 4017 4018 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 4019 bool aborted) 4020 { 4021 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4022 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4023 struct cfg80211_scan_info info = { 4024 .aborted = aborted, 4025 }; 4026 struct rtw89_vif *rtwvif; 4027 4028 if (!vif) 4029 return; 4030 4031 rtw89_write32_mask(rtwdev, 4032 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 4033 B_AX_RX_FLTR_CFG_MASK, 4034 rtwdev->hal.rx_fltr); 4035 4036 rtw89_core_scan_complete(rtwdev, vif, true); 4037 ieee80211_scan_completed(rtwdev->hw, &info); 4038 ieee80211_wake_queues(rtwdev->hw); 4039 4040 rtw89_release_pkt_list(rtwdev); 4041 rtwvif = (struct rtw89_vif *)vif->drv_priv; 4042 rtwvif->scan_req = NULL; 4043 rtwvif->scan_ies = NULL; 4044 scan_info->last_chan_idx = 0; 4045 scan_info->scanning_vif = NULL; 4046 4047 rtw89_chanctx_proceed(rtwdev); 4048 } 4049 4050 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 4051 { 4052 rtw89_hw_scan_offload(rtwdev, vif, false); 4053 rtw89_hw_scan_complete(rtwdev, vif, true); 4054 } 4055 4056 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 4057 bool enable) 4058 { 4059 struct rtw89_scan_option opt = {0}; 4060 struct rtw89_vif *rtwvif; 4061 bool 
connected; 4062 int ret = 0; 4063 4064 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 4065 if (!rtwvif) 4066 return -EINVAL; 4067 4068 /* This variable implies connected or during attempt to connect */ 4069 connected = !is_zero_ether_addr(rtwvif->bssid); 4070 opt.enable = enable; 4071 opt.target_ch_mode = connected; 4072 if (enable) { 4073 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 4074 if (ret) 4075 goto out; 4076 } 4077 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 4078 out: 4079 return ret; 4080 } 4081 4082 #define H2C_FW_CPU_EXCEPTION_LEN 4 4083 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 4084 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 4085 { 4086 struct sk_buff *skb; 4087 int ret; 4088 4089 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 4090 if (!skb) { 4091 rtw89_err(rtwdev, 4092 "failed to alloc skb for fw cpu exception\n"); 4093 return -ENOMEM; 4094 } 4095 4096 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 4097 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 4098 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 4099 4100 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4101 H2C_CAT_TEST, 4102 H2C_CL_FW_STATUS_TEST, 4103 H2C_FUNC_CPU_EXCEPTION, 0, 0, 4104 H2C_FW_CPU_EXCEPTION_LEN); 4105 4106 ret = rtw89_h2c_tx(rtwdev, skb, false); 4107 if (ret) { 4108 rtw89_err(rtwdev, "failed to send h2c\n"); 4109 goto fail; 4110 } 4111 4112 return 0; 4113 4114 fail: 4115 dev_kfree_skb_any(skb); 4116 return ret; 4117 } 4118 4119 #define H2C_PKT_DROP_LEN 24 4120 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 4121 const struct rtw89_pkt_drop_params *params) 4122 { 4123 struct sk_buff *skb; 4124 int ret; 4125 4126 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 4127 if (!skb) { 4128 rtw89_err(rtwdev, 4129 "failed to alloc skb for packet drop\n"); 4130 return -ENOMEM; 4131 } 4132 4133 switch (params->sel) { 4134 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 4135 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 4136 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 4137 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 4138 case RTW89_PKT_DROP_SEL_BAND_ONCE: 4139 break; 4140 default: 4141 rtw89_debug(rtwdev, RTW89_DBG_FW, 4142 "H2C of pkt drop might not fully support sel: %d yet\n", 4143 params->sel); 4144 break; 4145 } 4146 4147 skb_put(skb, H2C_PKT_DROP_LEN); 4148 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 4149 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 4150 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 4151 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 4152 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 4153 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 4154 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 4155 params->macid_band_sel[0]); 4156 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 4157 params->macid_band_sel[1]); 4158 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 4159 params->macid_band_sel[2]); 4160 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 4161 params->macid_band_sel[3]); 4162 4163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4164 H2C_CAT_MAC, 4165 H2C_CL_MAC_FW_OFLD, 4166 H2C_FUNC_PKT_DROP, 0, 0, 4167 H2C_PKT_DROP_LEN); 4168 4169 ret = rtw89_h2c_tx(rtwdev, skb, false); 4170 if (ret) { 4171 rtw89_err(rtwdev, "failed to send h2c\n"); 4172 goto fail; 4173 } 4174 4175 return 0; 4176 4177 fail: 4178 dev_kfree_skb_any(skb); 4179 return ret; 4180 } 4181 4182 #define H2C_KEEP_ALIVE_LEN 4 4183 int rtw89_fw_h2c_keep_alive(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 4184 bool enable) 4185 { 4186 struct sk_buff *skb; 4187 u8 pkt_id = 0; 4188 int ret; 4189 4190 if (enable) { 4191 ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 4192 RTW89_PKT_OFLD_TYPE_NULL_DATA, 4193 &pkt_id); 4194 if (ret) 4195 return -EPERM; 4196 } 4197 4198 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 4199 if (!skb) { 4200 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 4201 return -ENOMEM; 4202 } 4203 4204 skb_put(skb, H2C_KEEP_ALIVE_LEN); 4205 4206 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 4207 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 4208 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 4209 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 4210 4211 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4212 H2C_CAT_MAC, 4213 H2C_CL_MAC_WOW, 4214 H2C_FUNC_KEEP_ALIVE, 0, 1, 4215 H2C_KEEP_ALIVE_LEN); 4216 4217 ret = rtw89_h2c_tx(rtwdev, skb, false); 4218 if (ret) { 4219 rtw89_err(rtwdev, "failed to send h2c\n"); 4220 goto fail; 4221 } 4222 4223 return 0; 4224 4225 fail: 4226 dev_kfree_skb_any(skb); 4227 4228 return ret; 4229 } 4230 4231 #define H2C_DISCONNECT_DETECT_LEN 8 4232 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 4233 struct rtw89_vif *rtwvif, bool enable) 4234 { 4235 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 4236 struct sk_buff *skb; 4237 u8 macid = rtwvif->mac_id; 4238 int ret; 4239 4240 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 4241 if (!skb) { 4242 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 4243 return -ENOMEM; 4244 } 4245 4246 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 4247 4248 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 4249 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 4250 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 4251 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 4252 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 4253 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 4254 } 4255 4256 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4257 H2C_CAT_MAC, 4258 H2C_CL_MAC_WOW, 4259 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 4260 H2C_DISCONNECT_DETECT_LEN); 4261 4262 ret = rtw89_h2c_tx(rtwdev, skb, false); 4263 if (ret) { 4264 rtw89_err(rtwdev, "failed to send h2c\n"); 4265 goto fail; 4266 } 4267 4268 return 0; 4269 4270 fail: 4271 dev_kfree_skb_any(skb); 4272 4273 return ret; 4274 } 4275 4276 #define H2C_WOW_GLOBAL_LEN 8 4277 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 4278 bool enable) 4279 { 4280 struct sk_buff *skb; 4281 u8 macid = rtwvif->mac_id; 4282 int ret; 4283 4284 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 4285 if (!skb) { 4286 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 4287 return -ENOMEM; 4288 } 4289 4290 skb_put(skb, H2C_WOW_GLOBAL_LEN); 4291 4292 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 4293 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 4294 4295 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4296 H2C_CAT_MAC, 4297 H2C_CL_MAC_WOW, 4298 H2C_FUNC_WOW_GLOBAL, 0, 1, 4299 H2C_WOW_GLOBAL_LEN); 4300 4301 ret = rtw89_h2c_tx(rtwdev, skb, false); 4302 if (ret) { 4303 rtw89_err(rtwdev, "failed to send h2c\n"); 4304 goto fail; 4305 } 4306 4307 return 0; 4308 4309 fail: 4310 dev_kfree_skb_any(skb); 4311 4312 return ret; 4313 } 4314 4315 #define H2C_WAKEUP_CTRL_LEN 4 4316 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 4317 struct rtw89_vif *rtwvif, 
4318 bool enable) 4319 { 4320 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 4321 struct sk_buff *skb; 4322 u8 macid = rtwvif->mac_id; 4323 int ret; 4324 4325 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 4326 if (!skb) { 4327 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 4328 return -ENOMEM; 4329 } 4330 4331 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 4332 4333 if (rtw_wow->pattern_cnt) 4334 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 4335 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 4336 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 4337 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 4338 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 4339 4340 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 4341 4342 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4343 H2C_CAT_MAC, 4344 H2C_CL_MAC_WOW, 4345 H2C_FUNC_WAKEUP_CTRL, 0, 1, 4346 H2C_WAKEUP_CTRL_LEN); 4347 4348 ret = rtw89_h2c_tx(rtwdev, skb, false); 4349 if (ret) { 4350 rtw89_err(rtwdev, "failed to send h2c\n"); 4351 goto fail; 4352 } 4353 4354 return 0; 4355 4356 fail: 4357 dev_kfree_skb_any(skb); 4358 4359 return ret; 4360 } 4361 4362 #define H2C_WOW_CAM_UPD_LEN 24 4363 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 4364 struct rtw89_wow_cam_info *cam_info) 4365 { 4366 struct sk_buff *skb; 4367 int ret; 4368 4369 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 4370 if (!skb) { 4371 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 4372 return -ENOMEM; 4373 } 4374 4375 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 4376 4377 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 4378 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 4379 if (cam_info->valid) { 4380 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 4381 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 4382 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 4383 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 4384 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 4385 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 4386 cam_info->negative_pattern_match); 4387 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 4388 cam_info->skip_mac_hdr); 4389 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 4390 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 4391 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 4392 } 4393 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 4394 4395 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4396 H2C_CAT_MAC, 4397 H2C_CL_MAC_WOW, 4398 H2C_FUNC_WOW_CAM_UPD, 0, 1, 4399 H2C_WOW_CAM_UPD_LEN); 4400 4401 ret = rtw89_h2c_tx(rtwdev, skb, false); 4402 if (ret) { 4403 rtw89_err(rtwdev, "failed to send h2c\n"); 4404 goto fail; 4405 } 4406 4407 return 0; 4408 fail: 4409 dev_kfree_skb_any(skb); 4410 4411 return ret; 4412 } 4413 4414 /* Return < 0, if failures happen during waiting for the condition. 4415 * Return 0, when waiting for the condition succeeds. 4416 * Return > 0, if the wait is considered unreachable due to driver/FW design, 4417 * where 1 means during SER. 
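 *
 * The condition value must match what the completing C2H handler reports
 * back for this request; the MCC helpers below derive it with
 * RTW89_MCC_WAIT_COND(group, func).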
4418 */ 4419 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 4420 struct rtw89_wait_info *wait, unsigned int cond) 4421 { 4422 int ret; 4423 4424 ret = rtw89_h2c_tx(rtwdev, skb, false); 4425 if (ret) { 4426 rtw89_err(rtwdev, "failed to send h2c\n"); 4427 dev_kfree_skb_any(skb); 4428 return -EBUSY; 4429 } 4430 4431 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 4432 return 1; 4433 4434 return rtw89_wait_for_cond(wait, cond); 4435 } 4436 4437 #define H2C_ADD_MCC_LEN 16 4438 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 4439 const struct rtw89_fw_mcc_add_req *p) 4440 { 4441 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4442 struct sk_buff *skb; 4443 unsigned int cond; 4444 4445 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 4446 if (!skb) { 4447 rtw89_err(rtwdev, 4448 "failed to alloc skb for add mcc\n"); 4449 return -ENOMEM; 4450 } 4451 4452 skb_put(skb, H2C_ADD_MCC_LEN); 4453 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 4454 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 4455 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 4456 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 4457 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 4458 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 4459 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 4460 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 4461 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 4462 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 4463 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 4464 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 4465 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 4466 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 4467 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 4468 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 4469 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 4470 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 4471 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 4472 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 4473 4474 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4475 H2C_CAT_MAC, 4476 H2C_CL_MCC, 4477 H2C_FUNC_ADD_MCC, 0, 0, 4478 H2C_ADD_MCC_LEN); 4479 4480 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 4481 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4482 } 4483 4484 #define H2C_START_MCC_LEN 12 4485 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 4486 const struct rtw89_fw_mcc_start_req *p) 4487 { 4488 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4489 struct sk_buff *skb; 4490 unsigned int cond; 4491 4492 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 4493 if (!skb) { 4494 rtw89_err(rtwdev, 4495 "failed to alloc skb for start mcc\n"); 4496 return -ENOMEM; 4497 } 4498 4499 skb_put(skb, H2C_START_MCC_LEN); 4500 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 4501 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 4502 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 4503 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 4504 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 4505 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 4506 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 4507 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 4508 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 4509 4510 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4511 H2C_CAT_MAC, 4512 H2C_CL_MCC, 4513 H2C_FUNC_START_MCC, 0, 0, 4514 H2C_START_MCC_LEN); 4515 4516 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 4517 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4518 } 4519 4520 #define H2C_STOP_MCC_LEN 4 4521 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4522 bool prev_groups) 4523 { 4524 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4525 struct sk_buff *skb; 4526 unsigned int cond; 4527 4528 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 4529 if (!skb) { 4530 rtw89_err(rtwdev, 4531 "failed to alloc skb for stop mcc\n"); 4532 return -ENOMEM; 4533 } 4534 4535 skb_put(skb, H2C_STOP_MCC_LEN); 4536 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 4537 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 4538 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 4539 4540 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4541 H2C_CAT_MAC, 4542 H2C_CL_MCC, 4543 H2C_FUNC_STOP_MCC, 0, 0, 4544 H2C_STOP_MCC_LEN); 4545 4546 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 4547 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4548 } 4549 4550 #define H2C_DEL_MCC_GROUP_LEN 4 4551 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 4552 bool prev_groups) 4553 { 4554 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4555 struct sk_buff *skb; 4556 unsigned int cond; 4557 4558 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 4559 if (!skb) { 4560 rtw89_err(rtwdev, 4561 "failed to alloc skb for del mcc group\n"); 4562 return -ENOMEM; 4563 } 4564 4565 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 4566 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 4567 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 4568 4569 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4570 H2C_CAT_MAC, 4571 H2C_CL_MCC, 4572 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 4573 H2C_DEL_MCC_GROUP_LEN); 4574 4575 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 4576 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4577 } 4578 4579 #define H2C_RESET_MCC_GROUP_LEN 4 4580 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 4581 { 4582 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4583 struct sk_buff *skb; 4584 unsigned int cond; 4585 4586 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 4587 if (!skb) { 4588 rtw89_err(rtwdev, 4589 "failed to alloc skb for reset mcc group\n"); 4590 return -ENOMEM; 4591 } 4592 4593 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 4594 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 4595 4596 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4597 H2C_CAT_MAC, 4598 H2C_CL_MCC, 4599 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 4600 H2C_RESET_MCC_GROUP_LEN); 4601 4602 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 4603 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4604 } 4605 4606 #define H2C_MCC_REQ_TSF_LEN 4 4607 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 4608 const struct rtw89_fw_mcc_tsf_req *req, 4609 struct rtw89_mac_mcc_tsf_rpt *rpt) 4610 { 4611 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4612 struct rtw89_mac_mcc_tsf_rpt *tmp; 4613 struct sk_buff *skb; 4614 unsigned int cond; 4615 int ret; 4616 4617 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 4618 if (!skb) { 4619 rtw89_err(rtwdev, 4620 "failed to alloc skb for mcc req tsf\n"); 4621 return -ENOMEM; 4622 } 4623 4624 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 4625 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 4626 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 4627 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 4628 4629 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4630 H2C_CAT_MAC, 4631 H2C_CL_MCC, 4632 H2C_FUNC_MCC_REQ_TSF, 0, 0, 4633 H2C_MCC_REQ_TSF_LEN); 4634 4635 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 4636 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4637 if (ret) 4638 return ret; 4639 4640 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 4641 *rpt = *tmp; 4642 4643 return 0; 4644 } 4645 4646 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 4647 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4648 u8 *bitmap) 4649 { 4650 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4651 struct sk_buff *skb; 4652 unsigned int cond; 4653 u8 map_len; 4654 u8 h2c_len; 4655 4656 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 4657 map_len = RTW89_MAX_MAC_ID_NUM / 8; 4658 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 4659 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 4660 if (!skb) { 4661 rtw89_err(rtwdev, 4662 "failed to alloc skb for mcc macid bitmap\n"); 4663 return -ENOMEM; 4664 } 4665 4666 skb_put(skb, h2c_len); 4667 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 4668 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 4669 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 4670 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 4671 4672 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4673 H2C_CAT_MAC, 4674 H2C_CL_MCC, 4675 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 4676 h2c_len); 4677 4678 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 4679 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4680 } 4681 4682 #define H2C_MCC_SYNC_LEN 4 4683 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 4684 u8 target, u8 offset) 4685 { 4686 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4687 struct sk_buff *skb; 4688 unsigned int cond; 4689 4690 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 4691 if (!skb) { 4692 rtw89_err(rtwdev, 4693 "failed to alloc skb for mcc sync\n"); 4694 return -ENOMEM; 4695 } 4696 4697 skb_put(skb, H2C_MCC_SYNC_LEN); 4698 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 4699 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 4700 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 4701 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 4702 4703 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4704 H2C_CAT_MAC, 4705 H2C_CL_MCC, 4706 H2C_FUNC_MCC_SYNC, 0, 0, 4707 H2C_MCC_SYNC_LEN); 4708 4709 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 4710 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4711 } 4712 4713 #define H2C_MCC_SET_DURATION_LEN 20 4714 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 4715 const struct rtw89_fw_mcc_duration *p) 4716 { 4717 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4718 struct sk_buff *skb; 4719 unsigned int cond; 4720 4721 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 4722 if (!skb) { 4723 rtw89_err(rtwdev, 4724 "failed to alloc skb for mcc set duration\n"); 4725 return -ENOMEM; 4726 } 4727 4728 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
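	/* The payload was zeroed at allocation; fill only the used fields
	 * before rtw89_h2c_pkt_set_hdr() adds the H2C command header.
	 */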
4729 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 4730 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 4731 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 4732 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 4733 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 4734 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 4735 p->start_tsf_low); 4736 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 4737 p->start_tsf_high); 4738 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 4739 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 4740 4741 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4742 H2C_CAT_MAC, 4743 H2C_CL_MCC, 4744 H2C_FUNC_MCC_SET_DURATION, 0, 0, 4745 H2C_MCC_SET_DURATION_LEN); 4746 4747 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 4748 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4749 } 4750 4751 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 4752 { 4753 static const u8 zeros[U8_MAX] = {}; 4754 4755 return memcmp(ext_ptr, zeros, ext_len) == 0; 4756 } 4757 4758 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 4759 ({ \ 4760 u8 __var_sz = sizeof(*(e)); \ 4761 bool __accept; \ 4762 if (__var_sz >= (ent_sz)) \ 4763 __accept = true; \ 4764 else \ 4765 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 4766 (ent_sz) - __var_sz);\ 4767 __accept; \ 4768 }) 4769 4770 static bool 4771 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 4772 const void *cursor, 4773 const struct rtw89_txpwr_conf *conf) 4774 { 4775 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4776 return false; 4777 4778 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 4779 return false; 4780 4781 switch (e->rs) { 4782 case RTW89_RS_CCK: 4783 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 4784 return false; 4785 break; 4786 case RTW89_RS_OFDM: 4787 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 4788 return false; 4789 break; 4790 case RTW89_RS_MCS: 4791 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 4792 e->nss >= RTW89_NSS_NUM || 4793 e->ofdma >= RTW89_OFDMA_NUM) 4794 return false; 4795 break; 4796 case RTW89_RS_HEDCM: 4797 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 4798 e->nss >= RTW89_NSS_HEDCM_NUM || 4799 e->ofdma >= RTW89_OFDMA_NUM) 4800 return false; 4801 break; 4802 case RTW89_RS_OFFSET: 4803 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 4804 return false; 4805 break; 4806 default: 4807 return false; 4808 } 4809 4810 return true; 4811 } 4812 4813 static 4814 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 4815 const struct rtw89_txpwr_table *tbl) 4816 { 4817 const struct rtw89_txpwr_conf *conf = tbl->data; 4818 struct rtw89_fw_txpwr_byrate_entry entry = {}; 4819 struct rtw89_txpwr_byrate *byr_head; 4820 struct rtw89_rate_desc desc = {}; 4821 const void *cursor; 4822 u32 data; 4823 s8 *byr; 4824 int i; 4825 4826 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4827 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 4828 continue; 4829 4830 byr_head = &rtwdev->byr[entry.band][entry.bw]; 4831 data = le32_to_cpu(entry.data); 4832 desc.ofdma = entry.ofdma; 4833 desc.nss = entry.nss; 4834 desc.rs = entry.rs; 4835 4836 for (i = 0; i < entry.len; i++, data >>= 8) { 4837 desc.idx = entry.shf + i; 4838 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 4839 *byr = data & 0xff; 4840 } 4841 } 4842 } 4843 4844 static bool 4845 
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 4846 const void *cursor, 4847 const struct rtw89_txpwr_conf *conf) 4848 { 4849 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4850 return false; 4851 4852 if (e->bw >= RTW89_2G_BW_NUM) 4853 return false; 4854 if (e->nt >= RTW89_NTX_NUM) 4855 return false; 4856 if (e->rs >= RTW89_RS_LMT_NUM) 4857 return false; 4858 if (e->bf >= RTW89_BF_NUM) 4859 return false; 4860 if (e->regd >= RTW89_REGD_NUM) 4861 return false; 4862 if (e->ch_idx >= RTW89_2G_CH_NUM) 4863 return false; 4864 4865 return true; 4866 } 4867 4868 static 4869 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 4870 { 4871 const struct rtw89_txpwr_conf *conf = &data->conf; 4872 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 4873 const void *cursor; 4874 4875 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4876 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 4877 continue; 4878 4879 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4880 [entry.ch_idx] = entry.v; 4881 } 4882 } 4883 4884 static bool 4885 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 4886 const void *cursor, 4887 const struct rtw89_txpwr_conf *conf) 4888 { 4889 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4890 return false; 4891 4892 if (e->bw >= RTW89_5G_BW_NUM) 4893 return false; 4894 if (e->nt >= RTW89_NTX_NUM) 4895 return false; 4896 if (e->rs >= RTW89_RS_LMT_NUM) 4897 return false; 4898 if (e->bf >= RTW89_BF_NUM) 4899 return false; 4900 if (e->regd >= RTW89_REGD_NUM) 4901 return false; 4902 if (e->ch_idx >= RTW89_5G_CH_NUM) 4903 return false; 4904 4905 return true; 4906 } 4907 4908 static 4909 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 4910 { 4911 const struct rtw89_txpwr_conf *conf = &data->conf; 4912 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 4913 const void *cursor; 4914 4915 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4916 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 4917 continue; 4918 4919 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4920 [entry.ch_idx] = entry.v; 4921 } 4922 } 4923 4924 static bool 4925 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 4926 const void *cursor, 4927 const struct rtw89_txpwr_conf *conf) 4928 { 4929 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4930 return false; 4931 4932 if (e->bw >= RTW89_6G_BW_NUM) 4933 return false; 4934 if (e->nt >= RTW89_NTX_NUM) 4935 return false; 4936 if (e->rs >= RTW89_RS_LMT_NUM) 4937 return false; 4938 if (e->bf >= RTW89_BF_NUM) 4939 return false; 4940 if (e->regd >= RTW89_REGD_NUM) 4941 return false; 4942 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 4943 return false; 4944 if (e->ch_idx >= RTW89_6G_CH_NUM) 4945 return false; 4946 4947 return true; 4948 } 4949 4950 static 4951 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 4952 { 4953 const struct rtw89_txpwr_conf *conf = &data->conf; 4954 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 4955 const void *cursor; 4956 4957 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4958 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 4959 continue; 4960 4961 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4962 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 4963 } 4964 } 4965 4966 static bool 4967 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 4968 const void *cursor, 4969 
const struct rtw89_txpwr_conf *conf) 4970 { 4971 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4972 return false; 4973 4974 if (e->ru >= RTW89_RU_NUM) 4975 return false; 4976 if (e->nt >= RTW89_NTX_NUM) 4977 return false; 4978 if (e->regd >= RTW89_REGD_NUM) 4979 return false; 4980 if (e->ch_idx >= RTW89_2G_CH_NUM) 4981 return false; 4982 4983 return true; 4984 } 4985 4986 static 4987 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 4988 { 4989 const struct rtw89_txpwr_conf *conf = &data->conf; 4990 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 4991 const void *cursor; 4992 4993 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4994 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 4995 continue; 4996 4997 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 4998 } 4999 } 5000 5001 static bool 5002 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 5003 const void *cursor, 5004 const struct rtw89_txpwr_conf *conf) 5005 { 5006 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5007 return false; 5008 5009 if (e->ru >= RTW89_RU_NUM) 5010 return false; 5011 if (e->nt >= RTW89_NTX_NUM) 5012 return false; 5013 if (e->regd >= RTW89_REGD_NUM) 5014 return false; 5015 if (e->ch_idx >= RTW89_5G_CH_NUM) 5016 return false; 5017 5018 return true; 5019 } 5020 5021 static 5022 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 5023 { 5024 const struct rtw89_txpwr_conf *conf = &data->conf; 5025 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 5026 const void *cursor; 5027 5028 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5029 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 5030 continue; 5031 5032 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 5033 } 5034 } 5035 5036 static bool 5037 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 5038 const void *cursor, 5039 const struct rtw89_txpwr_conf *conf) 5040 { 5041 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5042 return false; 5043 5044 if (e->ru >= RTW89_RU_NUM) 5045 return false; 5046 if (e->nt >= RTW89_NTX_NUM) 5047 return false; 5048 if (e->regd >= RTW89_REGD_NUM) 5049 return false; 5050 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 5051 return false; 5052 if (e->ch_idx >= RTW89_6G_CH_NUM) 5053 return false; 5054 5055 return true; 5056 } 5057 5058 static 5059 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 5060 { 5061 const struct rtw89_txpwr_conf *conf = &data->conf; 5062 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 5063 const void *cursor; 5064 5065 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5066 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 5067 continue; 5068 5069 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 5070 [entry.ch_idx] = entry.v; 5071 } 5072 } 5073 5074 static bool 5075 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 5076 const void *cursor, 5077 const struct rtw89_txpwr_conf *conf) 5078 { 5079 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5080 return false; 5081 5082 if (e->band >= RTW89_BAND_NUM) 5083 return false; 5084 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 5085 return false; 5086 if (e->regd >= RTW89_REGD_NUM) 5087 return false; 5088 5089 return true; 5090 } 5091 5092 static 5093 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 5094 { 5095 const struct 
rtw89_txpwr_conf *conf = &data->conf; 5096 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 5097 const void *cursor; 5098 5099 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5100 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 5101 continue; 5102 5103 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 5104 } 5105 } 5106 5107 static bool 5108 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 5109 const void *cursor, 5110 const struct rtw89_txpwr_conf *conf) 5111 { 5112 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5113 return false; 5114 5115 if (e->band >= RTW89_BAND_NUM) 5116 return false; 5117 if (e->regd >= RTW89_REGD_NUM) 5118 return false; 5119 5120 return true; 5121 } 5122 5123 static 5124 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 5125 { 5126 const struct rtw89_txpwr_conf *conf = &data->conf; 5127 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 5128 const void *cursor; 5129 5130 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5131 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 5132 continue; 5133 5134 data->v[entry.band][entry.regd] = entry.v; 5135 } 5136 } 5137 5138 const struct rtw89_rfe_parms * 5139 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 5140 const struct rtw89_rfe_parms *init) 5141 { 5142 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 5143 struct rtw89_rfe_parms *parms; 5144 5145 if (!rfe_data) 5146 return init; 5147 5148 parms = &rfe_data->rfe_parms; 5149 if (init) 5150 *parms = *init; 5151 5152 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 5153 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 5154 rfe_data->byrate.tbl.size = 0; /* don't care here */ 5155 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 5156 parms->byr_tbl = &rfe_data->byrate.tbl; 5157 } 5158 5159 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 5160 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 5161 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 5162 } 5163 5164 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 5165 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 5166 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 5167 } 5168 5169 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 5170 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 5171 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 5172 } 5173 5174 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 5175 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 5176 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 5177 } 5178 5179 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 5180 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 5181 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 5182 } 5183 5184 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 5185 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 5186 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 5187 } 5188 5189 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 5190 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 5191 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 5192 } 5193 5194 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 5195 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 5196 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 5197 } 5198 5199 return parms; 5200 } 5201
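
/* Illustrative usage sketch (assumption, not part of the original file): the
 * caller that owns the chip's default RFE parms is expected to do roughly
 *
 *	const struct rtw89_rfe_parms *parms;
 *
 *	parms = rtw89_load_rfe_data_from_fw(rtwdev, default_parms);
 *
 * where "default_parms" is a placeholder name. On return, parms is either the
 * passed-in defaults (no loaded RFE data) or rfe_data->rfe_parms with every
 * valid firmware tx-power table hooked in by the helpers above.
 */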