// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020 Realtek Corporation
 */

#include "cam.h"
#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "phy.h"
#include "ps.h"
#include "reg.h"
#include "util.h"

union rtw89_fw_element_arg {
	size_t offset;
	enum rtw89_rf_path rf_path;
	enum rtw89_fw_type fw_type;
};

struct rtw89_fw_element_handler {
	int (*fn)(struct rtw89_dev *rtwdev,
		  const struct rtw89_fw_element_hdr *elm,
		  const union rtw89_fw_element_arg arg);
	const union rtw89_fw_element_arg arg;
	const char *name;
};

static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
				    struct sk_buff *skb);
static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
				 struct rtw89_wait_info *wait, unsigned int cond);

static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len,
					      bool header)
{
	struct sk_buff *skb;
	u32 header_len = 0;
	u32 h2c_desc_size = rtwdev->chip->h2c_desc_size;

	if (header)
		header_len = H2C_HEADER_LEN;

	skb = dev_alloc_skb(len + header_len + h2c_desc_size);
	if (!skb)
		return NULL;
	skb_reserve(skb, header_len + h2c_desc_size);
	memset(skb->data, 0, len);

	return skb;
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, true);
}

struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len)
{
	return rtw89_fw_h2c_alloc_skb(rtwdev, len, false);
}

int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u8 val;
	int ret;

	ret = read_poll_timeout_atomic(mac->fwdl_get_status, val,
				       val == RTW89_FWDL_WCPU_FW_INIT_RDY,
				       1, FWDL_WAIT_CNT, false, rtwdev, type);
	if (ret) {
		switch (val) {
		case RTW89_FWDL_CHECKSUM_FAIL:
			rtw89_err(rtwdev, "fw checksum fail\n");
			return -EINVAL;

		case RTW89_FWDL_SECURITY_FAIL:
			rtw89_err(rtwdev, "fw security fail\n");
			return -EINVAL;

		case RTW89_FWDL_CV_NOT_MATCH:
			rtw89_err(rtwdev, "fw cv not match\n");
			return -EINVAL;

		default:
			rtw89_err(rtwdev, "fw unexpected status %d\n", val);
			return -EBUSY;
		}
	}

	set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);

	return 0;
}

static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	if (!info)
		return -EINVAL;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff;
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
				  struct rtw89_fw_bin_info *info)
{
	const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw;
	struct rtw89_fw_hdr_section_info *section_info;
	const struct rtw89_fw_dynhdr_hdr *fwdynhdr;
	const struct rtw89_fw_hdr_section_v1 *section;
	const u8 *fw_end = fw + len;
	const u8 *bin;
	u32 base_hdr_len;
	u32 mssc_len = 0;
	u32 i;

	info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM);
	base_hdr_len = struct_size(fw_hdr, sections, info->section_num);
	info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR);

	if (info->dynamic_hdr_en) {
		info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE);
		info->dynamic_hdr_len = info->hdr_len - base_hdr_len;
		fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len);
		if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) {
			rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n");
			return -EINVAL;
		}
	} else {
		info->hdr_len = base_hdr_len;
		info->dynamic_hdr_len = 0;
	}

	bin = fw + info->hdr_len;

	/* jump to section header */
	section_info = info->section_info;
	for (i = 0; i < info->section_num; i++) {
		section = &fw_hdr->sections[i];
		section_info->type =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE);
		if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
			section_info->mssc =
				le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC);
			mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
		} else {
			section_info->mssc = 0;
		}

		section_info->len =
			le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE);
		if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM))
			section_info->len += FWDL_SECTION_CHKSUM_LEN;
		section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL);
		section_info->dladdr =
			le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR);
		section_info->addr = bin;
		bin += section_info->len;
		section_info++;
	}

	if (fw_end != bin + mssc_len) {
		rtw89_err(rtwdev, "[ERR]fw bin size\n");
		return -EINVAL;
	}

	return 0;
}

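/* Dispatch on fw_suit->hdr_ver to the v0/v1 header parsers above; both
 * fill @info with the section layout consumed by the download path below.
 */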
static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev,
			       const struct rtw89_fw_suit *fw_suit,
			       struct rtw89_fw_bin_info *info)
{
	const u8 *fw = fw_suit->data;
	u32 len = fw_suit->size;

	if (!fw || !len) {
		rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type);
		return -ENOENT;
	}

	switch (fw_suit->hdr_ver) {
	case 0:
		return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info);
	case 1:
		return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info);
	default:
		return -ENOENT;
	}
}

static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			struct rtw89_fw_suit *fw_suit, bool nowarn)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const u8 *mfw = firmware->data;
	u32 mfw_len = firmware->size;
	const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
	const struct rtw89_mfw_info *mfw_info;
	int i;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
		/* legacy firmware supports the normal type only */
		if (type != RTW89_FW_NORMAL)
			return -EINVAL;
		fw_suit->data = mfw;
		fw_suit->size = mfw_len;
		return 0;
	}

	for (i = 0; i < mfw_hdr->fw_nr; i++) {
		mfw_info = &mfw_hdr->info[i];
		if (mfw_info->type == type) {
			if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
				goto found;
			if (type == RTW89_FW_LOGFMT)
				goto found;
		}
	}

	if (!nowarn)
		rtw89_err(rtwdev, "no suitable firmware found\n");
	return -ENOENT;

found:
	fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
	fw_suit->size = le32_to_cpu(mfw_info->size);
	return 0;
}

static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_mfw_hdr *mfw_hdr =
		(const struct rtw89_mfw_hdr *)firmware->data;
	const struct rtw89_mfw_info *mfw_info;
	u32 size;

	if (mfw_hdr->sig != RTW89_MFW_SIG) {
		rtw89_warn(rtwdev, "not mfw format\n");
		return 0;
	}

	mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1];
	size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size);

	return size;
}

static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION);
}

static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_suit *fw_suit,
				   const struct rtw89_fw_hdr_v1 *hdr)
{
	fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION);
	fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION);
	fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION);
	fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX);
	fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID);
	fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR);
	fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH);
	fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE);
	fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR);
	fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN);
	fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION);
}

static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
			       enum rtw89_fw_type type,
			       struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data;
	const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data;

	if (type == RTW89_FW_LOGFMT)
		return 0;

	fw_suit->type = type;
	fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER);

	switch (fw_suit->hdr_ver) {
	case 0:
		rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0);
		break;
	case 1:
		rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1);
		break;
	default:
		rtw89_err(rtwdev, "Unknown firmware header version %u\n",
			  fw_suit->hdr_ver);
		return -ENOENT;
	}

	rtw89_info(rtwdev,
		   "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n",
		   fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
		   fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type);

	return 0;
}

static
int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
			 bool nowarn)
{
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	int ret;

	ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
	if (ret)
		return ret;

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

static
int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_element_hdr *elm,
				  const union rtw89_fw_element_arg arg)
{
	enum rtw89_fw_type type = arg.fw_type;
	struct rtw89_fw_suit *fw_suit;

	fw_suit = rtw89_fw_suit_get(rtwdev, type);
	fw_suit->data = elm->u.common.contents;
	fw_suit->size = le32_to_cpu(elm->size);

	return rtw89_fw_update_ver(rtwdev, type, fw_suit);
}

#define __DEF_FW_FEAT_COND(__cond, __op) \
static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \
{ \
	return suit_ver_code __op comp_ver_code; \
}

__DEF_FW_FEAT_COND(ge, >=); /* greater or equal */
__DEF_FW_FEAT_COND(le, <=); /* less or equal */
__DEF_FW_FEAT_COND(lt, <); /* less than */

struct __fw_feat_cfg {
	enum rtw89_core_chip_id chip_id;
	enum rtw89_fw_feature feature;
	u32 ver_code;
	bool (*cond)(u32 suit_ver_code, u32 comp_ver_code);
};

#define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \
	{ \
		.chip_id = _chip, \
		.feature = RTW89_FW_FEATURE_ ## _feat, \
		.ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \
		.cond = __fw_feat_cond_ ## _cond, \
	}

static const struct __fw_feat_cfg fw_feat_tbl[] = {
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
	__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
};

static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
					 const struct rtw89_chip_info *chip,
					 u32 ver_code)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) {
		const struct __fw_feat_cfg *ent = &fw_feat_tbl[i];

		if (chip->chip_id != ent->chip_id)
			continue;

		if (ent->cond(ver_code, ent->ver_code))
			RTW89_SET_FW_FEATURE(ent->feature, fw);
	}
}

static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	const struct rtw89_fw_suit *fw_suit;
	u32 suit_ver_code;

	fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
	suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);

	rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code);
}

const struct firmware *
rtw89_early_fw_feature_recognize(struct device *device,
				 const struct rtw89_chip_info *chip,
				 struct rtw89_fw_info *early_fw,
				 int *used_fw_format)
{
	const struct firmware *firmware;
	char fw_name[64];
	int fw_format;
	u32 ver_code;
	int ret;

	for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) {
		rtw89_fw_get_filename(fw_name, sizeof(fw_name),
				      chip->fw_basename, fw_format);

		ret = request_firmware(&firmware, fw_name, device);
		if (!ret) {
			dev_info(device, "loaded firmware %s\n", fw_name);
			*used_fw_format = fw_format;
			break;
		}
	}

	if (ret) {
		dev_err(device, "failed to early request firmware: %d\n", ret);
		return NULL;
	}

	ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data);

	if (!ver_code)
		goto out;

	rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code);

out:
	return firmware;
}

int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	int ret;

	if (chip->try_ce_fw) {
		ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
		if (!ret)
			goto normal_done;
	}

	ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
	if (ret)
		return ret;

normal_done:
	/* It still works even if the wowlan firmware doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);

	/* It still works even if the log format file doesn't exist. */
	__rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true);

	rtw89_fw_recognize_features(rtwdev);

	rtw89_coex_recognize_ver(rtwdev);

	return 0;
}

static
int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev,
				 const struct rtw89_fw_element_hdr *elm,
				 const union rtw89_fw_element_arg arg)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	struct rtw89_phy_table *tbl;
	struct rtw89_reg2_def *regs;
	enum rtw89_rf_path rf_path;
	u32 n_regs, i;
	u8 idx;

	tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;

	switch (le32_to_cpu(elm->id)) {
	case RTW89_FW_ELEMENT_ID_BB_REG:
		elm_info->bb_tbl = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_BB_GAIN:
		elm_info->bb_gain = tbl;
		break;
	case RTW89_FW_ELEMENT_ID_RADIO_A:
	case RTW89_FW_ELEMENT_ID_RADIO_B:
	case RTW89_FW_ELEMENT_ID_RADIO_C:
	case RTW89_FW_ELEMENT_ID_RADIO_D:
		rf_path = arg.rf_path;
		idx = elm->u.reg2.idx;

		elm_info->rf_radio[idx] = tbl;
		tbl->rf_path = rf_path;
		tbl->config = rtw89_phy_config_rf_reg_v1;
		break;
	case RTW89_FW_ELEMENT_ID_RF_NCTL:
		elm_info->rf_nctl = tbl;
		break;
	default:
		kfree(tbl);
		return -ENOENT;
	}

	n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]);
	regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL);
	if (!regs)
		goto out;

	for (i = 0; i < n_regs; i++) {
		regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr);
		regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data);
	}

	tbl->n_regs = n_regs;
	tbl->regs = regs;

	return 0;

out:
	kfree(tbl);
	return -ENOMEM;
}

static
int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev,
				      const struct rtw89_fw_element_hdr *elm,
				      const union rtw89_fw_element_arg arg)
{
	const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr;
	const unsigned long offset = arg.offset;
	struct rtw89_efuse *efuse = &rtwdev->efuse;
	struct rtw89_txpwr_conf *conf;

	if (!rtwdev->rfe_data) {
		rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL);
		if (!rtwdev->rfe_data)
			return -ENOMEM;
	}

	conf = (void *)rtwdev->rfe_data + offset;

	/* if multiple entries match, the last one takes effect */
	if (txpwr_elm->rfe_type == efuse->rfe_type)
		goto setup;

	/* if none matches, accept the default entry */
	if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE &&
	    (!rtw89_txpwr_conf_valid(conf) ||
	     conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE))
		goto setup;

	rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);
	return 0;

setup:
	rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n",
		    elm->id, txpwr_elm->rfe_type);

	conf->rfe_type = txpwr_elm->rfe_type;
	conf->ent_sz = txpwr_elm->ent_sz;
	conf->num_ents = le32_to_cpu(txpwr_elm->num_ents);
	conf->data = txpwr_elm->content;
	return 0;
}

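/* Handlers are indexed by firmware element ID. Each entry carries an
 * optional fixed argument (a firmware type, an RF path, or an offset into
 * struct rtw89_rfe_data for TX-power tables) and an optional name that is
 * printed once the element has been parsed successfully.
 */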
static const struct rtw89_fw_element_handler __fw_element_handlers[] = {
	[RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU0 }, NULL},
	[RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm,
					{ .fw_type = RTW89_FW_BBMCU1 }, NULL},
	[RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"},
	[RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_A }, "radio A"},
	[RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_B }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_C }, NULL},
	[RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm,
					 { .rf_path = RF_PATH_D }, NULL},
	[RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"},
	[RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR",
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL,
	},
	[RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = {
		rtw89_fw_recognize_txpwr_from_elm,
		{ .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL,
	},
};

int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	const struct firmware *firmware = fw_info->req.firmware;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u32 unrecognized_elements = chip->needed_fw_elms;
	const struct rtw89_fw_element_handler *handler;
	const struct rtw89_fw_element_hdr *hdr;
	u32 elm_size;
	u32 elem_id;
	u32 offset;
	int ret;

	BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < RTW89_FW_ELEMENT_ID_NUM);

	offset = rtw89_mfw_get_size(rtwdev);
	offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	if (offset == 0)
		return -EINVAL;

	while (offset + sizeof(*hdr) < firmware->size) {
		hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset);

		elm_size = le32_to_cpu(hdr->size);
		if (offset + elm_size >= firmware->size) {
			rtw89_warn(rtwdev, "firmware element size exceeds\n");
			break;
		}

		elem_id = le32_to_cpu(hdr->id);
		if (elem_id >= ARRAY_SIZE(__fw_element_handlers))
			goto next;

		handler = &__fw_element_handlers[elem_id];
		if (!handler->fn)
			goto next;

		ret = handler->fn(rtwdev, hdr, handler->arg);
		if (ret)
			return ret;

		if (handler->name)
			rtw89_info(rtwdev, "Firmware element %s version: %4ph\n",
				   handler->name, hdr->ver);

		unrecognized_elements &= ~BIT(elem_id);
next:
		offset += sizeof(*hdr) + elm_size;
		offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN);
	}

	if (unrecognized_elements) {
		rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n",
			  unrecognized_elements);
		return -ENOENT;
	}

	return 0;
}

void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
			   u8 type, u8 cat, u8 class, u8 func,
			   bool rack, bool dack, u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	if (!(rtwdev->fw.h2c_seq % 4))
		rack = true;
	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN) |
				(rack ? H2C_HDR_REC_ACK : 0) |
				(dack ? H2C_HDR_DONE_ACK : 0));

	rtwdev->fw.h2c_seq++;
}

static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb,
				       u8 type, u8 cat, u8 class, u8 func,
				       u32 len)
{
	struct fwcmd_hdr *hdr;

	hdr = (struct fwcmd_hdr *)skb_push(skb, 8);

	hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
				FIELD_PREP(H2C_HDR_CAT, cat) |
				FIELD_PREP(H2C_HDR_CLASS, class) |
				FIELD_PREP(H2C_HDR_FUNC, func) |
				FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));

	hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
					   len + H2C_HEADER_LEN));
}

static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	struct sk_buff *skb;
	u32 ret = 0;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
		return -ENOMEM;
	}

	skb_put_data(skb, fw, len);
	SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
	rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
				   H2C_CAT_MAC, H2C_CL_MAC_FWDL,
				   H2C_FUNC_MAC_FWHDR_DL, len);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		ret = -1;
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	int ret;

	ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FW header download\n");
		return ret;
	}

	ret = mac->fwdl_check_path_ready(rtwdev, false);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]FWDL path ready\n");
		return ret;
	}

	rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
	rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);

	return 0;
}

static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				    struct rtw89_fw_hdr_section_info *info)
{
	struct sk_buff *skb;
	const u8 *section = info->addr;
	u32 residue_len = info->len;
	u32 pkt_len;
	int ret;

	while (residue_len) {
		if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
			pkt_len = FWDL_SECTION_PER_PKT_LEN;
		else
			pkt_len = residue_len;

		skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len);
		if (!skb) {
			rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
			return -ENOMEM;
		}
		skb_put_data(skb, section, pkt_len);

		ret = rtw89_h2c_tx(rtwdev, skb, true);
		if (ret) {
			rtw89_err(rtwdev, "failed to send h2c\n");
			ret = -1;
			goto fail;
		}

		section += pkt_len;
		residue_len -= pkt_len;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static enum rtw89_fwdl_check_type
rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev,
				     const struct rtw89_fw_suit *fw_suit)
{
	switch (fw_suit->type) {
	case RTW89_FW_BBMCU0:
		return RTW89_FWDL_CHECK_BB0_FWDL_DONE;
	case RTW89_FW_BBMCU1:
		return RTW89_FWDL_CHECK_BB1_FWDL_DONE;
	default:
		return RTW89_FWDL_CHECK_WCPU_FWDL_DONE;
	}
}

static int rtw89_fw_download_main(struct rtw89_dev *rtwdev,
				  const struct rtw89_fw_suit *fw_suit,
				  struct rtw89_fw_bin_info *info)
{
	struct rtw89_fw_hdr_section_info *section_info = info->section_info;
	const struct rtw89_chip_info *chip = rtwdev->chip;
	enum rtw89_fwdl_check_type chk_type;
	u8 section_num = info->section_num;
	int ret;

	while (section_num--) {
		ret = __rtw89_fw_download_main(rtwdev, section_info);
		if (ret)
			return ret;
		section_info++;
	}

	if (chip->chip_gen == RTW89_CHIP_AX)
		return 0;

	chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit);
	ret = rtw89_fw_check_rdy(rtwdev, chk_type);
	if (ret) {
		rtw89_warn(rtwdev, "failed to download firmware type %u\n",
			   fw_suit->type);
		return ret;
	}

	return 0;
}

static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 index;

	rtw89_write32(rtwdev, R_AX_DBG_CTRL,
		      FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
		      FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
	rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);

	for (index = 0; index < 15; index++) {
		val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
		rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
		fsleep(10);
	}
}

static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
	u32 val32;
	u16 val16;

	val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
	rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);

	val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
	rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);

	rtw89_fw_prog_cnt_dump(rtwdev);
}

static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
				  struct rtw89_fw_suit *fw_suit)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_bin_info info;
	int ret;

	ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info);
	if (ret) {
		rtw89_err(rtwdev, "parse fw header fail\n");
		return ret;
	}

	if (rtwdev->chip->chip_id == RTL8922A &&
	    (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN))
		rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000);

	ret = mac->fwdl_check_path_ready(rtwdev, true);
	if (ret) {
		rtw89_err(rtwdev, "[ERR]H2C path ready\n");
		return ret;
	}

	ret = rtw89_fw_download_hdr(rtwdev, fw_suit->data, info.hdr_len -
							   info.dynamic_hdr_len);
	if (ret)
		return ret;

	ret = rtw89_fw_download_main(rtwdev, fw_suit, &info);
	if (ret)
		return ret;

	return 0;
}

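/* Full download sequence: disable the CPU, enable the WCPU for download,
 * push the requested suit, then the BBMCU suits when include_bb is set,
 * and finally poll rtw89_fw_check_rdy() for FreeRTOS completion.
 */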
int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
		      bool include_bb)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	struct rtw89_fw_info *fw_info = &rtwdev->fw;
	struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
	u8 bbmcu_nr = rtwdev->chip->bbmcu_nr;
	int ret;
	int i;

	mac->disable_cpu(rtwdev);
	ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb);
	if (ret)
		return ret;

	ret = rtw89_fw_download_suit(rtwdev, fw_suit);
	if (ret)
		goto fwdl_err;

	for (i = 0; i < bbmcu_nr && include_bb; i++) {
		fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i);

		ret = rtw89_fw_download_suit(rtwdev, fw_suit);
		if (ret)
			goto fwdl_err;
	}

	fw_info->h2c_seq = 0;
	fw_info->rec_seq = 0;
	fw_info->h2c_counter = 0;
	fw_info->c2h_counter = 0;
	rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
	rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;

	mdelay(5);

	ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
	if (ret) {
		rtw89_warn(rtwdev, "download firmware fail\n");
		return ret;
	}

	return ret;

fwdl_err:
	rtw89_fw_dl_fail_dump(rtwdev);
	return ret;
}

int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	wait_for_completion(&fw->req.completion);
	if (!fw->req.firmware)
		return -EINVAL;

	return 0;
}

static int rtw89_load_firmware_req(struct rtw89_dev *rtwdev,
				   struct rtw89_fw_req_info *req,
				   const char *fw_name, bool nowarn)
{
	int ret;

	if (req->firmware) {
		rtw89_debug(rtwdev, RTW89_DBG_FW,
			    "full firmware has been early requested\n");
		complete_all(&req->completion);
		return 0;
	}

	if (nowarn)
		ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev);
	else
		ret = request_firmware(&req->firmware, fw_name, rtwdev->dev);

	complete_all(&req->completion);

	return ret;
}

void rtw89_load_firmware_work(struct work_struct *work)
{
	struct rtw89_dev *rtwdev =
		container_of(work, struct rtw89_dev, load_firmware_work);
	const struct rtw89_chip_info *chip = rtwdev->chip;
	char fw_name[64];

	rtw89_fw_get_filename(fw_name, sizeof(fw_name),
			      chip->fw_basename, rtwdev->fw.fw_format);

	rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false);
}

static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl)
{
	if (!tbl)
		return;

	kfree(tbl->regs);
	kfree(tbl);
}

static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info;
	int i;

	rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl);
	rtw89_free_phy_tbl_from_elm(elm_info->bb_gain);
	for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++)
		rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]);
	rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl);
}

void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_info *fw = &rtwdev->fw;

	cancel_work_sync(&rtwdev->load_firmware_work);

	if (fw->req.firmware) {
		release_firmware(fw->req.firmware);

		/* assign NULL back in case rtw89_free_ieee80211_hw()
		 * tries to release the same one again.
		 */
		fw->req.firmware = NULL;
	}

	kfree(fw->log.fmts);
	rtw89_unload_firmware_elements(rtwdev);
}

static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id)
{
	struct rtw89_fw_log *fw_log = &rtwdev->fw.log;
	u32 i;

	if (fmt_id > fw_log->last_fmt_id)
		return 0;

	for (i = 0; i < fw_log->fmt_count; i++) {
		if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id)
			return i;
	}
	return 0;
}

static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	const struct rtw89_fw_logsuit_hdr *suit_hdr;
	struct rtw89_fw_suit *suit = &log->suit;
	const void *fmts_ptr, *fmts_end_ptr;
	u32 fmt_count;
	int i;

	suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data;
	fmt_count = le32_to_cpu(suit_hdr->count);
	log->fmt_ids = suit_hdr->ids;
	fmts_ptr = &suit_hdr->ids[fmt_count];
	fmts_end_ptr = suit->data + suit->size;
	log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL);
	if (!log->fmts)
		return -ENOMEM;

	for (i = 0; i < fmt_count; i++) {
		fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr);
		if (!fmts_ptr)
			break;

		(*log->fmts)[i] = fmts_ptr;
		log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]);
		log->fmt_count++;
		fmts_ptr += strlen(fmts_ptr);
	}

	return 0;
}

int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev)
{
	struct rtw89_fw_log *log = &rtwdev->fw.log;
	struct rtw89_fw_suit *suit = &log->suit;

	if (!suit || !suit->data) {
		rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n");
		return -EINVAL;
	}
	if (log->fmts)
		return 0;

	return rtw89_fw_log_create_fmts_dict(rtwdev);
}

static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev,
				   const struct rtw89_fw_c2h_log_fmt *log_fmt,
				   u32 fmt_idx, u8 para_int, bool raw_data)
{
	const char *(*fmts)[] = rtwdev->fw.log.fmts;
	char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE];
	u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0};
	int i;

	if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) {
		rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n",
			   log_fmt->argc);
		return;
	}

	if (para_int)
		for (i = 0; i < log_fmt->argc; i++)
			args[i] = le32_to_cpu(log_fmt->u.argv[i]);

	if (raw_data) {
		if (para_int)
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, (int)sizeof(args), args);
		else
			snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE,
				 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id),
				 para_int, log_fmt->argc, log_fmt->u.raw);
	} else {
		snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx],
			 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4],
			 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9],
			 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe],
			 args[0xf]);
	}

	rtw89_info(rtwdev, "C2H log: %s", str_buf);
}

void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
{
	const struct rtw89_fw_c2h_log_fmt *log_fmt;
	u8 para_int;
	u32 fmt_idx;

	if (len < RTW89_C2H_HEADER_LEN) {
		rtw89_err(rtwdev, "c2h log length is wrong!\n");
		return;
	}

	buf += RTW89_C2H_HEADER_LEN;
	len -= RTW89_C2H_HEADER_LEN;
	log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf;

	if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN)
		goto plain_log;

	if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE))
		goto plain_log;

	if (!rtwdev->fw.log.fmts)
		return;

	para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT);
	fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id));

	if (!para_int && log_fmt->argc != 0 && fmt_idx != 0)
		rtw89_info(rtwdev, "C2H log: %s%s",
			   (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw);
	else if (fmt_idx != 0 && para_int)
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false);
	else
		rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true);
	return;

plain_log:
	rtw89_info(rtwdev, "C2H log: %.*s", len, buf);

}

#define H2C_CAM_LEN 60
int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
		     struct rtw89_sta *rtwsta, const u8 *scan_mac_addr)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CAM_LEN);
	rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data);
	rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_ADDR_CAM_UPDATE,
			      H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
			      H2C_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 struct rtw89_sta *rtwsta)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_DCTL_SEC_CAM_LEN);

	rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_FR_EXCHG,
			      H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
			      H2C_DCTL_SEC_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}
EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1);

#define H2C_BA_CAM_LEN 8
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
			bool valid, struct ieee80211_ampdu_params *params)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_vif *rtwvif = rtwsta->rtwvif;
	u8 macid = rtwsta->mac_id;
	struct sk_buff *skb;
	u8 entry_idx;
	int ret;

	ret = valid ?
	      rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) :
	      rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx);
	if (ret) {
		/* it still works even if we don't have static BA CAM, because
		 * hardware can create dynamic BA CAM automatically.
		 */
		rtw89_debug(rtwdev, RTW89_DBG_TXRX,
			    "failed to %s entry tid=%d for h2c ba cam\n",
			    valid ? "alloc" : "free", params->tid);
		return 0;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);
	SET_BA_CAM_MACID(skb->data, macid);
	if (chip->bacam_ver == RTW89_BACAM_V0_EXT)
		SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	else
		SET_BA_CAM_ENTRY_IDX(skb->data, entry_idx);
	if (!valid)
		goto end;
	SET_BA_CAM_VALID(skb->data, valid);
	SET_BA_CAM_TID(skb->data, params->tid);
	if (params->buf_size > 64)
		SET_BA_CAM_BMAP_SIZE(skb->data, 4);
	else
		SET_BA_CAM_BMAP_SIZE(skb->data, 0);
	/* If init req is set, hw will set the ssn */
	SET_BA_CAM_INIT_REQ(skb->data, 1);
	SET_BA_CAM_SSN(skb->data, params->ssn);

	if (chip->bacam_ver == RTW89_BACAM_V0_EXT) {
		SET_BA_CAM_STD_EN(skb->data, 1);
		SET_BA_CAM_BAND(skb->data, rtwvif->mac_idx);
	}

end:
	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev,
					   u8 entry_idx, u8 uid)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_BA_CAM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_BA_CAM_LEN);

	SET_BA_CAM_VALID(skb->data, 1);
	SET_BA_CAM_ENTRY_IDX_V1(skb->data, entry_idx);
	SET_BA_CAM_UID(skb->data, uid);
	SET_BA_CAM_BAND(skb->data, 0);
	SET_BA_CAM_STD_EN(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_BA_CAM,
			      H2C_FUNC_MAC_BA_CAM, 0, 1,
			      H2C_BA_CAM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	u8 entry_idx = chip->bacam_num;
	u8 uid = 0;
	int i;

	for (i = 0; i < chip->bacam_dynamic_num; i++) {
		rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid);
		entry_idx++;
		uid++;
	}
}

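/* Enable or disable firmware logging over the C2H path. When enabled, the
 * INIT, TASK, PS and ERROR components are reported at RTW89_FW_LOG_LEVEL_LOUD.
 */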
#define H2C_LOG_CFG_LEN 12
int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
{
	struct sk_buff *skb;
	u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
			    BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_LOG_CFG_LEN);
	SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD);
	SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
	SET_LOG_CFG_COMP(skb->data, comp);
	SET_LOG_CFG_COMP_EXT(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_LOG_CFG, 0, 0,
			      H2C_LOG_CFG_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
					struct rtw89_vif *rtwvif,
					enum rtw89_fw_pkt_ofld_type type,
					u8 *id)
{
	struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
	struct rtw89_pktofld_info *info;
	struct sk_buff *skb;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	switch (type) {
	case RTW89_PKT_OFLD_TYPE_PS_POLL:
		skb = ieee80211_pspoll_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_PROBE_RSP:
		skb = ieee80211_proberesp_get(rtwdev->hw, vif);
		break;
	case RTW89_PKT_OFLD_TYPE_NULL_DATA:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false);
		break;
	case RTW89_PKT_OFLD_TYPE_QOS_NULL:
		skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
		break;
	default:
		goto err;
	}

	if (!skb)
		goto err;

	ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
	kfree_skb(skb);

	if (ret)
		goto err;

	list_add_tail(&info->list, &rtwvif->general_pkt_list);
	*id = info->id;
	return 0;

err:
	kfree(info);
	return -ENOMEM;
}

void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
					   struct rtw89_vif *rtwvif, bool notify_fw)
{
	struct list_head *pkt_list = &rtwvif->general_pkt_list;
	struct rtw89_pktofld_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, pkt_list, list) {
		if (notify_fw)
			rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
		else
			rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id);
		list_del(&info->list);
		kfree(info);
	}
}

void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
{
	struct rtw89_vif *rtwvif;

	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
}

#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
			     struct rtw89_vif *rtwvif, u8 macid)
{
	u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
	u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
	struct sk_buff *skb;
	int ret;

	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
	rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
				     RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_GENERAL_PKT_LEN);
	SET_GENERAL_PKT_MACID(skb->data, macid);
	SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
	SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
	SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
	SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
	SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_FW_INFO,
			      H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
			      H2C_GENERAL_PKT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_LPS_PARM_LEN 8
int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
			  struct rtw89_lps_parm *lps_param)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_LPS_PARM_LEN);

	SET_LPS_PARM_MACID(skb->data, lps_param->macid);
	SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
	SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
	SET_LPS_PARM_RLBM(skb->data, 1);
	SET_LPS_PARM_SMARTPS(skb->data, 1);
	SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
	SET_LPS_PARM_VOUAPSD(skb->data, 0);
	SET_LPS_PARM_VIUAPSD(skb->data, 0);
	SET_LPS_PARM_BEUAPSD(skb->data, 0);
	SET_LPS_PARM_BKUAPSD(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_PS,
			      H2C_FUNC_MAC_LPS_PARM, 0, 1,
			      H2C_LPS_PARM_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_P2P_ACT_LEN 20
int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
			 struct ieee80211_p2p_noa_desc *desc,
			 u8 act, u8 noa_id)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT;
	u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
	struct sk_buff *skb;
	u8 *cmd;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_P2P_ACT_LEN);
	cmd = skb->data;

	RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id);
	RTW89_SET_FWCMD_P2P_P2PID(cmd, 0);
	RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id);
	RTW89_SET_FWCMD_P2P_ACT(cmd, act);
	RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc);
	RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0);
	if (desc) {
		RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time);
		RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval);
		RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration);
		RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count);
		RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_PS,
			      H2C_FUNC_P2P_ACT, 0, 0,
			      H2C_P2P_ACT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev,
				       struct sk_buff *skb)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_hal *hal = &rtwdev->hal;
	u8 ntx_path;
	u8 map_b;

	if (chip->rf_path_num == 1) {
		ntx_path = RF_A;
		map_b = 0;
	} else {
		ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
		map_b = hal->antenna_tx == RF_AB ? 1 : 0;
	}

	SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
	SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
	SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
	SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
}

#define H2C_CMC_TBL_LEN 68
int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev,
				  struct rtw89_vif *rtwvif)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, macid);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
		__rtw89_fw_h2c_set_tx_path(rtwdev, skb);
		SET_CMC_TBL_ANTSEL_A(skb->data, 0);
		SET_CMC_TBL_ANTSEL_B(skb->data, 0);
		SET_CMC_TBL_ANTSEL_C(skb->data, 0);
		SET_CMC_TBL_ANTSEL_D(skb->data, 0);
	}
	SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
	SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
				     struct ieee80211_sta *sta, u8 *pads)
{
	bool ppe_th;
	u8 ppe16, ppe8;
	u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1;
	u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0];
	u8 ru_bitmap;
	u8 n, idx, sh;
	u16 ppe;
	int i;

	if (!sta->deflink.he_cap.has_he)
		return;

	ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
			   sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]);
	if (!ppe_th) {
		u8 pad;

		pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK,
				sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]);

		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
			pads[i] = pad;

		return;
	}

	ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
	n = hweight8(ru_bitmap);
	n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;

	for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
		if (!(ru_bitmap & BIT(i))) {
			pads[i] = 1;
			continue;
		}

		idx = n >> 3;
		sh = n & 7;
		n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;

		ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx]));
		ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
		sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
		ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;

		if (ppe16 != 7 && ppe8 == 7)
			pads[i] = 2;
		else if (ppe8 != 7)
			pads[i] = 1;
		else
			pads[i] = 0;
	}
}

int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
				struct ieee80211_vif *vif,
				struct ieee80211_sta *sta)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;
	struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
						       rtwvif->sub_entity_idx);
	struct sk_buff *skb;
	u8 pads[RTW89_PPE_BW_NUM];
	u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id;
	u16 lowest_rate;
	int ret;

	memset(pads, 0, sizeof(pads));
	if (sta)
		__get_sta_he_pkt_padding(rtwdev, sta, pads);

	if (vif->p2p)
		lowest_rate = RTW89_HW_RATE_OFDM6;
	else if (chan->band_type == RTW89_BAND_2G)
		lowest_rate = RTW89_HW_RATE_CCK1;
	else
		lowest_rate = RTW89_HW_RATE_OFDM6;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
		return -ENOMEM;
	}
	skb_put(skb, H2C_CMC_TBL_LEN);
	SET_CTRL_INFO_MACID(skb->data, mac_id);
	SET_CTRL_INFO_OPERATION(skb->data, 1);
	SET_CMC_TBL_DISRTSFB(skb->data, 1);
	SET_CMC_TBL_DISDATAFB(skb->data, 1);
	SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate);
	SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
	SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
	if (vif->type == NL80211_IFTYPE_STATION)
		SET_CMC_TBL_ULDL(skb->data, 1);
	else
		SET_CMC_TBL_ULDL(skb->data, 0);
	SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
	if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	} else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) {
		SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
		SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]);
	}
	if (sta)
		SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data,
						  sta->deflink.he_cap.has_he);
	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
		SET_CMC_TBL_DATA_DCM(skb->data, 0);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
			      chip->h2c_cctl_func_id, 0, 1,
			      H2C_CMC_TBL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

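/* Program per-STA TX time limits through the CMAC control table: the AMPDU
 * max time when cctl_tx_time is set, and the data retry limit when
 * cctl_tx_retry_limit is set.
 */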
rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 1963 struct rtw89_sta *rtwsta) 1964 { 1965 const struct rtw89_chip_info *chip = rtwdev->chip; 1966 struct sk_buff *skb; 1967 int ret; 1968 1969 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 1970 if (!skb) { 1971 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1972 return -ENOMEM; 1973 } 1974 skb_put(skb, H2C_CMC_TBL_LEN); 1975 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 1976 SET_CTRL_INFO_OPERATION(skb->data, 1); 1977 if (rtwsta->cctl_tx_time) { 1978 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1); 1979 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time); 1980 } 1981 if (rtwsta->cctl_tx_retry_limit) { 1982 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1); 1983 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt); 1984 } 1985 1986 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1987 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 1988 chip->h2c_cctl_func_id, 0, 1, 1989 H2C_CMC_TBL_LEN); 1990 1991 ret = rtw89_h2c_tx(rtwdev, skb, false); 1992 if (ret) { 1993 rtw89_err(rtwdev, "failed to send h2c\n"); 1994 goto fail; 1995 } 1996 1997 return 0; 1998 fail: 1999 dev_kfree_skb_any(skb); 2000 2001 return ret; 2002 } 2003 2004 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev, 2005 struct rtw89_sta *rtwsta) 2006 { 2007 const struct rtw89_chip_info *chip = rtwdev->chip; 2008 struct sk_buff *skb; 2009 int ret; 2010 2011 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD) 2012 return 0; 2013 2014 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2015 if (!skb) { 2016 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2017 return -ENOMEM; 2018 } 2019 skb_put(skb, H2C_CMC_TBL_LEN); 2020 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id); 2021 SET_CTRL_INFO_OPERATION(skb->data, 1); 2022 2023 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2024 2025 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2026 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2027 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1, 2028 H2C_CMC_TBL_LEN); 2029 2030 ret = rtw89_h2c_tx(rtwdev, skb, false); 2031 if (ret) { 2032 rtw89_err(rtwdev, "failed to send h2c\n"); 2033 goto fail; 2034 } 2035 2036 return 0; 2037 fail: 2038 dev_kfree_skb_any(skb); 2039 2040 return ret; 2041 } 2042 2043 #define H2C_BCN_BASE_LEN 12 2044 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev, 2045 struct rtw89_vif *rtwvif) 2046 { 2047 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2048 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2049 rtwvif->sub_entity_idx); 2050 struct sk_buff *skb; 2051 struct sk_buff *skb_beacon; 2052 u16 tim_offset; 2053 int bcn_total_len; 2054 u16 beacon_rate; 2055 void *noa_data; 2056 u8 noa_len; 2057 int ret; 2058 2059 if (vif->p2p) 2060 beacon_rate = RTW89_HW_RATE_OFDM6; 2061 else if (chan->band_type == RTW89_BAND_2G) 2062 beacon_rate = RTW89_HW_RATE_CCK1; 2063 else 2064 beacon_rate = RTW89_HW_RATE_OFDM6; 2065 2066 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset, 2067 NULL, 0); 2068 if (!skb_beacon) { 2069 rtw89_err(rtwdev, "failed to get beacon skb\n"); 2070 return -ENOMEM; 2071 } 2072 2073 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data); 2074 if (noa_len && 2075 (noa_len <= skb_tailroom(skb_beacon) || 2076 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) { 2077 skb_put_data(skb_beacon, noa_data, noa_len); 2078 } 2079 2080 bcn_total_len = H2C_BCN_BASE_LEN + skb_beacon->len; 2081 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len); 2082 if (!skb) { 2083 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 
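/* skb_beacon was obtained from ieee80211_beacon_get_tim() above and
 * is owned by this function, so release it on this error path too
 * before bailing out.
 */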
2084 dev_kfree_skb_any(skb_beacon); 2085 return -ENOMEM; 2086 } 2087 skb_put(skb, H2C_BCN_BASE_LEN); 2088 2089 SET_BCN_UPD_PORT(skb->data, rtwvif->port); 2090 SET_BCN_UPD_MBSSID(skb->data, 0); 2091 SET_BCN_UPD_BAND(skb->data, rtwvif->mac_idx); 2092 SET_BCN_UPD_GRP_IE_OFST(skb->data, tim_offset); 2093 SET_BCN_UPD_MACID(skb->data, rtwvif->mac_id); 2094 SET_BCN_UPD_SSN_SEL(skb->data, RTW89_MGMT_HW_SSN_SEL); 2095 SET_BCN_UPD_SSN_MODE(skb->data, RTW89_MGMT_HW_SEQ_MODE); 2096 SET_BCN_UPD_RATE(skb->data, beacon_rate); 2097 2098 skb_put_data(skb, skb_beacon->data, skb_beacon->len); 2099 dev_kfree_skb_any(skb_beacon); 2100 2101 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2102 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2103 H2C_FUNC_MAC_BCN_UPD, 0, 1, 2104 bcn_total_len); 2105 2106 ret = rtw89_h2c_tx(rtwdev, skb, false); 2107 if (ret) { 2108 rtw89_err(rtwdev, "failed to send h2c\n"); 2109 dev_kfree_skb_any(skb); 2110 return ret; 2111 } 2112 2113 return 0; 2114 } 2115 2116 #define H2C_ROLE_MAINTAIN_LEN 4 2117 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 2118 struct rtw89_vif *rtwvif, 2119 struct rtw89_sta *rtwsta, 2120 enum rtw89_upd_mode upd_mode) 2121 { 2122 struct sk_buff *skb; 2123 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2124 u8 self_role; 2125 int ret; 2126 2127 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 2128 if (rtwsta) 2129 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2130 else 2131 self_role = rtwvif->self_role; 2132 } else { 2133 self_role = rtwvif->self_role; 2134 } 2135 2136 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 2137 if (!skb) { 2138 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2139 return -ENOMEM; 2140 } 2141 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 2142 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 2143 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 2144 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 2145 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 2146 2147 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2148 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 2149 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 2150 H2C_ROLE_MAINTAIN_LEN); 2151 2152 ret = rtw89_h2c_tx(rtwdev, skb, false); 2153 if (ret) { 2154 rtw89_err(rtwdev, "failed to send h2c\n"); 2155 goto fail; 2156 } 2157 2158 return 0; 2159 fail: 2160 dev_kfree_skb_any(skb); 2161 2162 return ret; 2163 } 2164 2165 #define H2C_JOIN_INFO_LEN 4 2166 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2167 struct rtw89_sta *rtwsta, bool dis_conn) 2168 { 2169 struct sk_buff *skb; 2170 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2171 u8 self_role = rtwvif->self_role; 2172 u8 net_type = rtwvif->net_type; 2173 int ret; 2174 2175 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 2176 self_role = RTW89_SELF_ROLE_AP_CLIENT; 2177 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 2178 } 2179 2180 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 2181 if (!skb) { 2182 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2183 return -ENOMEM; 2184 } 2185 skb_put(skb, H2C_JOIN_INFO_LEN); 2186 SET_JOININFO_MACID(skb->data, mac_id); 2187 SET_JOININFO_OP(skb->data, dis_conn); 2188 SET_JOININFO_BAND(skb->data, rtwvif->mac_idx); 2189 SET_JOININFO_WMM(skb->data, rtwvif->wmm); 2190 SET_JOININFO_TGR(skb->data, rtwvif->trigger); 2191 SET_JOININFO_ISHESTA(skb->data, 0); 2192 SET_JOININFO_DLBW(skb->data, 0); 2193 SET_JOININFO_TF_MAC_PAD(skb->data, 0); 2194 SET_JOININFO_DL_T_PE(skb->data, 0); 2195 SET_JOININFO_PORT_ID(skb->data, rtwvif->port); 2196 SET_JOININFO_NET_TYPE(skb->data, net_type); 2197 SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role); 2198 SET_JOININFO_SELF_ROLE(skb->data, self_role); 2199 2200 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2201 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 2202 H2C_FUNC_MAC_JOININFO, 0, 1, 2203 H2C_JOIN_INFO_LEN); 2204 2205 ret = rtw89_h2c_tx(rtwdev, skb, false); 2206 if (ret) { 2207 rtw89_err(rtwdev, "failed to send h2c\n"); 2208 goto fail; 2209 } 2210 2211 return 0; 2212 fail: 2213 dev_kfree_skb_any(skb); 2214 2215 return ret; 2216 } 2217 2218 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 2219 bool pause) 2220 { 2221 struct rtw89_fw_macid_pause_grp h2c = {{0}}; 2222 u8 len = sizeof(struct rtw89_fw_macid_pause_grp); 2223 struct sk_buff *skb; 2224 int ret; 2225 2226 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_JOIN_INFO_LEN); 2227 if (!skb) { 2228 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2229 return -ENOMEM; 2230 } 2231 h2c.mask_grp[grp] = cpu_to_le32(BIT(sh)); 2232 if (pause) 2233 h2c.pause_grp[grp] = cpu_to_le32(BIT(sh)); 2234 skb_put_data(skb, &h2c, len); 2235 2236 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2237 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2238 H2C_FUNC_MAC_MACID_PAUSE, 1, 0, 2239 len); 2240 2241 ret = rtw89_h2c_tx(rtwdev, skb, false); 2242 if (ret) { 2243 rtw89_err(rtwdev, "failed to send h2c\n"); 2244 goto fail; 2245 } 2246 2247 return 0; 2248 fail: 2249 dev_kfree_skb_any(skb); 2250 2251 return ret; 2252 } 2253 2254 #define H2C_EDCA_LEN 12 2255 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2256 u8 ac, u32 val) 2257 { 2258 struct sk_buff *skb; 2259 int ret; 2260 2261 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 2262 if (!skb) { 2263 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 2264 return -ENOMEM; 2265 } 2266 skb_put(skb, H2C_EDCA_LEN); 2267 RTW89_SET_EDCA_SEL(skb->data, 0); 2268 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 2269 RTW89_SET_EDCA_WMM(skb->data, 0); 2270 RTW89_SET_EDCA_AC(skb->data, ac); 2271 RTW89_SET_EDCA_PARAM(skb->data, val); 2272 2273 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2274 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2275 H2C_FUNC_USR_EDCA, 0, 1, 2276 H2C_EDCA_LEN); 2277 2278 ret = rtw89_h2c_tx(rtwdev, skb, false); 2279 if (ret) { 2280 rtw89_err(rtwdev, "failed to send h2c\n"); 2281 goto fail; 2282 } 2283 2284 return 0; 2285 fail: 2286 dev_kfree_skb_any(skb); 2287 2288 return ret; 2289 } 2290 2291 #define H2C_TSF32_TOGL_LEN 4 2292 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 2293 bool en) 2294 { 2295 struct sk_buff *skb; 2296 u16 early_us = en ? 
2000 : 0; 2297 u8 *cmd; 2298 int ret; 2299 2300 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 2301 if (!skb) { 2302 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2303 return -ENOMEM; 2304 } 2305 skb_put(skb, H2C_TSF32_TOGL_LEN); 2306 cmd = skb->data; 2307 2308 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 2309 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 2310 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 2311 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 2312 2313 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2314 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2315 H2C_FUNC_TSF32_TOGL, 0, 0, 2316 H2C_TSF32_TOGL_LEN); 2317 2318 ret = rtw89_h2c_tx(rtwdev, skb, false); 2319 if (ret) { 2320 rtw89_err(rtwdev, "failed to send h2c\n"); 2321 goto fail; 2322 } 2323 2324 return 0; 2325 fail: 2326 dev_kfree_skb_any(skb); 2327 2328 return ret; 2329 } 2330 2331 #define H2C_OFLD_CFG_LEN 8 2332 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 2333 { 2334 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 2335 struct sk_buff *skb; 2336 int ret; 2337 2338 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 2339 if (!skb) { 2340 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 2341 return -ENOMEM; 2342 } 2343 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 2344 2345 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2346 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2347 H2C_FUNC_OFLD_CFG, 0, 1, 2348 H2C_OFLD_CFG_LEN); 2349 2350 ret = rtw89_h2c_tx(rtwdev, skb, false); 2351 if (ret) { 2352 rtw89_err(rtwdev, "failed to send h2c\n"); 2353 goto fail; 2354 } 2355 2356 return 0; 2357 fail: 2358 dev_kfree_skb_any(skb); 2359 2360 return ret; 2361 } 2362 2363 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 2364 struct ieee80211_vif *vif, 2365 bool connect) 2366 { 2367 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 2368 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 2369 struct rtw89_h2c_bcnfltr *h2c; 2370 u32 len = sizeof(*h2c); 2371 struct sk_buff *skb; 2372 int ret; 2373 2374 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2375 return -EINVAL; 2376 2377 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2378 return -EINVAL; 2379 2380 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2381 if (!skb) { 2382 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 2383 return -ENOMEM; 2384 } 2385 2386 skb_put(skb, len); 2387 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 2388 2389 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 2390 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 2391 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 2392 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 2393 RTW89_H2C_BCNFLTR_W0_MODE) | 2394 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 2395 le32_encode_bits(bss_conf->cqm_rssi_hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 2396 le32_encode_bits(bss_conf->cqm_rssi_thold + MAX_RSSI, 2397 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 2398 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 2399 2400 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2401 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2402 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 2403 2404 ret = rtw89_h2c_tx(rtwdev, skb, false); 2405 if (ret) { 2406 rtw89_err(rtwdev, "failed to send h2c\n"); 2407 goto fail; 2408 } 2409 2410 return 0; 2411 fail: 2412 dev_kfree_skb_any(skb); 2413 2414 return ret; 2415 } 2416 2417 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 2418 struct rtw89_rx_phy_ppdu *phy_ppdu) 2419 { 2420 struct rtw89_h2c_ofld_rssi *h2c; 2421 u32 len = sizeof(*h2c); 2422 struct sk_buff *skb; 2423 s8 rssi; 2424 int ret; 2425 2426 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 2427 return -EINVAL; 2428 2429 if (!phy_ppdu) 2430 return -EINVAL; 2431 2432 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2433 if (!skb) { 2434 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 2435 return -ENOMEM; 2436 } 2437 2438 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 2439 skb_put(skb, len); 2440 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 2441 2442 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 2443 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 2444 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 2445 2446 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2447 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2448 H2C_FUNC_OFLD_RSSI, 0, 1, len); 2449 2450 ret = rtw89_h2c_tx(rtwdev, skb, false); 2451 if (ret) { 2452 rtw89_err(rtwdev, "failed to send h2c\n"); 2453 goto fail; 2454 } 2455 2456 return 0; 2457 fail: 2458 dev_kfree_skb_any(skb); 2459 2460 return ret; 2461 } 2462 2463 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2464 { 2465 struct rtw89_traffic_stats *stats = &rtwvif->stats; 2466 struct rtw89_h2c_ofld *h2c; 2467 u32 len = sizeof(*h2c); 2468 struct sk_buff *skb; 2469 int ret; 2470 2471 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 2472 return -EINVAL; 2473 2474 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2475 if (!skb) { 2476 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 2477 return -ENOMEM; 2478 } 2479 2480 skb_put(skb, len); 2481 h2c = (struct rtw89_h2c_ofld *)skb->data; 2482 2483 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 2484 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 2485 
le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 2486 2487 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2488 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 2489 H2C_FUNC_OFLD_TP, 0, 1, len); 2490 2491 ret = rtw89_h2c_tx(rtwdev, skb, false); 2492 if (ret) { 2493 rtw89_err(rtwdev, "failed to send h2c\n"); 2494 goto fail; 2495 } 2496 2497 return 0; 2498 fail: 2499 dev_kfree_skb_any(skb); 2500 2501 return ret; 2502 } 2503 2504 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 2505 { 2506 const struct rtw89_chip_info *chip = rtwdev->chip; 2507 struct rtw89_h2c_ra_v1 *h2c_v1; 2508 struct rtw89_h2c_ra *h2c; 2509 u32 len = sizeof(*h2c); 2510 bool format_v1 = false; 2511 struct sk_buff *skb; 2512 int ret; 2513 2514 if (chip->chip_gen == RTW89_CHIP_BE) { 2515 len = sizeof(*h2c_v1); 2516 format_v1 = true; 2517 } 2518 2519 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2520 if (!skb) { 2521 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 2522 return -ENOMEM; 2523 } 2524 skb_put(skb, len); 2525 h2c = (struct rtw89_h2c_ra *)skb->data; 2526 rtw89_debug(rtwdev, RTW89_DBG_RA, 2527 "ra cmd msk: %llx ", ra->ra_mask); 2528 2529 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 2530 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 2531 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 2532 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 2533 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 2534 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 2535 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 2536 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 2537 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 2538 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 2539 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 2540 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 2541 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 2542 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 2543 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 2544 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 2545 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 2546 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 2547 2548 if (!format_v1) 2549 goto csi; 2550 2551 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 2552 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 2553 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 2554 2555 csi: 2556 if (!csi) 2557 goto done; 2558 2559 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 2560 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 2561 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 2562 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 2563 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 2564 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 2565 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 2566 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 2567 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 2568 2569 done: 2570 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2571 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 2572 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 2573 len); 2574 2575 ret = rtw89_h2c_tx(rtwdev, skb, false); 2576 if (ret) { 2577 rtw89_err(rtwdev, "failed 
to send h2c\n"); 2578 goto fail; 2579 } 2580 2581 return 0; 2582 fail: 2583 dev_kfree_skb_any(skb); 2584 2585 return ret; 2586 } 2587 2588 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev) 2589 { 2590 struct rtw89_btc *btc = &rtwdev->btc; 2591 struct rtw89_btc_dm *dm = &btc->dm; 2592 struct rtw89_btc_init_info *init_info = &dm->init_info; 2593 struct rtw89_btc_module *module = &init_info->module; 2594 struct rtw89_btc_ant_info *ant = &module->ant; 2595 struct rtw89_h2c_cxinit *h2c; 2596 u32 len = sizeof(*h2c); 2597 struct sk_buff *skb; 2598 int ret; 2599 2600 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2601 if (!skb) { 2602 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 2603 return -ENOMEM; 2604 } 2605 skb_put(skb, len); 2606 h2c = (struct rtw89_h2c_cxinit *)skb->data; 2607 2608 h2c->hdr.type = CXDRVINFO_INIT; 2609 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 2610 2611 h2c->ant_type = ant->type; 2612 h2c->ant_num = ant->num; 2613 h2c->ant_iso = ant->isolation; 2614 h2c->ant_info = 2615 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 2616 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 2617 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 2618 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 2619 2620 h2c->mod_rfe = module->rfe_type; 2621 h2c->mod_cv = module->cv; 2622 h2c->mod_info = 2623 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 2624 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 2625 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 2626 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 2627 h2c->mod_adie_kt = module->kt_ver_adie; 2628 h2c->wl_gch = init_info->wl_guard_ch; 2629 2630 h2c->info = 2631 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 2632 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 2633 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 2634 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 2635 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 2636 2637 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2638 H2C_CAT_OUTSRC, BTFC_SET, 2639 SET_DRV_INFO, 0, 0, 2640 len); 2641 2642 ret = rtw89_h2c_tx(rtwdev, skb, false); 2643 if (ret) { 2644 rtw89_err(rtwdev, "failed to send h2c\n"); 2645 goto fail; 2646 } 2647 2648 return 0; 2649 fail: 2650 dev_kfree_skb_any(skb); 2651 2652 return ret; 2653 } 2654 2655 #define PORT_DATA_OFFSET 4 2656 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 2657 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 2658 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 2659 2660 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev) 2661 { 2662 struct rtw89_btc *btc = &rtwdev->btc; 2663 const struct rtw89_btc_ver *ver = btc->ver; 2664 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2665 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 2666 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2667 struct rtw89_btc_wl_active_role *active = role_info->active_role; 2668 struct sk_buff *skb; 2669 u32 len; 2670 u8 offset = 0; 2671 u8 *cmd; 2672 int ret; 2673 int i; 2674 2675 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 2676 2677 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2678 if (!skb) { 2679 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2680 return -ENOMEM; 2681 } 2682 skb_put(skb, len); 2683 cmd = skb->data; 2684 2685 
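/* Build the coex ROLE driver report: CX driver-info header, role
 * summary (connect count, link mode, role bitmap), then one
 * active-role record per port (12 bytes each, per the
 * H2C_LEN_CXDRVINFO_ROLE_SIZE() macro above).
 */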
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2686 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2687 2688 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2689 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2690 2691 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2692 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2693 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2694 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2695 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2696 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2697 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2698 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2699 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2700 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2701 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2702 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2703 2704 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2705 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2706 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2707 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2708 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2709 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2710 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2711 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2712 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2713 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2714 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2715 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2716 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2717 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2718 } 2719 2720 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2721 H2C_CAT_OUTSRC, BTFC_SET, 2722 SET_DRV_INFO, 0, 0, 2723 len); 2724 2725 ret = rtw89_h2c_tx(rtwdev, skb, false); 2726 if (ret) { 2727 rtw89_err(rtwdev, "failed to send h2c\n"); 2728 goto fail; 2729 } 2730 2731 return 0; 2732 fail: 2733 dev_kfree_skb_any(skb); 2734 2735 return ret; 2736 } 2737 2738 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 2739 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2740 2741 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev) 2742 { 2743 struct rtw89_btc *btc = &rtwdev->btc; 2744 const struct rtw89_btc_ver *ver = btc->ver; 2745 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2746 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 2747 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2748 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 2749 struct sk_buff *skb; 2750 u32 len; 2751 u8 *cmd, offset; 2752 int ret; 2753 int i; 2754 2755 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 2756 2757 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2758 if (!skb) { 2759 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2760 return -ENOMEM; 2761 } 2762 skb_put(skb, len); 2763 cmd = skb->data; 2764 2765 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2766 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2767 2768 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2769 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2770 2771 
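/* Role bitmap: one flag per interface type currently held on the
 * WLAN side (station, AP, P2P GO/GC, mesh, ...).
 */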
RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2772 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2773 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2774 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2775 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2776 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2777 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2778 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2779 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2780 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2781 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2782 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2783 2784 offset = PORT_DATA_OFFSET; 2785 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2786 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 2787 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 2788 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 2789 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 2790 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 2791 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 2792 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 2793 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 2794 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 2795 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 2796 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 2797 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 2798 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 2799 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 2800 } 2801 2802 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2803 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2804 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2805 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2806 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2807 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2808 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2809 2810 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2811 H2C_CAT_OUTSRC, BTFC_SET, 2812 SET_DRV_INFO, 0, 0, 2813 len); 2814 2815 ret = rtw89_h2c_tx(rtwdev, skb, false); 2816 if (ret) { 2817 rtw89_err(rtwdev, "failed to send h2c\n"); 2818 goto fail; 2819 } 2820 2821 return 0; 2822 fail: 2823 dev_kfree_skb_any(skb); 2824 2825 return ret; 2826 } 2827 2828 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 2829 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 2830 2831 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev) 2832 { 2833 struct rtw89_btc *btc = &rtwdev->btc; 2834 const struct rtw89_btc_ver *ver = btc->ver; 2835 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 2836 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 2837 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 2838 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 2839 struct sk_buff *skb; 2840 u32 len; 2841 u8 *cmd, offset; 2842 int ret; 2843 int i; 2844 2845 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 2846 2847 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2848 if (!skb) { 2849 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 2850 
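/* Nothing has been queued or committed yet, so no cleanup beyond
 * reporting the allocation failure is needed.
 */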
return -ENOMEM; 2851 } 2852 skb_put(skb, len); 2853 cmd = skb->data; 2854 2855 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE); 2856 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 2857 2858 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 2859 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 2860 2861 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 2862 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 2863 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 2864 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 2865 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 2866 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 2867 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 2868 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 2869 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 2870 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 2871 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 2872 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 2873 2874 offset = PORT_DATA_OFFSET; 2875 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 2876 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 2877 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 2878 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 2879 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 2880 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 2881 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 2882 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 2883 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 2884 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 2885 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 2886 } 2887 2888 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 2889 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 2890 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 2891 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 2892 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 2893 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 2894 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 2895 2896 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2897 H2C_CAT_OUTSRC, BTFC_SET, 2898 SET_DRV_INFO, 0, 0, 2899 len); 2900 2901 ret = rtw89_h2c_tx(rtwdev, skb, false); 2902 if (ret) { 2903 rtw89_err(rtwdev, "failed to send h2c\n"); 2904 goto fail; 2905 } 2906 2907 return 0; 2908 fail: 2909 dev_kfree_skb_any(skb); 2910 2911 return ret; 2912 } 2913 2914 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 2915 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev) 2916 { 2917 struct rtw89_btc *btc = &rtwdev->btc; 2918 const struct rtw89_btc_ver *ver = btc->ver; 2919 struct rtw89_btc_ctrl *ctrl = &btc->ctrl; 2920 struct sk_buff *skb; 2921 u8 *cmd; 2922 int ret; 2923 2924 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 2925 if (!skb) { 2926 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 2927 return -ENOMEM; 2928 } 2929 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 2930 cmd = skb->data; 2931 2932 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL); 2933 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 2934 2935 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 2936 
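/* Remaining coex control knobs; trace_step only exists in the
 * original layout, hence the ver->fcxctrl == 0 check below.
 */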
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 2937 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 2938 if (ver->fcxctrl == 0) 2939 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 2940 2941 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2942 H2C_CAT_OUTSRC, BTFC_SET, 2943 SET_DRV_INFO, 0, 0, 2944 H2C_LEN_CXDRVINFO_CTRL); 2945 2946 ret = rtw89_h2c_tx(rtwdev, skb, false); 2947 if (ret) { 2948 rtw89_err(rtwdev, "failed to send h2c\n"); 2949 goto fail; 2950 } 2951 2952 return 0; 2953 fail: 2954 dev_kfree_skb_any(skb); 2955 2956 return ret; 2957 } 2958 2959 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 2960 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev) 2961 { 2962 struct rtw89_btc *btc = &rtwdev->btc; 2963 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 2964 struct sk_buff *skb; 2965 u8 *cmd; 2966 int ret; 2967 2968 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 2969 if (!skb) { 2970 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 2971 return -ENOMEM; 2972 } 2973 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 2974 cmd = skb->data; 2975 2976 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_TRX); 2977 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 2978 2979 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 2980 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 2981 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 2982 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 2983 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 2984 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 2985 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 2986 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 2987 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 2988 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 2989 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 2990 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 2991 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 2992 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 2993 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 2994 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 2995 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 2996 2997 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2998 H2C_CAT_OUTSRC, BTFC_SET, 2999 SET_DRV_INFO, 0, 0, 3000 H2C_LEN_CXDRVINFO_TRX); 3001 3002 ret = rtw89_h2c_tx(rtwdev, skb, false); 3003 if (ret) { 3004 rtw89_err(rtwdev, "failed to send h2c\n"); 3005 goto fail; 3006 } 3007 3008 return 0; 3009 fail: 3010 dev_kfree_skb_any(skb); 3011 3012 return ret; 3013 } 3014 3015 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 3016 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev) 3017 { 3018 struct rtw89_btc *btc = &rtwdev->btc; 3019 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 3020 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 3021 struct sk_buff *skb; 3022 u8 *cmd; 3023 int ret; 3024 3025 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 3026 if (!skb) { 3027 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 3028 return -ENOMEM; 3029 } 3030 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 3031 cmd = skb->data; 3032 3033 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK); 3034 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 3035 3036 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 3037 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 3038 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 3039 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 3040 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, 
rfk_info->type); 3041 3042 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3043 H2C_CAT_OUTSRC, BTFC_SET, 3044 SET_DRV_INFO, 0, 0, 3045 H2C_LEN_CXDRVINFO_RFK); 3046 3047 ret = rtw89_h2c_tx(rtwdev, skb, false); 3048 if (ret) { 3049 rtw89_err(rtwdev, "failed to send h2c\n"); 3050 goto fail; 3051 } 3052 3053 return 0; 3054 fail: 3055 dev_kfree_skb_any(skb); 3056 3057 return ret; 3058 } 3059 3060 #define H2C_LEN_PKT_OFLD 4 3061 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 3062 { 3063 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3064 struct sk_buff *skb; 3065 unsigned int cond; 3066 u8 *cmd; 3067 int ret; 3068 3069 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 3070 if (!skb) { 3071 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 3072 return -ENOMEM; 3073 } 3074 skb_put(skb, H2C_LEN_PKT_OFLD); 3075 cmd = skb->data; 3076 3077 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 3078 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 3079 3080 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3081 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3082 H2C_FUNC_PACKET_OFLD, 1, 1, 3083 H2C_LEN_PKT_OFLD); 3084 3085 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 3086 3087 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3088 if (ret < 0) { 3089 rtw89_debug(rtwdev, RTW89_DBG_FW, 3090 "failed to del pkt ofld: id %d, ret %d\n", 3091 id, ret); 3092 return ret; 3093 } 3094 3095 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 3096 return 0; 3097 } 3098 3099 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 3100 struct sk_buff *skb_ofld) 3101 { 3102 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3103 struct sk_buff *skb; 3104 unsigned int cond; 3105 u8 *cmd; 3106 u8 alloc_id; 3107 int ret; 3108 3109 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 3110 RTW89_MAX_PKT_OFLD_NUM); 3111 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 3112 return -ENOSPC; 3113 3114 *id = alloc_id; 3115 3116 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 3117 if (!skb) { 3118 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 3119 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 3120 return -ENOMEM; 3121 } 3122 skb_put(skb, H2C_LEN_PKT_OFLD); 3123 cmd = skb->data; 3124 3125 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 3126 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 3127 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 3128 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 3129 3130 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3131 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3132 H2C_FUNC_PACKET_OFLD, 1, 1, 3133 H2C_LEN_PKT_OFLD + skb_ofld->len); 3134 3135 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 3136 3137 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3138 if (ret < 0) { 3139 rtw89_debug(rtwdev, RTW89_DBG_FW, 3140 "failed to add pkt ofld: id %d, ret %d\n", 3141 alloc_id, ret); 3142 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 3143 return ret; 3144 } 3145 3146 return 0; 3147 } 3148 3149 #define H2C_LEN_SCAN_LIST_OFFLOAD 4 3150 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int len, 3151 struct list_head *chan_list) 3152 { 3153 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3154 struct rtw89_mac_chinfo *ch_info; 3155 struct sk_buff *skb; 3156 int skb_len = H2C_LEN_SCAN_LIST_OFFLOAD + len * RTW89_MAC_CHINFO_SIZE; 3157 unsigned int cond; 3158 u8 
*cmd; 3159 int ret; 3160 3161 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 3162 if (!skb) { 3163 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 3164 return -ENOMEM; 3165 } 3166 skb_put(skb, H2C_LEN_SCAN_LIST_OFFLOAD); 3167 cmd = skb->data; 3168 3169 RTW89_SET_FWCMD_SCANOFLD_CH_NUM(cmd, len); 3170 /* in unit of 4 bytes */ 3171 RTW89_SET_FWCMD_SCANOFLD_CH_SIZE(cmd, RTW89_MAC_CHINFO_SIZE / 4); 3172 3173 list_for_each_entry(ch_info, chan_list, list) { 3174 cmd = skb_put(skb, RTW89_MAC_CHINFO_SIZE); 3175 3176 RTW89_SET_FWCMD_CHINFO_PERIOD(cmd, ch_info->period); 3177 RTW89_SET_FWCMD_CHINFO_DWELL(cmd, ch_info->dwell_time); 3178 RTW89_SET_FWCMD_CHINFO_CENTER_CH(cmd, ch_info->central_ch); 3179 RTW89_SET_FWCMD_CHINFO_PRI_CH(cmd, ch_info->pri_ch); 3180 RTW89_SET_FWCMD_CHINFO_BW(cmd, ch_info->bw); 3181 RTW89_SET_FWCMD_CHINFO_ACTION(cmd, ch_info->notify_action); 3182 RTW89_SET_FWCMD_CHINFO_NUM_PKT(cmd, ch_info->num_pkt); 3183 RTW89_SET_FWCMD_CHINFO_TX(cmd, ch_info->tx_pkt); 3184 RTW89_SET_FWCMD_CHINFO_PAUSE_DATA(cmd, ch_info->pause_data); 3185 RTW89_SET_FWCMD_CHINFO_BAND(cmd, ch_info->ch_band); 3186 RTW89_SET_FWCMD_CHINFO_PKT_ID(cmd, ch_info->probe_id); 3187 RTW89_SET_FWCMD_CHINFO_DFS(cmd, ch_info->dfs_ch); 3188 RTW89_SET_FWCMD_CHINFO_TX_NULL(cmd, ch_info->tx_null); 3189 RTW89_SET_FWCMD_CHINFO_RANDOM(cmd, ch_info->rand_seq_num); 3190 RTW89_SET_FWCMD_CHINFO_PKT0(cmd, ch_info->pkt_id[0]); 3191 RTW89_SET_FWCMD_CHINFO_PKT1(cmd, ch_info->pkt_id[1]); 3192 RTW89_SET_FWCMD_CHINFO_PKT2(cmd, ch_info->pkt_id[2]); 3193 RTW89_SET_FWCMD_CHINFO_PKT3(cmd, ch_info->pkt_id[3]); 3194 RTW89_SET_FWCMD_CHINFO_PKT4(cmd, ch_info->pkt_id[4]); 3195 RTW89_SET_FWCMD_CHINFO_PKT5(cmd, ch_info->pkt_id[5]); 3196 RTW89_SET_FWCMD_CHINFO_PKT6(cmd, ch_info->pkt_id[6]); 3197 RTW89_SET_FWCMD_CHINFO_PKT7(cmd, ch_info->pkt_id[7]); 3198 } 3199 3200 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3201 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3202 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 3203 3204 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_ADD_SCANOFLD_CH); 3205 3206 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3207 if (ret) { 3208 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 3209 return ret; 3210 } 3211 3212 return 0; 3213 } 3214 3215 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 3216 struct rtw89_scan_option *option, 3217 struct rtw89_vif *rtwvif) 3218 { 3219 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 3220 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3221 struct rtw89_h2c_scanofld *h2c; 3222 u32 len = sizeof(*h2c); 3223 struct sk_buff *skb; 3224 unsigned int cond; 3225 int ret; 3226 3227 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3228 if (!skb) { 3229 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 3230 return -ENOMEM; 3231 } 3232 skb_put(skb, len); 3233 h2c = (struct rtw89_h2c_scanofld *)skb->data; 3234 3235 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 3236 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 3237 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 3238 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 3239 3240 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 3241 le32_encode_bits(option->target_ch_mode, 3242 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 3243 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 3244 RTW89_H2C_SCANOFLD_W1_START_MODE) | 3245 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 3246 3247 if 
(option->target_ch_mode) { 3248 h2c->w1 |= le32_encode_bits(op->band_width, 3249 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 3250 le32_encode_bits(op->primary_channel, 3251 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 3252 le32_encode_bits(op->channel, 3253 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 3254 h2c->w0 |= le32_encode_bits(op->band_type, 3255 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 3256 } 3257 3258 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3259 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3260 H2C_FUNC_SCANOFLD, 1, 1, 3261 len); 3262 3263 cond = RTW89_FW_OFLD_WAIT_COND(0, H2C_FUNC_SCANOFLD); 3264 3265 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 3266 if (ret) { 3267 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 3268 return ret; 3269 } 3270 3271 return 0; 3272 } 3273 3274 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 3275 struct rtw89_fw_h2c_rf_reg_info *info, 3276 u16 len, u8 page) 3277 { 3278 struct sk_buff *skb; 3279 u8 class = info->rf_path == RF_PATH_A ? 3280 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 3281 int ret; 3282 3283 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3284 if (!skb) { 3285 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 3286 return -ENOMEM; 3287 } 3288 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 3289 3290 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3291 H2C_CAT_OUTSRC, class, page, 0, 0, 3292 len); 3293 3294 ret = rtw89_h2c_tx(rtwdev, skb, false); 3295 if (ret) { 3296 rtw89_err(rtwdev, "failed to send h2c\n"); 3297 goto fail; 3298 } 3299 3300 return 0; 3301 fail: 3302 dev_kfree_skb_any(skb); 3303 3304 return ret; 3305 } 3306 3307 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 3308 { 3309 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 3310 struct rtw89_fw_h2c_rf_get_mccch *mccch; 3311 struct sk_buff *skb; 3312 int ret; 3313 u8 idx; 3314 3315 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 3316 if (!skb) { 3317 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 3318 return -ENOMEM; 3319 } 3320 skb_put(skb, sizeof(*mccch)); 3321 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 3322 3323 idx = rfk_mcc->table_idx; 3324 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 3325 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 3326 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 3327 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 3328 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 3329 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 3330 3331 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3332 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 3333 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 3334 sizeof(*mccch)); 3335 3336 ret = rtw89_h2c_tx(rtwdev, skb, false); 3337 if (ret) { 3338 rtw89_err(rtwdev, "failed to send h2c\n"); 3339 goto fail; 3340 } 3341 3342 return 0; 3343 fail: 3344 dev_kfree_skb_any(skb); 3345 3346 return ret; 3347 } 3348 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 3349 3350 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 3351 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 3352 bool rack, bool dack) 3353 { 3354 struct sk_buff *skb; 3355 int ret; 3356 3357 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3358 if (!skb) { 3359 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 3360 return -ENOMEM; 3361 } 3362 skb_put_data(skb, buf, len); 3363 3364 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3365 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 3366 len); 3367 3368 ret = rtw89_h2c_tx(rtwdev, skb, false); 3369 if (ret) { 3370 
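/* As with the other H2C helpers in this file, a failed
 * rtw89_h2c_tx() leaves the skb to the caller, so it is freed on
 * the fail path below.
 */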
rtw89_err(rtwdev, "failed to send h2c\n"); 3371 goto fail; 3372 } 3373 3374 return 0; 3375 fail: 3376 dev_kfree_skb_any(skb); 3377 3378 return ret; 3379 } 3380 3381 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 3382 { 3383 struct sk_buff *skb; 3384 int ret; 3385 3386 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 3387 if (!skb) { 3388 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 3389 return -ENOMEM; 3390 } 3391 skb_put_data(skb, buf, len); 3392 3393 ret = rtw89_h2c_tx(rtwdev, skb, false); 3394 if (ret) { 3395 rtw89_err(rtwdev, "failed to send h2c\n"); 3396 goto fail; 3397 } 3398 3399 return 0; 3400 fail: 3401 dev_kfree_skb_any(skb); 3402 3403 return ret; 3404 } 3405 3406 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 3407 { 3408 struct rtw89_early_h2c *early_h2c; 3409 3410 lockdep_assert_held(&rtwdev->mutex); 3411 3412 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 3413 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 3414 } 3415 } 3416 3417 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 3418 { 3419 struct rtw89_early_h2c *early_h2c, *tmp; 3420 3421 mutex_lock(&rtwdev->mutex); 3422 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 3423 list_del(&early_h2c->list); 3424 kfree(early_h2c->h2c); 3425 kfree(early_h2c); 3426 } 3427 mutex_unlock(&rtwdev->mutex); 3428 } 3429 3430 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 3431 { 3432 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 3433 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3434 3435 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 3436 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 3437 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 3438 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 3439 } 3440 3441 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 3442 struct sk_buff *c2h) 3443 { 3444 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 3445 u8 category = attr->category; 3446 u8 class = attr->class; 3447 u8 func = attr->func; 3448 3449 switch (category) { 3450 default: 3451 return false; 3452 case RTW89_C2H_CAT_MAC: 3453 return rtw89_mac_c2h_chk_atomic(rtwdev, class, func); 3454 } 3455 } 3456 3457 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 3458 { 3459 rtw89_fw_c2h_parse_attr(c2h); 3460 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 3461 goto enqueue; 3462 3463 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 3464 dev_kfree_skb_any(c2h); 3465 return; 3466 3467 enqueue: 3468 skb_queue_tail(&rtwdev->c2h_queue, c2h); 3469 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 3470 } 3471 3472 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 3473 struct sk_buff *skb) 3474 { 3475 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 3476 u8 category = attr->category; 3477 u8 class = attr->class; 3478 u8 func = attr->func; 3479 u16 len = attr->len; 3480 bool dump = true; 3481 3482 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 3483 return; 3484 3485 switch (category) { 3486 case RTW89_C2H_CAT_TEST: 3487 break; 3488 case RTW89_C2H_CAT_MAC: 3489 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 3490 if (class == RTW89_MAC_C2H_CLASS_INFO && 3491 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 3492 dump = false; 3493 break; 3494 case RTW89_C2H_CAT_OUTSRC: 3495 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 3496 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 3497 rtw89_btc_c2h_handle(rtwdev, skb, len, 
class, func); 3498 else 3499 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 3500 break; 3501 } 3502 3503 if (dump) 3504 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 3505 } 3506 3507 void rtw89_fw_c2h_work(struct work_struct *work) 3508 { 3509 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 3510 c2h_work); 3511 struct sk_buff *skb, *tmp; 3512 3513 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 3514 skb_unlink(skb, &rtwdev->c2h_queue); 3515 mutex_lock(&rtwdev->mutex); 3516 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 3517 mutex_unlock(&rtwdev->mutex); 3518 dev_kfree_skb_any(skb); 3519 } 3520 } 3521 3522 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 3523 struct rtw89_mac_h2c_info *info) 3524 { 3525 const struct rtw89_chip_info *chip = rtwdev->chip; 3526 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3527 const u32 *h2c_reg = chip->h2c_regs; 3528 u8 i, val, len; 3529 int ret; 3530 3531 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 3532 rtwdev, chip->h2c_ctrl_reg); 3533 if (ret) { 3534 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 3535 return ret; 3536 } 3537 3538 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 3539 sizeof(info->u.h2creg[0])); 3540 3541 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 3542 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 3543 3544 for (i = 0; i < RTW89_H2CREG_MAX; i++) 3545 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 3546 3547 fw_info->h2c_counter++; 3548 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 3549 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 3550 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 3551 3552 return 0; 3553 } 3554 3555 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 3556 struct rtw89_mac_c2h_info *info) 3557 { 3558 const struct rtw89_chip_info *chip = rtwdev->chip; 3559 struct rtw89_fw_info *fw_info = &rtwdev->fw; 3560 const u32 *c2h_reg = chip->c2h_regs; 3561 u32 ret; 3562 u8 i, val; 3563 3564 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 3565 3566 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 3567 RTW89_C2H_TIMEOUT, false, rtwdev, 3568 chip->c2h_ctrl_reg); 3569 if (ret) { 3570 rtw89_warn(rtwdev, "c2h reg timeout\n"); 3571 return ret; 3572 } 3573 3574 for (i = 0; i < RTW89_C2HREG_MAX; i++) 3575 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 3576 3577 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 3578 3579 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 3580 info->content_len = 3581 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 3582 RTW89_C2HREG_HDR_LEN; 3583 3584 fw_info->c2h_counter++; 3585 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 3586 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 3587 3588 return 0; 3589 } 3590 3591 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 3592 struct rtw89_mac_h2c_info *h2c_info, 3593 struct rtw89_mac_c2h_info *c2h_info) 3594 { 3595 u32 ret; 3596 3597 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 3598 lockdep_assert_held(&rtwdev->mutex); 3599 3600 if (!h2c_info && !c2h_info) 3601 return -EINVAL; 3602 3603 if (!h2c_info) 3604 goto recv_c2h; 3605 3606 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 3607 if (ret) 3608 return ret; 3609 3610 recv_c2h: 3611 if (!c2h_info) 3612 return 0; 3613 3614 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 3615 if (ret) 3616 return ret; 3617 3618 return 0; 3619 } 3620 3621 void 
rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 3622 { 3623 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 3624 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 3625 return; 3626 } 3627 3628 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 3629 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 3630 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 3631 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 3632 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 3633 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 3634 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 3635 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 3636 3637 rtw89_fw_prog_cnt_dump(rtwdev); 3638 } 3639 3640 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 3641 { 3642 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3643 struct rtw89_pktofld_info *info, *tmp; 3644 u8 idx; 3645 3646 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 3647 if (!(rtwdev->chip->support_bands & BIT(idx))) 3648 continue; 3649 3650 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 3651 if (test_bit(info->id, rtwdev->pkt_offload)) 3652 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 3653 list_del(&info->list); 3654 kfree(info); 3655 } 3656 } 3657 } 3658 3659 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 3660 struct rtw89_vif *rtwvif, 3661 struct rtw89_pktofld_info *info, 3662 enum nl80211_band band, u8 ssid_idx) 3663 { 3664 struct cfg80211_scan_request *req = rtwvif->scan_req; 3665 3666 if (band != NL80211_BAND_6GHZ) 3667 return false; 3668 3669 if (req->ssids[ssid_idx].ssid_len) { 3670 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 3671 req->ssids[ssid_idx].ssid_len); 3672 info->ssid_len = req->ssids[ssid_idx].ssid_len; 3673 return false; 3674 } else { 3675 return true; 3676 } 3677 } 3678 3679 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 3680 struct rtw89_vif *rtwvif, 3681 struct sk_buff *skb, u8 ssid_idx) 3682 { 3683 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3684 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3685 struct rtw89_pktofld_info *info; 3686 struct sk_buff *new; 3687 int ret = 0; 3688 u8 band; 3689 3690 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 3691 if (!(rtwdev->chip->support_bands & BIT(band))) 3692 continue; 3693 3694 new = skb_copy(skb, GFP_KERNEL); 3695 if (!new) { 3696 ret = -ENOMEM; 3697 goto out; 3698 } 3699 skb_put_data(new, ies->ies[band], ies->len[band]); 3700 skb_put_data(new, ies->common_ies, ies->common_ie_len); 3701 3702 info = kzalloc(sizeof(*info), GFP_KERNEL); 3703 if (!info) { 3704 ret = -ENOMEM; 3705 kfree_skb(new); 3706 goto out; 3707 } 3708 3709 if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 3710 ssid_idx)) { 3711 kfree_skb(new); 3712 kfree(info); 3713 goto out; 3714 } 3715 3716 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 3717 if (ret) { 3718 kfree_skb(new); 3719 kfree(info); 3720 goto out; 3721 } 3722 3723 list_add_tail(&info->list, &scan_info->pkt_list[band]); 3724 kfree_skb(new); 3725 } 3726 out: 3727 return ret; 3728 } 3729 3730 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 3731 struct rtw89_vif *rtwvif) 3732 { 3733 struct cfg80211_scan_request *req = rtwvif->scan_req; 3734 struct sk_buff *skb; 3735 u8 num = req->n_ssids, i; 3736 int ret; 3737 3738 for (i = 0; i < num; i++) { 3739 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3740 
req->ssids[i].ssid, 3741 req->ssids[i].ssid_len, 3742 req->ie_len); 3743 if (!skb) 3744 return -ENOMEM; 3745 3746 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 3747 kfree_skb(skb); 3748 3749 if (ret) 3750 return ret; 3751 } 3752 3753 return 0; 3754 } 3755 3756 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 3757 struct cfg80211_scan_request *req, 3758 struct rtw89_mac_chinfo *ch_info) 3759 { 3760 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3761 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 3762 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3763 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 3764 struct cfg80211_scan_6ghz_params *params; 3765 struct rtw89_pktofld_info *info, *tmp; 3766 struct ieee80211_hdr *hdr; 3767 struct sk_buff *skb; 3768 bool found; 3769 int ret = 0; 3770 u8 i; 3771 3772 if (!req->n_6ghz_params) 3773 return 0; 3774 3775 for (i = 0; i < req->n_6ghz_params; i++) { 3776 params = &req->scan_6ghz_params[i]; 3777 3778 if (req->channels[params->channel_idx]->hw_value != 3779 ch_info->pri_ch) 3780 continue; 3781 3782 found = false; 3783 list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) { 3784 if (ether_addr_equal(tmp->bssid, params->bssid)) { 3785 found = true; 3786 break; 3787 } 3788 } 3789 if (found) 3790 continue; 3791 3792 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 3793 NULL, 0, req->ie_len); 3794 skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]); 3795 skb_put_data(skb, ies->common_ies, ies->common_ie_len); 3796 hdr = (struct ieee80211_hdr *)skb->data; 3797 ether_addr_copy(hdr->addr3, params->bssid); 3798 3799 info = kzalloc(sizeof(*info), GFP_KERNEL); 3800 if (!info) { 3801 ret = -ENOMEM; 3802 kfree_skb(skb); 3803 goto out; 3804 } 3805 3806 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 3807 if (ret) { 3808 kfree_skb(skb); 3809 kfree(info); 3810 goto out; 3811 } 3812 3813 ether_addr_copy(info->bssid, params->bssid); 3814 info->channel_6ghz = req->channels[params->channel_idx]->hw_value; 3815 list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]); 3816 3817 ch_info->tx_pkt = true; 3818 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 3819 3820 kfree_skb(skb); 3821 } 3822 3823 out: 3824 return ret; 3825 } 3826 3827 static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type, 3828 int ssid_num, 3829 struct rtw89_mac_chinfo *ch_info) 3830 { 3831 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 3832 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 3833 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3834 struct cfg80211_scan_request *req = rtwvif->scan_req; 3835 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 3836 struct rtw89_pktofld_info *info; 3837 u8 band, probe_count = 0; 3838 int ret; 3839 3840 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 3841 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 3842 ch_info->bw = RTW89_SCAN_WIDTH; 3843 ch_info->tx_pkt = true; 3844 ch_info->cfg_tx_pwr = false; 3845 ch_info->tx_pwr_idx = 0; 3846 ch_info->tx_null = false; 3847 ch_info->pause_data = false; 3848 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 3849 3850 if (ch_info->ch_band == RTW89_BAND_6G) { 3851 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 3852 !ch_info->is_psc) { 3853 ch_info->tx_pkt = false; 3854 if (!req->duration_mandatory) 3855 ch_info->period -= RTW89_DWELL_TIME_6G; 3856 } 3857 } 3858 3859 ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info); 
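	/* Added note: a failed RNR update only costs directed 6 GHz probing of
	 * co-located APs reported via RNR; the scan itself still proceeds, so
	 * the caller below warns and carries on rather than aborting.
	 */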
	if (ret)
		rtw89_warn(rtwdev, "RNR update failed: %d\n", ret);

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->central_ch = op->channel;
		ch_info->pri_ch = op->primary_channel;
		ch_info->ch_band = op->band_type;
		ch_info->bw = op->band_width;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case RTW89_CHAN_ACTIVE:
		break;
	default:
		rtw89_err(rtwdev, "Channel type out of bounds\n");
	}
}

static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
				       struct rtw89_vif *rtwvif, bool connected)
{
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_mac_chinfo *ch_info, *tmp;
	struct ieee80211_channel *channel;
	struct list_head chan_list;
	bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN;
	int list_len, off_chan_time = 0;
	enum rtw89_chan_type type;
	int ret = 0;
	u32 idx;

	INIT_LIST_HEAD(&chan_list);
	for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0;
	     idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT;
	     idx++, list_len++) {
		channel = req->channels[idx];
		ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL);
		if (!ch_info) {
			ret = -ENOMEM;
			goto out;
		}

		if (req->duration_mandatory)
			ch_info->period = req->duration;
		else if (channel->band == NL80211_BAND_6GHZ)
			ch_info->period = RTW89_CHANNEL_TIME_6G +
					  RTW89_DWELL_TIME_6G;
		else
			ch_info->period = RTW89_CHANNEL_TIME;

		ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band);
		ch_info->central_ch = channel->hw_value;
		ch_info->pri_ch = channel->hw_value;
		ch_info->rand_seq_num = random_seq;
		ch_info->is_psc = cfg80211_channel_is_psc(channel);

		if (channel->flags &
		    (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR))
			type = RTW89_CHAN_DFS;
		else
			type = RTW89_CHAN_ACTIVE;
		rtw89_hw_scan_add_chan(rtwdev, type, req->n_ssids, ch_info);

		if (connected &&
		    off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) {
			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				ret = -ENOMEM;
				kfree(ch_info);
				goto out;
			}

			type = RTW89_CHAN_OPERATE;
			tmp->period = req->duration_mandatory ?
3955 req->duration : RTW89_CHANNEL_TIME; 3956 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 3957 list_add_tail(&tmp->list, &chan_list); 3958 off_chan_time = 0; 3959 list_len++; 3960 } 3961 list_add_tail(&ch_info->list, &chan_list); 3962 off_chan_time += ch_info->period; 3963 } 3964 rtwdev->scan_info.last_chan_idx = idx; 3965 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 3966 3967 out: 3968 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 3969 list_del(&ch_info->list); 3970 kfree(ch_info); 3971 } 3972 3973 return ret; 3974 } 3975 3976 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 3977 struct rtw89_vif *rtwvif, bool connected) 3978 { 3979 int ret; 3980 3981 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 3982 if (ret) { 3983 rtw89_err(rtwdev, "Update probe request failed\n"); 3984 goto out; 3985 } 3986 ret = rtw89_hw_scan_add_chan_list(rtwdev, rtwvif, connected); 3987 out: 3988 return ret; 3989 } 3990 3991 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 3992 struct ieee80211_scan_request *scan_req) 3993 { 3994 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 3995 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 3996 struct cfg80211_scan_request *req = &scan_req->req; 3997 u32 rx_fltr = rtwdev->hal.rx_fltr; 3998 u8 mac_addr[ETH_ALEN]; 3999 4000 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 4001 rtwdev->scan_info.scanning_vif = vif; 4002 rtwdev->scan_info.last_chan_idx = 0; 4003 rtwvif->scan_ies = &scan_req->ies; 4004 rtwvif->scan_req = req; 4005 ieee80211_stop_queues(rtwdev->hw); 4006 4007 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 4008 get_random_mask_addr(mac_addr, req->mac_addr, 4009 req->mac_addr_mask); 4010 else 4011 ether_addr_copy(mac_addr, vif->addr); 4012 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 4013 4014 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 4015 rx_fltr &= ~B_AX_A_BC; 4016 rx_fltr &= ~B_AX_A_A1_MATCH; 4017 rtw89_write32_mask(rtwdev, 4018 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 4019 B_AX_RX_FLTR_CFG_MASK, 4020 rx_fltr); 4021 4022 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 4023 } 4024 4025 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 4026 bool aborted) 4027 { 4028 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 4029 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4030 struct cfg80211_scan_info info = { 4031 .aborted = aborted, 4032 }; 4033 struct rtw89_vif *rtwvif; 4034 4035 if (!vif) 4036 return; 4037 4038 rtw89_write32_mask(rtwdev, 4039 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 4040 B_AX_RX_FLTR_CFG_MASK, 4041 rtwdev->hal.rx_fltr); 4042 4043 rtw89_core_scan_complete(rtwdev, vif, true); 4044 ieee80211_scan_completed(rtwdev->hw, &info); 4045 ieee80211_wake_queues(rtwdev->hw); 4046 4047 rtw89_release_pkt_list(rtwdev); 4048 rtwvif = (struct rtw89_vif *)vif->drv_priv; 4049 rtwvif->scan_req = NULL; 4050 rtwvif->scan_ies = NULL; 4051 scan_info->last_chan_idx = 0; 4052 scan_info->scanning_vif = NULL; 4053 4054 rtw89_chanctx_proceed(rtwdev); 4055 } 4056 4057 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 4058 { 4059 rtw89_hw_scan_offload(rtwdev, vif, false); 4060 rtw89_hw_scan_complete(rtwdev, vif, true); 4061 } 4062 4063 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 4064 bool enable) 4065 { 4066 struct rtw89_scan_option opt = {0}; 4067 struct rtw89_vif *rtwvif; 4068 bool 
connected; 4069 int ret = 0; 4070 4071 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 4072 if (!rtwvif) 4073 return -EINVAL; 4074 4075 /* This variable implies connected or during attempt to connect */ 4076 connected = !is_zero_ether_addr(rtwvif->bssid); 4077 opt.enable = enable; 4078 opt.target_ch_mode = connected; 4079 if (enable) { 4080 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 4081 if (ret) 4082 goto out; 4083 } 4084 ret = rtw89_fw_h2c_scan_offload(rtwdev, &opt, rtwvif); 4085 out: 4086 return ret; 4087 } 4088 4089 #define H2C_FW_CPU_EXCEPTION_LEN 4 4090 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 4091 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 4092 { 4093 struct sk_buff *skb; 4094 int ret; 4095 4096 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 4097 if (!skb) { 4098 rtw89_err(rtwdev, 4099 "failed to alloc skb for fw cpu exception\n"); 4100 return -ENOMEM; 4101 } 4102 4103 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 4104 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 4105 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 4106 4107 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4108 H2C_CAT_TEST, 4109 H2C_CL_FW_STATUS_TEST, 4110 H2C_FUNC_CPU_EXCEPTION, 0, 0, 4111 H2C_FW_CPU_EXCEPTION_LEN); 4112 4113 ret = rtw89_h2c_tx(rtwdev, skb, false); 4114 if (ret) { 4115 rtw89_err(rtwdev, "failed to send h2c\n"); 4116 goto fail; 4117 } 4118 4119 return 0; 4120 4121 fail: 4122 dev_kfree_skb_any(skb); 4123 return ret; 4124 } 4125 4126 #define H2C_PKT_DROP_LEN 24 4127 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 4128 const struct rtw89_pkt_drop_params *params) 4129 { 4130 struct sk_buff *skb; 4131 int ret; 4132 4133 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 4134 if (!skb) { 4135 rtw89_err(rtwdev, 4136 "failed to alloc skb for packet drop\n"); 4137 return -ENOMEM; 4138 } 4139 4140 switch (params->sel) { 4141 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 4142 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 4143 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 4144 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 4145 case RTW89_PKT_DROP_SEL_BAND_ONCE: 4146 break; 4147 default: 4148 rtw89_debug(rtwdev, RTW89_DBG_FW, 4149 "H2C of pkt drop might not fully support sel: %d yet\n", 4150 params->sel); 4151 break; 4152 } 4153 4154 skb_put(skb, H2C_PKT_DROP_LEN); 4155 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 4156 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 4157 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 4158 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 4159 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 4160 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 4161 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 4162 params->macid_band_sel[0]); 4163 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 4164 params->macid_band_sel[1]); 4165 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 4166 params->macid_band_sel[2]); 4167 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 4168 params->macid_band_sel[3]); 4169 4170 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4171 H2C_CAT_MAC, 4172 H2C_CL_MAC_FW_OFLD, 4173 H2C_FUNC_PKT_DROP, 0, 0, 4174 H2C_PKT_DROP_LEN); 4175 4176 ret = rtw89_h2c_tx(rtwdev, skb, false); 4177 if (ret) { 4178 rtw89_err(rtwdev, "failed to send h2c\n"); 4179 goto fail; 4180 } 4181 4182 return 0; 4183 4184 fail: 4185 dev_kfree_skb_any(skb); 4186 return ret; 4187 } 4188 4189 #define H2C_KEEP_ALIVE_LEN 4 4190 int rtw89_fw_h2c_keep_alive(struct 
rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			    bool enable)
{
	struct sk_buff *skb;
	u8 pkt_id = 0;
	int ret;

	if (enable) {
		ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
						   RTW89_PKT_OFLD_TYPE_NULL_DATA,
						   &pkt_id);
		if (ret)
			return -EPERM;
	}

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_KEEP_ALIVE_LEN);

	RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable);
	RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id);
	RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5);
	RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_KEEP_ALIVE, 0, 1,
			      H2C_KEEP_ALIVE_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
				   struct rtw89_vif *rtwvif, bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for disconnect detect\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_DISCONNECT_DETECT_LEN);

	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) {
		RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable);
		RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable);
		RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid);
		RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100);
		RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5);
	}

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_DISCONNECT_DETECT, 0, 1,
			      H2C_DISCONNECT_DETECT_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_GLOBAL_LEN 8
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
			    bool enable)
{
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_GLOBAL_LEN);

	RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
	RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_GLOBAL, 0, 1,
			      H2C_WOW_GLOBAL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WAKEUP_CTRL_LEN 4
int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
				 struct rtw89_vif *rtwvif,
				 bool enable)
{
	struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
	struct sk_buff *skb;
	u8 macid = rtwvif->mac_id;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WAKEUP_CTRL_LEN);

	if (rtw_wow->pattern_cnt)
		RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable);
	if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags))
		RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable);

	RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WAKEUP_CTRL, 0, 1,
			      H2C_WAKEUP_CTRL_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;

fail:
	dev_kfree_skb_any(skb);

	return ret;
}

#define H2C_WOW_CAM_UPD_LEN 24
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
			    struct rtw89_wow_cam_info *cam_info)
{
	struct sk_buff *skb;
	int ret;

	skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN);
	if (!skb) {
		rtw89_err(rtwdev, "failed to alloc skb for wow cam update\n");
		return -ENOMEM;
	}

	skb_put(skb, H2C_WOW_CAM_UPD_LEN);

	RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w);
	RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx);
	if (cam_info->valid) {
		RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]);
		RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]);
		RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]);
		RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]);
		RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc);
		RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data,
							     cam_info->negative_pattern_match);
		RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data,
						   cam_info->skip_mac_hdr);
		RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc);
		RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc);
		RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc);
	}
	RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid);

	rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
			      H2C_CAT_MAC,
			      H2C_CL_MAC_WOW,
			      H2C_FUNC_WOW_CAM_UPD, 0, 1,
			      H2C_WOW_CAM_UPD_LEN);

	ret = rtw89_h2c_tx(rtwdev, skb, false);
	if (ret) {
		rtw89_err(rtwdev, "failed to send h2c\n");
		goto fail;
	}

	return 0;
fail:
	dev_kfree_skb_any(skb);

	return ret;
}

/* Return < 0 if a failure happens while waiting for the condition.
 * Return 0 when waiting for the condition succeeds.
 * Return > 0 if the wait is considered unreachable due to driver/FW design,
 * where 1 means during SER.
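 * Added note: a positive return still means the H2C itself was transmitted;
 * only the wait for the firmware completion is skipped.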
4425 */ 4426 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 4427 struct rtw89_wait_info *wait, unsigned int cond) 4428 { 4429 int ret; 4430 4431 ret = rtw89_h2c_tx(rtwdev, skb, false); 4432 if (ret) { 4433 rtw89_err(rtwdev, "failed to send h2c\n"); 4434 dev_kfree_skb_any(skb); 4435 return -EBUSY; 4436 } 4437 4438 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 4439 return 1; 4440 4441 return rtw89_wait_for_cond(wait, cond); 4442 } 4443 4444 #define H2C_ADD_MCC_LEN 16 4445 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 4446 const struct rtw89_fw_mcc_add_req *p) 4447 { 4448 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4449 struct sk_buff *skb; 4450 unsigned int cond; 4451 4452 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 4453 if (!skb) { 4454 rtw89_err(rtwdev, 4455 "failed to alloc skb for add mcc\n"); 4456 return -ENOMEM; 4457 } 4458 4459 skb_put(skb, H2C_ADD_MCC_LEN); 4460 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 4461 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 4462 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 4463 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 4464 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 4465 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 4466 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 4467 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 4468 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 4469 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 4470 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 4471 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 4472 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 4473 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 4474 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 4475 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 4476 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 4477 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 4478 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 4479 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 4480 4481 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4482 H2C_CAT_MAC, 4483 H2C_CL_MCC, 4484 H2C_FUNC_ADD_MCC, 0, 0, 4485 H2C_ADD_MCC_LEN); 4486 4487 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 4488 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4489 } 4490 4491 #define H2C_START_MCC_LEN 12 4492 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 4493 const struct rtw89_fw_mcc_start_req *p) 4494 { 4495 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4496 struct sk_buff *skb; 4497 unsigned int cond; 4498 4499 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 4500 if (!skb) { 4501 rtw89_err(rtwdev, 4502 "failed to alloc skb for start mcc\n"); 4503 return -ENOMEM; 4504 } 4505 4506 skb_put(skb, H2C_START_MCC_LEN); 4507 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 4508 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 4509 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 4510 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 4511 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 4512 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 4513 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 4514 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 4515 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 4516 4517 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4518 H2C_CAT_MAC, 4519 H2C_CL_MCC, 4520 H2C_FUNC_START_MCC, 0, 0, 4521 H2C_START_MCC_LEN); 4522 4523 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 4524 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4525 } 4526 4527 #define H2C_STOP_MCC_LEN 4 4528 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4529 bool prev_groups) 4530 { 4531 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4532 struct sk_buff *skb; 4533 unsigned int cond; 4534 4535 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 4536 if (!skb) { 4537 rtw89_err(rtwdev, 4538 "failed to alloc skb for stop mcc\n"); 4539 return -ENOMEM; 4540 } 4541 4542 skb_put(skb, H2C_STOP_MCC_LEN); 4543 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 4544 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 4545 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 4546 4547 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4548 H2C_CAT_MAC, 4549 H2C_CL_MCC, 4550 H2C_FUNC_STOP_MCC, 0, 0, 4551 H2C_STOP_MCC_LEN); 4552 4553 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 4554 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4555 } 4556 4557 #define H2C_DEL_MCC_GROUP_LEN 4 4558 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 4559 bool prev_groups) 4560 { 4561 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4562 struct sk_buff *skb; 4563 unsigned int cond; 4564 4565 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 4566 if (!skb) { 4567 rtw89_err(rtwdev, 4568 "failed to alloc skb for del mcc group\n"); 4569 return -ENOMEM; 4570 } 4571 4572 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 4573 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 4574 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 4575 4576 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4577 H2C_CAT_MAC, 4578 H2C_CL_MCC, 4579 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 4580 H2C_DEL_MCC_GROUP_LEN); 4581 4582 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 4583 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4584 } 4585 4586 #define H2C_RESET_MCC_GROUP_LEN 4 4587 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 4588 { 4589 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4590 struct sk_buff *skb; 4591 unsigned int cond; 4592 4593 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 4594 if (!skb) { 4595 rtw89_err(rtwdev, 4596 "failed to alloc skb for reset mcc group\n"); 4597 return -ENOMEM; 4598 } 4599 4600 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 4601 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 4602 4603 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4604 H2C_CAT_MAC, 4605 H2C_CL_MCC, 4606 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 4607 H2C_RESET_MCC_GROUP_LEN); 4608 4609 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 4610 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4611 } 4612 4613 #define H2C_MCC_REQ_TSF_LEN 4 4614 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 4615 const struct rtw89_fw_mcc_tsf_req *req, 4616 struct rtw89_mac_mcc_tsf_rpt *rpt) 4617 { 4618 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4619 struct rtw89_mac_mcc_tsf_rpt *tmp; 4620 struct sk_buff *skb; 4621 unsigned int cond; 4622 int ret; 4623 4624 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 4625 if (!skb) { 4626 rtw89_err(rtwdev, 4627 "failed to alloc skb for mcc req tsf\n"); 4628 return -ENOMEM; 4629 } 4630 4631 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 4632 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 4633 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 4634 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 4635 4636 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4637 H2C_CAT_MAC, 4638 H2C_CL_MCC, 4639 H2C_FUNC_MCC_REQ_TSF, 0, 0, 4640 H2C_MCC_REQ_TSF_LEN); 4641 4642 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 4643 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4644 if (ret) 4645 return ret; 4646 4647 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 4648 *rpt = *tmp; 4649 4650 return 0; 4651 } 4652 4653 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 4654 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 4655 u8 *bitmap) 4656 { 4657 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4658 struct sk_buff *skb; 4659 unsigned int cond; 4660 u8 map_len; 4661 u8 h2c_len; 4662 4663 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 4664 map_len = RTW89_MAX_MAC_ID_NUM / 8; 4665 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 4666 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 4667 if (!skb) { 4668 rtw89_err(rtwdev, 4669 "failed to alloc skb for mcc macid bitmap\n"); 4670 return -ENOMEM; 4671 } 4672 4673 skb_put(skb, h2c_len); 4674 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 4675 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 4676 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 4677 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 4678 4679 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4680 H2C_CAT_MAC, 4681 H2C_CL_MCC, 4682 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 4683 h2c_len); 4684 4685 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 4686 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4687 } 4688 4689 #define H2C_MCC_SYNC_LEN 4 4690 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 4691 u8 target, u8 offset) 4692 { 4693 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4694 struct sk_buff *skb; 4695 unsigned int cond; 4696 4697 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 4698 if (!skb) { 4699 rtw89_err(rtwdev, 4700 "failed to alloc skb for mcc sync\n"); 4701 return -ENOMEM; 4702 } 4703 4704 skb_put(skb, H2C_MCC_SYNC_LEN); 4705 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 4706 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 4707 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 4708 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 4709 4710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4711 H2C_CAT_MAC, 4712 H2C_CL_MCC, 4713 H2C_FUNC_MCC_SYNC, 0, 0, 4714 H2C_MCC_SYNC_LEN); 4715 4716 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 4717 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4718 } 4719 4720 #define H2C_MCC_SET_DURATION_LEN 20 4721 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 4722 const struct rtw89_fw_mcc_duration *p) 4723 { 4724 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 4725 struct sk_buff *skb; 4726 unsigned int cond; 4727 4728 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 4729 if (!skb) { 4730 rtw89_err(rtwdev, 4731 "failed to alloc skb for mcc set duration\n"); 4732 return -ENOMEM; 4733 } 4734 4735 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
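	/* Added note: the fields below mirror struct rtw89_fw_mcc_duration:
	 * the target group and macids, the TSF at which the new pattern takes
	 * effect, and the per-role durations for macid_x/macid_y.
	 */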
4736 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 4737 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 4738 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 4739 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 4740 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 4741 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 4742 p->start_tsf_low); 4743 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 4744 p->start_tsf_high); 4745 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 4746 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 4747 4748 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4749 H2C_CAT_MAC, 4750 H2C_CL_MCC, 4751 H2C_FUNC_MCC_SET_DURATION, 0, 0, 4752 H2C_MCC_SET_DURATION_LEN); 4753 4754 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 4755 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4756 } 4757 4758 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 4759 { 4760 static const u8 zeros[U8_MAX] = {}; 4761 4762 return memcmp(ext_ptr, zeros, ext_len) == 0; 4763 } 4764 4765 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 4766 ({ \ 4767 u8 __var_sz = sizeof(*(e)); \ 4768 bool __accept; \ 4769 if (__var_sz >= (ent_sz)) \ 4770 __accept = true; \ 4771 else \ 4772 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 4773 (ent_sz) - __var_sz);\ 4774 __accept; \ 4775 }) 4776 4777 static bool 4778 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 4779 const void *cursor, 4780 const struct rtw89_txpwr_conf *conf) 4781 { 4782 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4783 return false; 4784 4785 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 4786 return false; 4787 4788 switch (e->rs) { 4789 case RTW89_RS_CCK: 4790 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 4791 return false; 4792 break; 4793 case RTW89_RS_OFDM: 4794 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 4795 return false; 4796 break; 4797 case RTW89_RS_MCS: 4798 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 4799 e->nss >= RTW89_NSS_NUM || 4800 e->ofdma >= RTW89_OFDMA_NUM) 4801 return false; 4802 break; 4803 case RTW89_RS_HEDCM: 4804 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 4805 e->nss >= RTW89_NSS_HEDCM_NUM || 4806 e->ofdma >= RTW89_OFDMA_NUM) 4807 return false; 4808 break; 4809 case RTW89_RS_OFFSET: 4810 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 4811 return false; 4812 break; 4813 default: 4814 return false; 4815 } 4816 4817 return true; 4818 } 4819 4820 static 4821 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 4822 const struct rtw89_txpwr_table *tbl) 4823 { 4824 const struct rtw89_txpwr_conf *conf = tbl->data; 4825 struct rtw89_fw_txpwr_byrate_entry entry = {}; 4826 struct rtw89_txpwr_byrate *byr_head; 4827 struct rtw89_rate_desc desc = {}; 4828 const void *cursor; 4829 u32 data; 4830 s8 *byr; 4831 int i; 4832 4833 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4834 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 4835 continue; 4836 4837 byr_head = &rtwdev->byr[entry.band][entry.bw]; 4838 data = le32_to_cpu(entry.data); 4839 desc.ofdma = entry.ofdma; 4840 desc.nss = entry.nss; 4841 desc.rs = entry.rs; 4842 4843 for (i = 0; i < entry.len; i++, data >>= 8) { 4844 desc.idx = entry.shf + i; 4845 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 4846 *byr = data & 0xff; 4847 } 4848 } 4849 } 4850 4851 static bool 4852 
fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 4853 const void *cursor, 4854 const struct rtw89_txpwr_conf *conf) 4855 { 4856 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4857 return false; 4858 4859 if (e->bw >= RTW89_2G_BW_NUM) 4860 return false; 4861 if (e->nt >= RTW89_NTX_NUM) 4862 return false; 4863 if (e->rs >= RTW89_RS_LMT_NUM) 4864 return false; 4865 if (e->bf >= RTW89_BF_NUM) 4866 return false; 4867 if (e->regd >= RTW89_REGD_NUM) 4868 return false; 4869 if (e->ch_idx >= RTW89_2G_CH_NUM) 4870 return false; 4871 4872 return true; 4873 } 4874 4875 static 4876 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 4877 { 4878 const struct rtw89_txpwr_conf *conf = &data->conf; 4879 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 4880 const void *cursor; 4881 4882 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4883 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 4884 continue; 4885 4886 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4887 [entry.ch_idx] = entry.v; 4888 } 4889 } 4890 4891 static bool 4892 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 4893 const void *cursor, 4894 const struct rtw89_txpwr_conf *conf) 4895 { 4896 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4897 return false; 4898 4899 if (e->bw >= RTW89_5G_BW_NUM) 4900 return false; 4901 if (e->nt >= RTW89_NTX_NUM) 4902 return false; 4903 if (e->rs >= RTW89_RS_LMT_NUM) 4904 return false; 4905 if (e->bf >= RTW89_BF_NUM) 4906 return false; 4907 if (e->regd >= RTW89_REGD_NUM) 4908 return false; 4909 if (e->ch_idx >= RTW89_5G_CH_NUM) 4910 return false; 4911 4912 return true; 4913 } 4914 4915 static 4916 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 4917 { 4918 const struct rtw89_txpwr_conf *conf = &data->conf; 4919 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 4920 const void *cursor; 4921 4922 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4923 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 4924 continue; 4925 4926 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4927 [entry.ch_idx] = entry.v; 4928 } 4929 } 4930 4931 static bool 4932 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 4933 const void *cursor, 4934 const struct rtw89_txpwr_conf *conf) 4935 { 4936 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4937 return false; 4938 4939 if (e->bw >= RTW89_6G_BW_NUM) 4940 return false; 4941 if (e->nt >= RTW89_NTX_NUM) 4942 return false; 4943 if (e->rs >= RTW89_RS_LMT_NUM) 4944 return false; 4945 if (e->bf >= RTW89_BF_NUM) 4946 return false; 4947 if (e->regd >= RTW89_REGD_NUM) 4948 return false; 4949 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 4950 return false; 4951 if (e->ch_idx >= RTW89_6G_CH_NUM) 4952 return false; 4953 4954 return true; 4955 } 4956 4957 static 4958 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 4959 { 4960 const struct rtw89_txpwr_conf *conf = &data->conf; 4961 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 4962 const void *cursor; 4963 4964 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 4965 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 4966 continue; 4967 4968 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 4969 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 4970 } 4971 } 4972 4973 static bool 4974 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 4975 const void *cursor, 4976 
const struct rtw89_txpwr_conf *conf) 4977 { 4978 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 4979 return false; 4980 4981 if (e->ru >= RTW89_RU_NUM) 4982 return false; 4983 if (e->nt >= RTW89_NTX_NUM) 4984 return false; 4985 if (e->regd >= RTW89_REGD_NUM) 4986 return false; 4987 if (e->ch_idx >= RTW89_2G_CH_NUM) 4988 return false; 4989 4990 return true; 4991 } 4992 4993 static 4994 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 4995 { 4996 const struct rtw89_txpwr_conf *conf = &data->conf; 4997 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 4998 const void *cursor; 4999 5000 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5001 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 5002 continue; 5003 5004 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 5005 } 5006 } 5007 5008 static bool 5009 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 5010 const void *cursor, 5011 const struct rtw89_txpwr_conf *conf) 5012 { 5013 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5014 return false; 5015 5016 if (e->ru >= RTW89_RU_NUM) 5017 return false; 5018 if (e->nt >= RTW89_NTX_NUM) 5019 return false; 5020 if (e->regd >= RTW89_REGD_NUM) 5021 return false; 5022 if (e->ch_idx >= RTW89_5G_CH_NUM) 5023 return false; 5024 5025 return true; 5026 } 5027 5028 static 5029 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 5030 { 5031 const struct rtw89_txpwr_conf *conf = &data->conf; 5032 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 5033 const void *cursor; 5034 5035 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5036 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 5037 continue; 5038 5039 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 5040 } 5041 } 5042 5043 static bool 5044 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 5045 const void *cursor, 5046 const struct rtw89_txpwr_conf *conf) 5047 { 5048 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5049 return false; 5050 5051 if (e->ru >= RTW89_RU_NUM) 5052 return false; 5053 if (e->nt >= RTW89_NTX_NUM) 5054 return false; 5055 if (e->regd >= RTW89_REGD_NUM) 5056 return false; 5057 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 5058 return false; 5059 if (e->ch_idx >= RTW89_6G_CH_NUM) 5060 return false; 5061 5062 return true; 5063 } 5064 5065 static 5066 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 5067 { 5068 const struct rtw89_txpwr_conf *conf = &data->conf; 5069 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 5070 const void *cursor; 5071 5072 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5073 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 5074 continue; 5075 5076 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 5077 [entry.ch_idx] = entry.v; 5078 } 5079 } 5080 5081 static bool 5082 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 5083 const void *cursor, 5084 const struct rtw89_txpwr_conf *conf) 5085 { 5086 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5087 return false; 5088 5089 if (e->band >= RTW89_BAND_NUM) 5090 return false; 5091 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 5092 return false; 5093 if (e->regd >= RTW89_REGD_NUM) 5094 return false; 5095 5096 return true; 5097 } 5098 5099 static 5100 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 5101 { 5102 const struct 
rtw89_txpwr_conf *conf = &data->conf; 5103 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 5104 const void *cursor; 5105 5106 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5107 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 5108 continue; 5109 5110 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 5111 } 5112 } 5113 5114 static bool 5115 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 5116 const void *cursor, 5117 const struct rtw89_txpwr_conf *conf) 5118 { 5119 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 5120 return false; 5121 5122 if (e->band >= RTW89_BAND_NUM) 5123 return false; 5124 if (e->regd >= RTW89_REGD_NUM) 5125 return false; 5126 5127 return true; 5128 } 5129 5130 static 5131 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 5132 { 5133 const struct rtw89_txpwr_conf *conf = &data->conf; 5134 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 5135 const void *cursor; 5136 5137 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 5138 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 5139 continue; 5140 5141 data->v[entry.band][entry.regd] = entry.v; 5142 } 5143 } 5144 5145 const struct rtw89_rfe_parms * 5146 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 5147 const struct rtw89_rfe_parms *init) 5148 { 5149 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 5150 struct rtw89_rfe_parms *parms; 5151 5152 if (!rfe_data) 5153 return init; 5154 5155 parms = &rfe_data->rfe_parms; 5156 if (init) 5157 *parms = *init; 5158 5159 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 5160 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 5161 rfe_data->byrate.tbl.size = 0; /* don't care here */ 5162 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 5163 parms->byr_tbl = &rfe_data->byrate.tbl; 5164 } 5165 5166 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 5167 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 5168 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 5169 } 5170 5171 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 5172 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 5173 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 5174 } 5175 5176 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 5177 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 5178 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 5179 } 5180 5181 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 5182 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 5183 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 5184 } 5185 5186 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 5187 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 5188 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 5189 } 5190 5191 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 5192 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 5193 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 5194 } 5195 5196 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 5197 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 5198 parms->tx_shape.lmt = &rfe_data->tx_shape_lmt.v; 5199 } 5200 5201 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) { 5202 rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru); 5203 parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v; 5204 } 5205 5206 return parms; 5207 } 5208
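/* Usage sketch (illustrative, not part of this file): a chip/PHY setup path is
 * assumed to install the result of rtw89_load_rfe_data_from_fw() so that later
 * TX-power lookups see the FW-provided tables, falling back to the built-in
 * defaults whenever no TXPWR elements were present in the firmware file:
 *
 *	rtwdev->rfe_parms = rtw89_load_rfe_data_from_fw(rtwdev, chip_defaults);
 *
 * where "chip_defaults" stands in for the chip's built-in rtw89_rfe_parms.
 */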