1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* Copyright(c) 2019-2020 Realtek Corporation 3 */ 4 5 #include "cam.h" 6 #include "chan.h" 7 #include "coex.h" 8 #include "debug.h" 9 #include "fw.h" 10 #include "mac.h" 11 #include "phy.h" 12 #include "ps.h" 13 #include "reg.h" 14 #include "util.h" 15 16 static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; 17 18 union rtw89_fw_element_arg { 19 size_t offset; 20 enum rtw89_rf_path rf_path; 21 enum rtw89_fw_type fw_type; 22 }; 23 24 struct rtw89_fw_element_handler { 25 int (*fn)(struct rtw89_dev *rtwdev, 26 const struct rtw89_fw_element_hdr *elm, 27 const union rtw89_fw_element_arg arg); 28 const union rtw89_fw_element_arg arg; 29 const char *name; 30 }; 31 32 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 33 struct sk_buff *skb); 34 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 35 struct rtw89_wait_info *wait, unsigned int cond); 36 37 static struct sk_buff *rtw89_fw_h2c_alloc_skb(struct rtw89_dev *rtwdev, u32 len, 38 bool header) 39 { 40 struct sk_buff *skb; 41 u32 header_len = 0; 42 u32 h2c_desc_size = rtwdev->chip->h2c_desc_size; 43 44 if (header) 45 header_len = H2C_HEADER_LEN; 46 47 skb = dev_alloc_skb(len + header_len + h2c_desc_size); 48 if (!skb) 49 return NULL; 50 skb_reserve(skb, header_len + h2c_desc_size); 51 memset(skb->data, 0, len); 52 53 return skb; 54 } 55 56 struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(struct rtw89_dev *rtwdev, u32 len) 57 { 58 return rtw89_fw_h2c_alloc_skb(rtwdev, len, true); 59 } 60 61 struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(struct rtw89_dev *rtwdev, u32 len) 62 { 63 return rtw89_fw_h2c_alloc_skb(rtwdev, len, false); 64 } 65 66 int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev, enum rtw89_fwdl_check_type type) 67 { 68 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 69 u8 val; 70 int ret; 71 72 ret = read_poll_timeout_atomic(mac->fwdl_get_status, val, 73 val == RTW89_FWDL_WCPU_FW_INIT_RDY, 74 1, FWDL_WAIT_CNT, false, rtwdev, type); 75 if (ret) { 76 switch (val) { 77 case RTW89_FWDL_CHECKSUM_FAIL: 78 rtw89_err(rtwdev, "fw checksum fail\n"); 79 return -EINVAL; 80 81 case RTW89_FWDL_SECURITY_FAIL: 82 rtw89_err(rtwdev, "fw security fail\n"); 83 return -EINVAL; 84 85 case RTW89_FWDL_CV_NOT_MATCH: 86 rtw89_err(rtwdev, "fw cv not match\n"); 87 return -EINVAL; 88 89 default: 90 rtw89_err(rtwdev, "fw unexpected status %d\n", val); 91 return -EBUSY; 92 } 93 } 94 95 set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags); 96 97 return 0; 98 } 99 100 static int rtw89_fw_hdr_parser_v0(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 101 struct rtw89_fw_bin_info *info) 102 { 103 const struct rtw89_fw_hdr *fw_hdr = (const struct rtw89_fw_hdr *)fw; 104 struct rtw89_fw_hdr_section_info *section_info; 105 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 106 const struct rtw89_fw_hdr_section *section; 107 const u8 *fw_end = fw + len; 108 const u8 *bin; 109 u32 base_hdr_len; 110 u32 mssc_len = 0; 111 u32 i; 112 113 if (!info) 114 return -EINVAL; 115 116 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_W6_SEC_NUM); 117 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 118 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_W7_DYN_HDR); 119 120 if (info->dynamic_hdr_en) { 121 info->hdr_len = le32_get_bits(fw_hdr->w3, FW_HDR_W3_LEN); 122 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 123 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 124 if (le32_to_cpu(fwdynhdr->hdr_len) != 
info->dynamic_hdr_len) { 125 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 126 return -EINVAL; 127 } 128 } else { 129 info->hdr_len = base_hdr_len; 130 info->dynamic_hdr_len = 0; 131 } 132 133 bin = fw + info->hdr_len; 134 135 /* jump to section header */ 136 section_info = info->section_info; 137 for (i = 0; i < info->section_num; i++) { 138 section = &fw_hdr->sections[i]; 139 section_info->type = 140 le32_get_bits(section->w1, FWSECTION_HDR_W1_SECTIONTYPE); 141 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 142 section_info->mssc = 143 le32_get_bits(section->w2, FWSECTION_HDR_W2_MSSC); 144 mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN; 145 } else { 146 section_info->mssc = 0; 147 } 148 149 section_info->len = le32_get_bits(section->w1, FWSECTION_HDR_W1_SEC_SIZE); 150 if (le32_get_bits(section->w1, FWSECTION_HDR_W1_CHECKSUM)) 151 section_info->len += FWDL_SECTION_CHKSUM_LEN; 152 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_W1_REDL); 153 section_info->dladdr = 154 le32_get_bits(section->w0, FWSECTION_HDR_W0_DL_ADDR) & 0x1fffffff; 155 section_info->addr = bin; 156 bin += section_info->len; 157 section_info++; 158 } 159 160 if (fw_end != bin + mssc_len) { 161 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 162 return -EINVAL; 163 } 164 165 return 0; 166 } 167 168 static int __get_mssc_key_idx(struct rtw89_dev *rtwdev, 169 const struct rtw89_fw_mss_pool_hdr *mss_hdr, 170 u32 rmp_tbl_size, u32 *key_idx) 171 { 172 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 173 u32 sel_byte_idx; 174 u32 mss_sel_idx; 175 u8 sel_bit_idx; 176 int i; 177 178 if (sec->mss_dev_type == RTW89_FW_MSS_DEV_TYPE_FWSEC_DEF) { 179 if (!mss_hdr->defen) 180 return -ENOENT; 181 182 mss_sel_idx = sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 183 sec->mss_key_num; 184 } else { 185 if (mss_hdr->defen) 186 mss_sel_idx = FWDL_MSS_POOL_DEFKEYSETS_SIZE << 3; 187 else 188 mss_sel_idx = 0; 189 mss_sel_idx += sec->mss_dev_type * le16_to_cpu(mss_hdr->msskey_num_max) * 190 le16_to_cpu(mss_hdr->msscust_max) + 191 sec->mss_cust_idx * le16_to_cpu(mss_hdr->msskey_num_max) + 192 sec->mss_key_num; 193 } 194 195 sel_byte_idx = mss_sel_idx >> 3; 196 sel_bit_idx = mss_sel_idx & 0x7; 197 198 if (sel_byte_idx >= rmp_tbl_size) 199 return -EFAULT; 200 201 if (!(mss_hdr->rmp_tbl[sel_byte_idx] & BIT(sel_bit_idx))) 202 return -ENOENT; 203 204 *key_idx = hweight8(mss_hdr->rmp_tbl[sel_byte_idx] & (BIT(sel_bit_idx) - 1)); 205 206 for (i = 0; i < sel_byte_idx; i++) 207 *key_idx += hweight8(mss_hdr->rmp_tbl[i]); 208 209 return 0; 210 } 211 212 static int __parse_formatted_mssc(struct rtw89_dev *rtwdev, 213 struct rtw89_fw_bin_info *info, 214 struct rtw89_fw_hdr_section_info *section_info, 215 const struct rtw89_fw_hdr_section_v1 *section, 216 const void *content, 217 u32 *mssc_len) 218 { 219 const struct rtw89_fw_mss_pool_hdr *mss_hdr = content + section_info->len; 220 const union rtw89_fw_section_mssc_content *section_content = content; 221 struct rtw89_fw_secure *sec = &rtwdev->fw.sec; 222 u32 rmp_tbl_size; 223 u32 key_sign_len; 224 u32 real_key_idx; 225 u32 sb_sel_ver; 226 int ret; 227 228 if (memcmp(mss_signature, mss_hdr->signature, sizeof(mss_signature)) != 0) { 229 rtw89_err(rtwdev, "[ERR] wrong MSS signature\n"); 230 return -ENOENT; 231 } 232 233 if (mss_hdr->rmpfmt == MSS_POOL_RMP_TBL_BITMASK) { 234 rmp_tbl_size = (le16_to_cpu(mss_hdr->msskey_num_max) * 235 le16_to_cpu(mss_hdr->msscust_max) * 236 mss_hdr->mssdev_max) >> 3; 237 if (mss_hdr->defen) 238 rmp_tbl_size += FWDL_MSS_POOL_DEFKEYSETS_SIZE; 
239 } else { 240 rtw89_err(rtwdev, "[ERR] MSS Key Pool Remap Table Format Unsupport:%X\n", 241 mss_hdr->rmpfmt); 242 return -EINVAL; 243 } 244 245 if (rmp_tbl_size + sizeof(*mss_hdr) != le32_to_cpu(mss_hdr->key_raw_offset)) { 246 rtw89_err(rtwdev, "[ERR] MSS Key Pool Format Error:0x%X + 0x%X != 0x%X\n", 247 rmp_tbl_size, (int)sizeof(*mss_hdr), 248 le32_to_cpu(mss_hdr->key_raw_offset)); 249 return -EINVAL; 250 } 251 252 key_sign_len = le16_to_cpu(section_content->key_sign_len.v) >> 2; 253 if (!key_sign_len) 254 key_sign_len = 512; 255 256 if (info->dsp_checksum) 257 key_sign_len += FWDL_SECURITY_CHKSUM_LEN; 258 259 *mssc_len = sizeof(*mss_hdr) + rmp_tbl_size + 260 le16_to_cpu(mss_hdr->keypair_num) * key_sign_len; 261 262 if (!sec->secure_boot) 263 goto out; 264 265 sb_sel_ver = le32_to_cpu(section_content->sb_sel_ver.v); 266 if (sb_sel_ver && sb_sel_ver != sec->sb_sel_mgn) 267 goto ignore; 268 269 ret = __get_mssc_key_idx(rtwdev, mss_hdr, rmp_tbl_size, &real_key_idx); 270 if (ret) 271 goto ignore; 272 273 section_info->key_addr = content + section_info->len + 274 le32_to_cpu(mss_hdr->key_raw_offset) + 275 key_sign_len * real_key_idx; 276 section_info->key_len = key_sign_len; 277 section_info->key_idx = real_key_idx; 278 279 out: 280 if (info->secure_section_exist) { 281 section_info->ignore = true; 282 return 0; 283 } 284 285 info->secure_section_exist = true; 286 287 return 0; 288 289 ignore: 290 section_info->ignore = true; 291 292 return 0; 293 } 294 295 static int __parse_security_section(struct rtw89_dev *rtwdev, 296 struct rtw89_fw_bin_info *info, 297 struct rtw89_fw_hdr_section_info *section_info, 298 const struct rtw89_fw_hdr_section_v1 *section, 299 const void *content, 300 u32 *mssc_len) 301 { 302 int ret; 303 304 section_info->mssc = 305 le32_get_bits(section->w2, FWSECTION_HDR_V1_W2_MSSC); 306 307 if (section_info->mssc == FORMATTED_MSSC) { 308 ret = __parse_formatted_mssc(rtwdev, info, section_info, 309 section, content, mssc_len); 310 if (ret) 311 return -EINVAL; 312 } else { 313 *mssc_len = section_info->mssc * FWDL_SECURITY_SIGLEN; 314 if (info->dsp_checksum) 315 *mssc_len += section_info->mssc * FWDL_SECURITY_CHKSUM_LEN; 316 317 info->secure_section_exist = true; 318 } 319 320 return 0; 321 } 322 323 static int rtw89_fw_hdr_parser_v1(struct rtw89_dev *rtwdev, const u8 *fw, u32 len, 324 struct rtw89_fw_bin_info *info) 325 { 326 const struct rtw89_fw_hdr_v1 *fw_hdr = (const struct rtw89_fw_hdr_v1 *)fw; 327 struct rtw89_fw_hdr_section_info *section_info; 328 const struct rtw89_fw_dynhdr_hdr *fwdynhdr; 329 const struct rtw89_fw_hdr_section_v1 *section; 330 const u8 *fw_end = fw + len; 331 const u8 *bin; 332 u32 base_hdr_len; 333 u32 mssc_len; 334 int ret; 335 u32 i; 336 337 info->section_num = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_SEC_NUM); 338 info->dsp_checksum = le32_get_bits(fw_hdr->w6, FW_HDR_V1_W6_DSP_CHKSUM); 339 base_hdr_len = struct_size(fw_hdr, sections, info->section_num); 340 info->dynamic_hdr_en = le32_get_bits(fw_hdr->w7, FW_HDR_V1_W7_DYN_HDR); 341 342 if (info->dynamic_hdr_en) { 343 info->hdr_len = le32_get_bits(fw_hdr->w5, FW_HDR_V1_W5_HDR_SIZE); 344 info->dynamic_hdr_len = info->hdr_len - base_hdr_len; 345 fwdynhdr = (const struct rtw89_fw_dynhdr_hdr *)(fw + base_hdr_len); 346 if (le32_to_cpu(fwdynhdr->hdr_len) != info->dynamic_hdr_len) { 347 rtw89_err(rtwdev, "[ERR]invalid fw dynamic header len\n"); 348 return -EINVAL; 349 } 350 } else { 351 info->hdr_len = base_hdr_len; 352 info->dynamic_hdr_len = 0; 353 } 354 355 bin = fw + info->hdr_len; 356 357 /* jump to 
section header */ 358 section_info = info->section_info; 359 for (i = 0; i < info->section_num; i++) { 360 section = &fw_hdr->sections[i]; 361 362 section_info->type = 363 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SECTIONTYPE); 364 section_info->len = 365 le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_SEC_SIZE); 366 if (le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_CHECKSUM)) 367 section_info->len += FWDL_SECTION_CHKSUM_LEN; 368 section_info->redl = le32_get_bits(section->w1, FWSECTION_HDR_V1_W1_REDL); 369 section_info->dladdr = 370 le32_get_bits(section->w0, FWSECTION_HDR_V1_W0_DL_ADDR); 371 section_info->addr = bin; 372 373 if (section_info->type == FWDL_SECURITY_SECTION_TYPE) { 374 ret = __parse_security_section(rtwdev, info, section_info, 375 section, bin, &mssc_len); 376 if (ret) 377 return ret; 378 } else { 379 section_info->mssc = 0; 380 mssc_len = 0; 381 } 382 383 rtw89_debug(rtwdev, RTW89_DBG_FW, 384 "section[%d] type=%d len=0x%-6x mssc=%d mssc_len=%d addr=%tx\n", 385 i, section_info->type, section_info->len, 386 section_info->mssc, mssc_len, bin - fw); 387 rtw89_debug(rtwdev, RTW89_DBG_FW, 388 " ignore=%d key_addr=%p (0x%tx) key_len=%d key_idx=%d\n", 389 section_info->ignore, section_info->key_addr, 390 section_info->key_addr ? 391 section_info->key_addr - section_info->addr : 0, 392 section_info->key_len, section_info->key_idx); 393 394 bin += section_info->len + mssc_len; 395 section_info++; 396 } 397 398 if (fw_end != bin) { 399 rtw89_err(rtwdev, "[ERR]fw bin size\n"); 400 return -EINVAL; 401 } 402 403 if (!info->secure_section_exist) 404 rtw89_warn(rtwdev, "no firmware secure section\n"); 405 406 return 0; 407 } 408 409 static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, 410 const struct rtw89_fw_suit *fw_suit, 411 struct rtw89_fw_bin_info *info) 412 { 413 const u8 *fw = fw_suit->data; 414 u32 len = fw_suit->size; 415 416 if (!fw || !len) { 417 rtw89_err(rtwdev, "fw type %d isn't recognized\n", fw_suit->type); 418 return -ENOENT; 419 } 420 421 switch (fw_suit->hdr_ver) { 422 case 0: 423 return rtw89_fw_hdr_parser_v0(rtwdev, fw, len, info); 424 case 1: 425 return rtw89_fw_hdr_parser_v1(rtwdev, fw, len, info); 426 default: 427 return -ENOENT; 428 } 429 } 430 431 static 432 int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 433 struct rtw89_fw_suit *fw_suit, bool nowarn) 434 { 435 struct rtw89_fw_info *fw_info = &rtwdev->fw; 436 const struct firmware *firmware = fw_info->req.firmware; 437 const u8 *mfw = firmware->data; 438 u32 mfw_len = firmware->size; 439 const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; 440 const struct rtw89_mfw_info *mfw_info; 441 int i; 442 443 if (mfw_hdr->sig != RTW89_MFW_SIG) { 444 rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n"); 445 /* legacy firmware support normal type only */ 446 if (type != RTW89_FW_NORMAL) 447 return -EINVAL; 448 fw_suit->data = mfw; 449 fw_suit->size = mfw_len; 450 return 0; 451 } 452 453 for (i = 0; i < mfw_hdr->fw_nr; i++) { 454 mfw_info = &mfw_hdr->info[i]; 455 if (mfw_info->type == type) { 456 if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp) 457 goto found; 458 if (type == RTW89_FW_LOGFMT) 459 goto found; 460 } 461 } 462 463 if (!nowarn) 464 rtw89_err(rtwdev, "no suitable firmware found\n"); 465 return -ENOENT; 466 467 found: 468 fw_suit->data = mfw + le32_to_cpu(mfw_info->shift); 469 fw_suit->size = le32_to_cpu(mfw_info->size); 470 return 0; 471 } 472 473 static u32 rtw89_mfw_get_size(struct rtw89_dev *rtwdev) 474 { 475 struct rtw89_fw_info *fw_info = 
&rtwdev->fw; 476 const struct firmware *firmware = fw_info->req.firmware; 477 const struct rtw89_mfw_hdr *mfw_hdr = 478 (const struct rtw89_mfw_hdr *)firmware->data; 479 const struct rtw89_mfw_info *mfw_info; 480 u32 size; 481 482 if (mfw_hdr->sig != RTW89_MFW_SIG) { 483 rtw89_warn(rtwdev, "not mfw format\n"); 484 return 0; 485 } 486 487 mfw_info = &mfw_hdr->info[mfw_hdr->fw_nr - 1]; 488 size = le32_to_cpu(mfw_info->shift) + le32_to_cpu(mfw_info->size); 489 490 return size; 491 } 492 493 static void rtw89_fw_update_ver_v0(struct rtw89_dev *rtwdev, 494 struct rtw89_fw_suit *fw_suit, 495 const struct rtw89_fw_hdr *hdr) 496 { 497 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MAJOR_VERSION); 498 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_W1_MINOR_VERSION); 499 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_W1_SUBVERSION); 500 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_W1_SUBINDEX); 501 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_W2_COMMITID); 502 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_W5_YEAR); 503 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_W4_MONTH); 504 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_W4_DATE); 505 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_W4_HOUR); 506 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_W4_MIN); 507 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_W7_CMD_VERSERION); 508 } 509 510 static void rtw89_fw_update_ver_v1(struct rtw89_dev *rtwdev, 511 struct rtw89_fw_suit *fw_suit, 512 const struct rtw89_fw_hdr_v1 *hdr) 513 { 514 fw_suit->major_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MAJOR_VERSION); 515 fw_suit->minor_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_MINOR_VERSION); 516 fw_suit->sub_ver = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBVERSION); 517 fw_suit->sub_idex = le32_get_bits(hdr->w1, FW_HDR_V1_W1_SUBINDEX); 518 fw_suit->commitid = le32_get_bits(hdr->w2, FW_HDR_V1_W2_COMMITID); 519 fw_suit->build_year = le32_get_bits(hdr->w5, FW_HDR_V1_W5_YEAR); 520 fw_suit->build_mon = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MONTH); 521 fw_suit->build_date = le32_get_bits(hdr->w4, FW_HDR_V1_W4_DATE); 522 fw_suit->build_hour = le32_get_bits(hdr->w4, FW_HDR_V1_W4_HOUR); 523 fw_suit->build_min = le32_get_bits(hdr->w4, FW_HDR_V1_W4_MIN); 524 fw_suit->cmd_ver = le32_get_bits(hdr->w7, FW_HDR_V1_W3_CMD_VERSERION); 525 } 526 527 static int rtw89_fw_update_ver(struct rtw89_dev *rtwdev, 528 enum rtw89_fw_type type, 529 struct rtw89_fw_suit *fw_suit) 530 { 531 const struct rtw89_fw_hdr *v0 = (const struct rtw89_fw_hdr *)fw_suit->data; 532 const struct rtw89_fw_hdr_v1 *v1 = (const struct rtw89_fw_hdr_v1 *)fw_suit->data; 533 534 if (type == RTW89_FW_LOGFMT) 535 return 0; 536 537 fw_suit->type = type; 538 fw_suit->hdr_ver = le32_get_bits(v0->w3, FW_HDR_W3_HDR_VER); 539 540 switch (fw_suit->hdr_ver) { 541 case 0: 542 rtw89_fw_update_ver_v0(rtwdev, fw_suit, v0); 543 break; 544 case 1: 545 rtw89_fw_update_ver_v1(rtwdev, fw_suit, v1); 546 break; 547 default: 548 rtw89_err(rtwdev, "Unknown firmware header version %u\n", 549 fw_suit->hdr_ver); 550 return -ENOENT; 551 } 552 553 rtw89_info(rtwdev, 554 "Firmware version %u.%u.%u.%u (%08x), cmd version %u, type %u\n", 555 fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver, 556 fw_suit->sub_idex, fw_suit->commitid, fw_suit->cmd_ver, type); 557 558 return 0; 559 } 560 561 static 562 int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 563 bool nowarn) 564 { 565 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 566 int ret; 567 568 ret = 
rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn); 569 if (ret) 570 return ret; 571 572 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 573 } 574 575 static 576 int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, 577 const struct rtw89_fw_element_hdr *elm, 578 const union rtw89_fw_element_arg arg) 579 { 580 enum rtw89_fw_type type = arg.fw_type; 581 struct rtw89_hal *hal = &rtwdev->hal; 582 struct rtw89_fw_suit *fw_suit; 583 584 if (hal->cv != elm->u.bbmcu.cv) 585 return 1; /* ignore this element */ 586 587 fw_suit = rtw89_fw_suit_get(rtwdev, type); 588 fw_suit->data = elm->u.bbmcu.contents; 589 fw_suit->size = le32_to_cpu(elm->size); 590 591 return rtw89_fw_update_ver(rtwdev, type, fw_suit); 592 } 593 594 #define __DEF_FW_FEAT_COND(__cond, __op) \ 595 static bool __fw_feat_cond_ ## __cond(u32 suit_ver_code, u32 comp_ver_code) \ 596 { \ 597 return suit_ver_code __op comp_ver_code; \ 598 } 599 600 __DEF_FW_FEAT_COND(ge, >=); /* greater or equal */ 601 __DEF_FW_FEAT_COND(le, <=); /* less or equal */ 602 __DEF_FW_FEAT_COND(lt, <); /* less than */ 603 604 struct __fw_feat_cfg { 605 enum rtw89_core_chip_id chip_id; 606 enum rtw89_fw_feature feature; 607 u32 ver_code; 608 bool (*cond)(u32 suit_ver_code, u32 comp_ver_code); 609 }; 610 611 #define __CFG_FW_FEAT(_chip, _cond, _maj, _min, _sub, _idx, _feat) \ 612 { \ 613 .chip_id = _chip, \ 614 .feature = RTW89_FW_FEATURE_ ## _feat, \ 615 .ver_code = RTW89_FW_VER_CODE(_maj, _min, _sub, _idx), \ 616 .cond = __fw_feat_cond_ ## _cond, \ 617 } 618 619 static const struct __fw_feat_cfg fw_feat_tbl[] = { 620 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, TX_WAKE), 621 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 37, 1, SCAN_OFFLOAD), 622 __CFG_FW_FEAT(RTL8851B, ge, 0, 29, 41, 0, CRASH_TRIGGER), 623 __CFG_FW_FEAT(RTL8852A, le, 0, 13, 29, 0, OLD_HT_RA_FORMAT), 624 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, SCAN_OFFLOAD), 625 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE), 626 __CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER), 627 __CFG_FW_FEAT(RTL8852A, lt, 0, 13, 38, 0, NO_PACKET_DROP), 628 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG), 629 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, TX_WAKE), 630 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, CRASH_TRIGGER), 631 __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 29, 0, SCAN_OFFLOAD), 632 __CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS), 633 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE), 634 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), 635 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), 636 __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), 637 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), 638 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), 639 __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), 640 __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), 641 }; 642 643 static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, 644 const struct rtw89_chip_info *chip, 645 u32 ver_code) 646 { 647 int i; 648 649 for (i = 0; i < ARRAY_SIZE(fw_feat_tbl); i++) { 650 const struct __fw_feat_cfg *ent = &fw_feat_tbl[i]; 651 652 if (chip->chip_id != ent->chip_id) 653 continue; 654 655 if (ent->cond(ver_code, ent->ver_code)) 656 RTW89_SET_FW_FEATURE(ent->feature, fw); 657 } 658 } 659 660 static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev) 661 { 662 const struct rtw89_chip_info *chip = rtwdev->chip; 663 const struct rtw89_fw_suit *fw_suit; 664 u32 suit_ver_code; 665 666 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL); 667 
suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit); 668 669 rtw89_fw_iterate_feature_cfg(&rtwdev->fw, chip, suit_ver_code); 670 } 671 672 const struct firmware * 673 rtw89_early_fw_feature_recognize(struct device *device, 674 const struct rtw89_chip_info *chip, 675 struct rtw89_fw_info *early_fw, 676 int *used_fw_format) 677 { 678 const struct firmware *firmware; 679 char fw_name[64]; 680 int fw_format; 681 u32 ver_code; 682 int ret; 683 684 for (fw_format = chip->fw_format_max; fw_format >= 0; fw_format--) { 685 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 686 chip->fw_basename, fw_format); 687 688 ret = request_firmware(&firmware, fw_name, device); 689 if (!ret) { 690 dev_info(device, "loaded firmware %s\n", fw_name); 691 *used_fw_format = fw_format; 692 break; 693 } 694 } 695 696 if (ret) { 697 dev_err(device, "failed to early request firmware: %d\n", ret); 698 return NULL; 699 } 700 701 ver_code = rtw89_compat_fw_hdr_ver_code(firmware->data); 702 703 if (!ver_code) 704 goto out; 705 706 rtw89_fw_iterate_feature_cfg(early_fw, chip, ver_code); 707 708 out: 709 return firmware; 710 } 711 712 int rtw89_fw_recognize(struct rtw89_dev *rtwdev) 713 { 714 const struct rtw89_chip_info *chip = rtwdev->chip; 715 int ret; 716 717 if (chip->try_ce_fw) { 718 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true); 719 if (!ret) 720 goto normal_done; 721 } 722 723 ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false); 724 if (ret) 725 return ret; 726 727 normal_done: 728 /* It still works if wowlan firmware isn't existing. */ 729 __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false); 730 731 /* It still works if log format file isn't existing. */ 732 __rtw89_fw_recognize(rtwdev, RTW89_FW_LOGFMT, true); 733 734 rtw89_fw_recognize_features(rtwdev); 735 736 rtw89_coex_recognize_ver(rtwdev); 737 738 return 0; 739 } 740 741 static 742 int rtw89_build_phy_tbl_from_elm(struct rtw89_dev *rtwdev, 743 const struct rtw89_fw_element_hdr *elm, 744 const union rtw89_fw_element_arg arg) 745 { 746 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 747 struct rtw89_phy_table *tbl; 748 struct rtw89_reg2_def *regs; 749 enum rtw89_rf_path rf_path; 750 u32 n_regs, i; 751 u8 idx; 752 753 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); 754 if (!tbl) 755 return -ENOMEM; 756 757 switch (le32_to_cpu(elm->id)) { 758 case RTW89_FW_ELEMENT_ID_BB_REG: 759 elm_info->bb_tbl = tbl; 760 break; 761 case RTW89_FW_ELEMENT_ID_BB_GAIN: 762 elm_info->bb_gain = tbl; 763 break; 764 case RTW89_FW_ELEMENT_ID_RADIO_A: 765 case RTW89_FW_ELEMENT_ID_RADIO_B: 766 case RTW89_FW_ELEMENT_ID_RADIO_C: 767 case RTW89_FW_ELEMENT_ID_RADIO_D: 768 rf_path = arg.rf_path; 769 idx = elm->u.reg2.idx; 770 771 elm_info->rf_radio[idx] = tbl; 772 tbl->rf_path = rf_path; 773 tbl->config = rtw89_phy_config_rf_reg_v1; 774 break; 775 case RTW89_FW_ELEMENT_ID_RF_NCTL: 776 elm_info->rf_nctl = tbl; 777 break; 778 default: 779 kfree(tbl); 780 return -ENOENT; 781 } 782 783 n_regs = le32_to_cpu(elm->size) / sizeof(tbl->regs[0]); 784 regs = kcalloc(n_regs, sizeof(tbl->regs[0]), GFP_KERNEL); 785 if (!regs) 786 goto out; 787 788 for (i = 0; i < n_regs; i++) { 789 regs[i].addr = le32_to_cpu(elm->u.reg2.regs[i].addr); 790 regs[i].data = le32_to_cpu(elm->u.reg2.regs[i].data); 791 } 792 793 tbl->n_regs = n_regs; 794 tbl->regs = regs; 795 796 return 0; 797 798 out: 799 kfree(tbl); 800 return -ENOMEM; 801 } 802 803 static 804 int rtw89_fw_recognize_txpwr_from_elm(struct rtw89_dev *rtwdev, 805 const struct rtw89_fw_element_hdr *elm, 806 const union rtw89_fw_element_arg arg) 
807 { 808 const struct __rtw89_fw_txpwr_element *txpwr_elm = &elm->u.txpwr; 809 const unsigned long offset = arg.offset; 810 struct rtw89_efuse *efuse = &rtwdev->efuse; 811 struct rtw89_txpwr_conf *conf; 812 813 if (!rtwdev->rfe_data) { 814 rtwdev->rfe_data = kzalloc(sizeof(*rtwdev->rfe_data), GFP_KERNEL); 815 if (!rtwdev->rfe_data) 816 return -ENOMEM; 817 } 818 819 conf = (void *)rtwdev->rfe_data + offset; 820 821 /* if multiple matched, take the last eventually */ 822 if (txpwr_elm->rfe_type == efuse->rfe_type) 823 goto setup; 824 825 /* without one is matched, accept default */ 826 if (txpwr_elm->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE && 827 (!rtw89_txpwr_conf_valid(conf) || 828 conf->rfe_type == RTW89_TXPWR_CONF_DFLT_RFE_TYPE)) 829 goto setup; 830 831 rtw89_debug(rtwdev, RTW89_DBG_FW, "skip txpwr element ID %u RFE %u\n", 832 elm->id, txpwr_elm->rfe_type); 833 return 0; 834 835 setup: 836 rtw89_debug(rtwdev, RTW89_DBG_FW, "take txpwr element ID %u RFE %u\n", 837 elm->id, txpwr_elm->rfe_type); 838 839 conf->rfe_type = txpwr_elm->rfe_type; 840 conf->ent_sz = txpwr_elm->ent_sz; 841 conf->num_ents = le32_to_cpu(txpwr_elm->num_ents); 842 conf->data = txpwr_elm->content; 843 return 0; 844 } 845 846 static 847 int rtw89_build_txpwr_trk_tbl_from_elm(struct rtw89_dev *rtwdev, 848 const struct rtw89_fw_element_hdr *elm, 849 const union rtw89_fw_element_arg arg) 850 { 851 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 852 const struct rtw89_chip_info *chip = rtwdev->chip; 853 u32 needed_bitmap = 0; 854 u32 offset = 0; 855 int subband; 856 u32 bitmap; 857 int type; 858 859 if (chip->support_bands & BIT(NL80211_BAND_6GHZ)) 860 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_6GHZ; 861 if (chip->support_bands & BIT(NL80211_BAND_5GHZ)) 862 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_5GHZ; 863 if (chip->support_bands & BIT(NL80211_BAND_2GHZ)) 864 needed_bitmap |= RTW89_DEFAULT_NEEDED_FW_TXPWR_TRK_2GHZ; 865 866 bitmap = le32_to_cpu(elm->u.txpwr_trk.bitmap); 867 868 if ((bitmap & needed_bitmap) != needed_bitmap) { 869 rtw89_warn(rtwdev, "needed txpwr trk bitmap %08x but %0x8x\n", 870 needed_bitmap, bitmap); 871 return -ENOENT; 872 } 873 874 elm_info->txpwr_trk = kzalloc(sizeof(*elm_info->txpwr_trk), GFP_KERNEL); 875 if (!elm_info->txpwr_trk) 876 return -ENOMEM; 877 878 for (type = 0; bitmap; type++, bitmap >>= 1) { 879 if (!(bitmap & BIT(0))) 880 continue; 881 882 if (type >= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_START && 883 type <= __RTW89_FW_TXPWR_TRK_TYPE_6GHZ_MAX) 884 subband = 4; 885 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_START && 886 type <= __RTW89_FW_TXPWR_TRK_TYPE_5GHZ_MAX) 887 subband = 3; 888 else if (type >= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_START && 889 type <= __RTW89_FW_TXPWR_TRK_TYPE_2GHZ_MAX) 890 subband = 1; 891 else 892 break; 893 894 elm_info->txpwr_trk->delta[type] = &elm->u.txpwr_trk.contents[offset]; 895 896 offset += subband; 897 if (offset * DELTA_SWINGIDX_SIZE > le32_to_cpu(elm->size)) 898 goto err; 899 } 900 901 return 0; 902 903 err: 904 rtw89_warn(rtwdev, "unexpected txpwr trk offset %d over size %d\n", 905 offset, le32_to_cpu(elm->size)); 906 kfree(elm_info->txpwr_trk); 907 elm_info->txpwr_trk = NULL; 908 909 return -EFAULT; 910 } 911 912 static 913 int rtw89_build_rfk_log_fmt_from_elm(struct rtw89_dev *rtwdev, 914 const struct rtw89_fw_element_hdr *elm, 915 const union rtw89_fw_element_arg arg) 916 { 917 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 918 u8 rfk_id; 919 920 if (elm_info->rfk_log_fmt) 921 goto allocated; 922 923 
elm_info->rfk_log_fmt = kzalloc(sizeof(*elm_info->rfk_log_fmt), GFP_KERNEL); 924 if (!elm_info->rfk_log_fmt) 925 return 1; /* this is an optional element, so just ignore this */ 926 927 allocated: 928 rfk_id = elm->u.rfk_log_fmt.rfk_id; 929 if (rfk_id >= RTW89_PHY_C2H_RFK_LOG_FUNC_NUM) 930 return 1; 931 932 elm_info->rfk_log_fmt->elm[rfk_id] = elm; 933 934 return 0; 935 } 936 937 static const struct rtw89_fw_element_handler __fw_element_handlers[] = { 938 [RTW89_FW_ELEMENT_ID_BBMCU0] = {__rtw89_fw_recognize_from_elm, 939 { .fw_type = RTW89_FW_BBMCU0 }, NULL}, 940 [RTW89_FW_ELEMENT_ID_BBMCU1] = {__rtw89_fw_recognize_from_elm, 941 { .fw_type = RTW89_FW_BBMCU1 }, NULL}, 942 [RTW89_FW_ELEMENT_ID_BB_REG] = {rtw89_build_phy_tbl_from_elm, {}, "BB"}, 943 [RTW89_FW_ELEMENT_ID_BB_GAIN] = {rtw89_build_phy_tbl_from_elm, {}, NULL}, 944 [RTW89_FW_ELEMENT_ID_RADIO_A] = {rtw89_build_phy_tbl_from_elm, 945 { .rf_path = RF_PATH_A }, "radio A"}, 946 [RTW89_FW_ELEMENT_ID_RADIO_B] = {rtw89_build_phy_tbl_from_elm, 947 { .rf_path = RF_PATH_B }, NULL}, 948 [RTW89_FW_ELEMENT_ID_RADIO_C] = {rtw89_build_phy_tbl_from_elm, 949 { .rf_path = RF_PATH_C }, NULL}, 950 [RTW89_FW_ELEMENT_ID_RADIO_D] = {rtw89_build_phy_tbl_from_elm, 951 { .rf_path = RF_PATH_D }, NULL}, 952 [RTW89_FW_ELEMENT_ID_RF_NCTL] = {rtw89_build_phy_tbl_from_elm, {}, "NCTL"}, 953 [RTW89_FW_ELEMENT_ID_TXPWR_BYRATE] = { 954 rtw89_fw_recognize_txpwr_from_elm, 955 { .offset = offsetof(struct rtw89_rfe_data, byrate.conf) }, "TXPWR", 956 }, 957 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_2GHZ] = { 958 rtw89_fw_recognize_txpwr_from_elm, 959 { .offset = offsetof(struct rtw89_rfe_data, lmt_2ghz.conf) }, NULL, 960 }, 961 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_5GHZ] = { 962 rtw89_fw_recognize_txpwr_from_elm, 963 { .offset = offsetof(struct rtw89_rfe_data, lmt_5ghz.conf) }, NULL, 964 }, 965 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_6GHZ] = { 966 rtw89_fw_recognize_txpwr_from_elm, 967 { .offset = offsetof(struct rtw89_rfe_data, lmt_6ghz.conf) }, NULL, 968 }, 969 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_2GHZ] = { 970 rtw89_fw_recognize_txpwr_from_elm, 971 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_2ghz.conf) }, NULL, 972 }, 973 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_5GHZ] = { 974 rtw89_fw_recognize_txpwr_from_elm, 975 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_5ghz.conf) }, NULL, 976 }, 977 [RTW89_FW_ELEMENT_ID_TXPWR_LMT_RU_6GHZ] = { 978 rtw89_fw_recognize_txpwr_from_elm, 979 { .offset = offsetof(struct rtw89_rfe_data, lmt_ru_6ghz.conf) }, NULL, 980 }, 981 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT] = { 982 rtw89_fw_recognize_txpwr_from_elm, 983 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt.conf) }, NULL, 984 }, 985 [RTW89_FW_ELEMENT_ID_TX_SHAPE_LMT_RU] = { 986 rtw89_fw_recognize_txpwr_from_elm, 987 { .offset = offsetof(struct rtw89_rfe_data, tx_shape_lmt_ru.conf) }, NULL, 988 }, 989 [RTW89_FW_ELEMENT_ID_TXPWR_TRK] = { 990 rtw89_build_txpwr_trk_tbl_from_elm, {}, "PWR_TRK", 991 }, 992 [RTW89_FW_ELEMENT_ID_RFKLOG_FMT] = { 993 rtw89_build_rfk_log_fmt_from_elm, {}, NULL, 994 }, 995 }; 996 997 int rtw89_fw_recognize_elements(struct rtw89_dev *rtwdev) 998 { 999 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1000 const struct firmware *firmware = fw_info->req.firmware; 1001 const struct rtw89_chip_info *chip = rtwdev->chip; 1002 u32 unrecognized_elements = chip->needed_fw_elms; 1003 const struct rtw89_fw_element_handler *handler; 1004 const struct rtw89_fw_element_hdr *hdr; 1005 u32 elm_size; 1006 u32 elem_id; 1007 u32 offset; 1008 int ret; 1009 1010 BUILD_BUG_ON(sizeof(chip->needed_fw_elms) * 8 < 
RTW89_FW_ELEMENT_ID_NUM); 1011 1012 offset = rtw89_mfw_get_size(rtwdev); 1013 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1014 if (offset == 0) 1015 return -EINVAL; 1016 1017 while (offset + sizeof(*hdr) < firmware->size) { 1018 hdr = (const struct rtw89_fw_element_hdr *)(firmware->data + offset); 1019 1020 elm_size = le32_to_cpu(hdr->size); 1021 if (offset + elm_size >= firmware->size) { 1022 rtw89_warn(rtwdev, "firmware element size exceeds\n"); 1023 break; 1024 } 1025 1026 elem_id = le32_to_cpu(hdr->id); 1027 if (elem_id >= ARRAY_SIZE(__fw_element_handlers)) 1028 goto next; 1029 1030 handler = &__fw_element_handlers[elem_id]; 1031 if (!handler->fn) 1032 goto next; 1033 1034 ret = handler->fn(rtwdev, hdr, handler->arg); 1035 if (ret == 1) /* ignore this element */ 1036 goto next; 1037 if (ret) 1038 return ret; 1039 1040 if (handler->name) 1041 rtw89_info(rtwdev, "Firmware element %s version: %4ph\n", 1042 handler->name, hdr->ver); 1043 1044 unrecognized_elements &= ~BIT(elem_id); 1045 next: 1046 offset += sizeof(*hdr) + elm_size; 1047 offset = ALIGN(offset, RTW89_FW_ELEMENT_ALIGN); 1048 } 1049 1050 if (unrecognized_elements) { 1051 rtw89_err(rtwdev, "Firmware elements 0x%08x are unrecognized\n", 1052 unrecognized_elements); 1053 return -ENOENT; 1054 } 1055 1056 return 0; 1057 } 1058 1059 void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb, 1060 u8 type, u8 cat, u8 class, u8 func, 1061 bool rack, bool dack, u32 len) 1062 { 1063 struct fwcmd_hdr *hdr; 1064 1065 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1066 1067 if (!(rtwdev->fw.h2c_seq % 4)) 1068 rack = true; 1069 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1070 FIELD_PREP(H2C_HDR_CAT, cat) | 1071 FIELD_PREP(H2C_HDR_CLASS, class) | 1072 FIELD_PREP(H2C_HDR_FUNC, func) | 1073 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1074 1075 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1076 len + H2C_HEADER_LEN) | 1077 (rack ? H2C_HDR_REC_ACK : 0) | 1078 (dack ? 
H2C_HDR_DONE_ACK : 0)); 1079 1080 rtwdev->fw.h2c_seq++; 1081 } 1082 1083 static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev, 1084 struct sk_buff *skb, 1085 u8 type, u8 cat, u8 class, u8 func, 1086 u32 len) 1087 { 1088 struct fwcmd_hdr *hdr; 1089 1090 hdr = (struct fwcmd_hdr *)skb_push(skb, 8); 1091 1092 hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) | 1093 FIELD_PREP(H2C_HDR_CAT, cat) | 1094 FIELD_PREP(H2C_HDR_CLASS, class) | 1095 FIELD_PREP(H2C_HDR_FUNC, func) | 1096 FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq)); 1097 1098 hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN, 1099 len + H2C_HEADER_LEN)); 1100 } 1101 1102 static u32 __rtw89_fw_download_tweak_hdr_v0(struct rtw89_dev *rtwdev, 1103 struct rtw89_fw_bin_info *info, 1104 struct rtw89_fw_hdr *fw_hdr) 1105 { 1106 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1107 FW_HDR_W7_PART_SIZE); 1108 1109 return 0; 1110 } 1111 1112 static u32 __rtw89_fw_download_tweak_hdr_v1(struct rtw89_dev *rtwdev, 1113 struct rtw89_fw_bin_info *info, 1114 struct rtw89_fw_hdr_v1 *fw_hdr) 1115 { 1116 struct rtw89_fw_hdr_section_info *section_info; 1117 struct rtw89_fw_hdr_section_v1 *section; 1118 u8 dst_sec_idx = 0; 1119 u8 sec_idx; 1120 1121 le32p_replace_bits(&fw_hdr->w7, FWDL_SECTION_PER_PKT_LEN, 1122 FW_HDR_V1_W7_PART_SIZE); 1123 1124 for (sec_idx = 0; sec_idx < info->section_num; sec_idx++) { 1125 section_info = &info->section_info[sec_idx]; 1126 section = &fw_hdr->sections[sec_idx]; 1127 1128 if (section_info->ignore) 1129 continue; 1130 1131 if (dst_sec_idx != sec_idx) 1132 fw_hdr->sections[dst_sec_idx] = *section; 1133 1134 dst_sec_idx++; 1135 } 1136 1137 le32p_replace_bits(&fw_hdr->w6, dst_sec_idx, FW_HDR_V1_W6_SEC_NUM); 1138 1139 return (info->section_num - dst_sec_idx) * sizeof(*section); 1140 } 1141 1142 static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1143 const struct rtw89_fw_suit *fw_suit, 1144 struct rtw89_fw_bin_info *info) 1145 { 1146 u32 len = info->hdr_len - info->dynamic_hdr_len; 1147 struct rtw89_fw_hdr_v1 *fw_hdr_v1; 1148 const u8 *fw = fw_suit->data; 1149 struct rtw89_fw_hdr *fw_hdr; 1150 struct sk_buff *skb; 1151 u32 truncated; 1152 u32 ret = 0; 1153 1154 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1155 if (!skb) { 1156 rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n"); 1157 return -ENOMEM; 1158 } 1159 1160 skb_put_data(skb, fw, len); 1161 1162 switch (fw_suit->hdr_ver) { 1163 case 0: 1164 fw_hdr = (struct rtw89_fw_hdr *)skb->data; 1165 truncated = __rtw89_fw_download_tweak_hdr_v0(rtwdev, info, fw_hdr); 1166 break; 1167 case 1: 1168 fw_hdr_v1 = (struct rtw89_fw_hdr_v1 *)skb->data; 1169 truncated = __rtw89_fw_download_tweak_hdr_v1(rtwdev, info, fw_hdr_v1); 1170 break; 1171 default: 1172 ret = -EOPNOTSUPP; 1173 goto fail; 1174 } 1175 1176 if (truncated) { 1177 len -= truncated; 1178 skb_trim(skb, len); 1179 } 1180 1181 rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C, 1182 H2C_CAT_MAC, H2C_CL_MAC_FWDL, 1183 H2C_FUNC_MAC_FWHDR_DL, len); 1184 1185 ret = rtw89_h2c_tx(rtwdev, skb, false); 1186 if (ret) { 1187 rtw89_err(rtwdev, "failed to send h2c\n"); 1188 ret = -1; 1189 goto fail; 1190 } 1191 1192 return 0; 1193 fail: 1194 dev_kfree_skb_any(skb); 1195 1196 return ret; 1197 } 1198 1199 static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, 1200 const struct rtw89_fw_suit *fw_suit, 1201 struct rtw89_fw_bin_info *info) 1202 { 1203 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1204 int ret; 1205 1206 ret = __rtw89_fw_download_hdr(rtwdev, fw_suit, 
info); 1207 if (ret) { 1208 rtw89_err(rtwdev, "[ERR]FW header download\n"); 1209 return ret; 1210 } 1211 1212 ret = mac->fwdl_check_path_ready(rtwdev, false); 1213 if (ret) { 1214 rtw89_err(rtwdev, "[ERR]FWDL path ready\n"); 1215 return ret; 1216 } 1217 1218 rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0); 1219 rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0); 1220 1221 return 0; 1222 } 1223 1224 static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1225 struct rtw89_fw_hdr_section_info *info) 1226 { 1227 struct sk_buff *skb; 1228 const u8 *section = info->addr; 1229 u32 residue_len = info->len; 1230 bool copy_key = false; 1231 u32 pkt_len; 1232 int ret; 1233 1234 if (info->ignore) 1235 return 0; 1236 1237 if (info->key_addr && info->key_len) { 1238 if (info->len > FWDL_SECTION_PER_PKT_LEN || info->len < info->key_len) 1239 rtw89_warn(rtwdev, "ignore to copy key data because of len %d, %d, %d\n", 1240 info->len, FWDL_SECTION_PER_PKT_LEN, info->key_len); 1241 else 1242 copy_key = true; 1243 } 1244 1245 while (residue_len) { 1246 if (residue_len >= FWDL_SECTION_PER_PKT_LEN) 1247 pkt_len = FWDL_SECTION_PER_PKT_LEN; 1248 else 1249 pkt_len = residue_len; 1250 1251 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, pkt_len); 1252 if (!skb) { 1253 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1254 return -ENOMEM; 1255 } 1256 skb_put_data(skb, section, pkt_len); 1257 1258 if (copy_key) 1259 memcpy(skb->data + pkt_len - info->key_len, 1260 info->key_addr, info->key_len); 1261 1262 ret = rtw89_h2c_tx(rtwdev, skb, true); 1263 if (ret) { 1264 rtw89_err(rtwdev, "failed to send h2c\n"); 1265 ret = -1; 1266 goto fail; 1267 } 1268 1269 section += pkt_len; 1270 residue_len -= pkt_len; 1271 } 1272 1273 return 0; 1274 fail: 1275 dev_kfree_skb_any(skb); 1276 1277 return ret; 1278 } 1279 1280 static enum rtw89_fwdl_check_type 1281 rtw89_fw_get_fwdl_chk_type_from_suit(struct rtw89_dev *rtwdev, 1282 const struct rtw89_fw_suit *fw_suit) 1283 { 1284 switch (fw_suit->type) { 1285 case RTW89_FW_BBMCU0: 1286 return RTW89_FWDL_CHECK_BB0_FWDL_DONE; 1287 case RTW89_FW_BBMCU1: 1288 return RTW89_FWDL_CHECK_BB1_FWDL_DONE; 1289 default: 1290 return RTW89_FWDL_CHECK_WCPU_FWDL_DONE; 1291 } 1292 } 1293 1294 static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, 1295 const struct rtw89_fw_suit *fw_suit, 1296 struct rtw89_fw_bin_info *info) 1297 { 1298 struct rtw89_fw_hdr_section_info *section_info = info->section_info; 1299 const struct rtw89_chip_info *chip = rtwdev->chip; 1300 enum rtw89_fwdl_check_type chk_type; 1301 u8 section_num = info->section_num; 1302 int ret; 1303 1304 while (section_num--) { 1305 ret = __rtw89_fw_download_main(rtwdev, section_info); 1306 if (ret) 1307 return ret; 1308 section_info++; 1309 } 1310 1311 if (chip->chip_gen == RTW89_CHIP_AX) 1312 return 0; 1313 1314 chk_type = rtw89_fw_get_fwdl_chk_type_from_suit(rtwdev, fw_suit); 1315 ret = rtw89_fw_check_rdy(rtwdev, chk_type); 1316 if (ret) { 1317 rtw89_warn(rtwdev, "failed to download firmware type %u\n", 1318 fw_suit->type); 1319 return ret; 1320 } 1321 1322 return 0; 1323 } 1324 1325 static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev) 1326 { 1327 enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen; 1328 u32 addr = R_AX_DBG_PORT_SEL; 1329 u32 val32; 1330 u16 index; 1331 1332 if (chip_gen == RTW89_CHIP_BE) { 1333 addr = R_BE_WLCPU_PORT_PC; 1334 goto dump; 1335 } 1336 1337 rtw89_write32(rtwdev, R_AX_DBG_CTRL, 1338 FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) | 1339 FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL)); 1340 
rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL); 1341 1342 dump: 1343 for (index = 0; index < 15; index++) { 1344 val32 = rtw89_read32(rtwdev, addr); 1345 rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32); 1346 fsleep(10); 1347 } 1348 } 1349 1350 static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev) 1351 { 1352 u32 val32; 1353 1354 val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL); 1355 rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32); 1356 1357 val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG); 1358 rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32); 1359 1360 rtw89_fw_prog_cnt_dump(rtwdev); 1361 } 1362 1363 static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev, 1364 struct rtw89_fw_suit *fw_suit) 1365 { 1366 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1367 struct rtw89_fw_bin_info info = {}; 1368 int ret; 1369 1370 ret = rtw89_fw_hdr_parser(rtwdev, fw_suit, &info); 1371 if (ret) { 1372 rtw89_err(rtwdev, "parse fw header fail\n"); 1373 return ret; 1374 } 1375 1376 if (rtwdev->chip->chip_id == RTL8922A && 1377 (fw_suit->type == RTW89_FW_NORMAL || fw_suit->type == RTW89_FW_WOWLAN)) 1378 rtw89_write32(rtwdev, R_BE_SECURE_BOOT_MALLOC_INFO, 0x20248000); 1379 1380 ret = mac->fwdl_check_path_ready(rtwdev, true); 1381 if (ret) { 1382 rtw89_err(rtwdev, "[ERR]H2C path ready\n"); 1383 return ret; 1384 } 1385 1386 ret = rtw89_fw_download_hdr(rtwdev, fw_suit, &info); 1387 if (ret) 1388 return ret; 1389 1390 ret = rtw89_fw_download_main(rtwdev, fw_suit, &info); 1391 if (ret) 1392 return ret; 1393 1394 return 0; 1395 } 1396 1397 static 1398 int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1399 bool include_bb) 1400 { 1401 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 1402 struct rtw89_fw_info *fw_info = &rtwdev->fw; 1403 struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type); 1404 u8 bbmcu_nr = rtwdev->chip->bbmcu_nr; 1405 int ret; 1406 int i; 1407 1408 mac->disable_cpu(rtwdev); 1409 ret = mac->fwdl_enable_wcpu(rtwdev, 0, true, include_bb); 1410 if (ret) 1411 return ret; 1412 1413 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1414 if (ret) 1415 goto fwdl_err; 1416 1417 for (i = 0; i < bbmcu_nr && include_bb; i++) { 1418 fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_BBMCU0 + i); 1419 1420 ret = rtw89_fw_download_suit(rtwdev, fw_suit); 1421 if (ret) 1422 goto fwdl_err; 1423 } 1424 1425 fw_info->h2c_seq = 0; 1426 fw_info->rec_seq = 0; 1427 fw_info->h2c_counter = 0; 1428 fw_info->c2h_counter = 0; 1429 rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX; 1430 rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX; 1431 1432 mdelay(5); 1433 1434 ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE); 1435 if (ret) { 1436 rtw89_warn(rtwdev, "download firmware fail\n"); 1437 goto fwdl_err; 1438 } 1439 1440 return ret; 1441 1442 fwdl_err: 1443 rtw89_fw_dl_fail_dump(rtwdev); 1444 return ret; 1445 } 1446 1447 int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, 1448 bool include_bb) 1449 { 1450 int retry; 1451 int ret; 1452 1453 for (retry = 0; retry < 5; retry++) { 1454 ret = __rtw89_fw_download(rtwdev, type, include_bb); 1455 if (!ret) 1456 return 0; 1457 } 1458 1459 return ret; 1460 } 1461 1462 int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev) 1463 { 1464 struct rtw89_fw_info *fw = &rtwdev->fw; 1465 1466 wait_for_completion(&fw->req.completion); 1467 if (!fw->req.firmware) 1468 return -EINVAL; 1469 1470 return 0; 1471 } 1472 1473 static int rtw89_load_firmware_req(struct rtw89_dev 
*rtwdev, 1474 struct rtw89_fw_req_info *req, 1475 const char *fw_name, bool nowarn) 1476 { 1477 int ret; 1478 1479 if (req->firmware) { 1480 rtw89_debug(rtwdev, RTW89_DBG_FW, 1481 "full firmware has been early requested\n"); 1482 complete_all(&req->completion); 1483 return 0; 1484 } 1485 1486 if (nowarn) 1487 ret = firmware_request_nowarn(&req->firmware, fw_name, rtwdev->dev); 1488 else 1489 ret = request_firmware(&req->firmware, fw_name, rtwdev->dev); 1490 1491 complete_all(&req->completion); 1492 1493 return ret; 1494 } 1495 1496 void rtw89_load_firmware_work(struct work_struct *work) 1497 { 1498 struct rtw89_dev *rtwdev = 1499 container_of(work, struct rtw89_dev, load_firmware_work); 1500 const struct rtw89_chip_info *chip = rtwdev->chip; 1501 char fw_name[64]; 1502 1503 rtw89_fw_get_filename(fw_name, sizeof(fw_name), 1504 chip->fw_basename, rtwdev->fw.fw_format); 1505 1506 rtw89_load_firmware_req(rtwdev, &rtwdev->fw.req, fw_name, false); 1507 } 1508 1509 static void rtw89_free_phy_tbl_from_elm(struct rtw89_phy_table *tbl) 1510 { 1511 if (!tbl) 1512 return; 1513 1514 kfree(tbl->regs); 1515 kfree(tbl); 1516 } 1517 1518 static void rtw89_unload_firmware_elements(struct rtw89_dev *rtwdev) 1519 { 1520 struct rtw89_fw_elm_info *elm_info = &rtwdev->fw.elm_info; 1521 int i; 1522 1523 rtw89_free_phy_tbl_from_elm(elm_info->bb_tbl); 1524 rtw89_free_phy_tbl_from_elm(elm_info->bb_gain); 1525 for (i = 0; i < ARRAY_SIZE(elm_info->rf_radio); i++) 1526 rtw89_free_phy_tbl_from_elm(elm_info->rf_radio[i]); 1527 rtw89_free_phy_tbl_from_elm(elm_info->rf_nctl); 1528 1529 kfree(elm_info->txpwr_trk); 1530 kfree(elm_info->rfk_log_fmt); 1531 } 1532 1533 void rtw89_unload_firmware(struct rtw89_dev *rtwdev) 1534 { 1535 struct rtw89_fw_info *fw = &rtwdev->fw; 1536 1537 cancel_work_sync(&rtwdev->load_firmware_work); 1538 1539 if (fw->req.firmware) { 1540 release_firmware(fw->req.firmware); 1541 1542 /* assign NULL back in case rtw89_free_ieee80211_hw() 1543 * try to release the same one again. 
1544 */ 1545 fw->req.firmware = NULL; 1546 } 1547 1548 kfree(fw->log.fmts); 1549 rtw89_unload_firmware_elements(rtwdev); 1550 } 1551 1552 static u32 rtw89_fw_log_get_fmt_idx(struct rtw89_dev *rtwdev, u32 fmt_id) 1553 { 1554 struct rtw89_fw_log *fw_log = &rtwdev->fw.log; 1555 u32 i; 1556 1557 if (fmt_id > fw_log->last_fmt_id) 1558 return 0; 1559 1560 for (i = 0; i < fw_log->fmt_count; i++) { 1561 if (le32_to_cpu(fw_log->fmt_ids[i]) == fmt_id) 1562 return i; 1563 } 1564 return 0; 1565 } 1566 1567 static int rtw89_fw_log_create_fmts_dict(struct rtw89_dev *rtwdev) 1568 { 1569 struct rtw89_fw_log *log = &rtwdev->fw.log; 1570 const struct rtw89_fw_logsuit_hdr *suit_hdr; 1571 struct rtw89_fw_suit *suit = &log->suit; 1572 const void *fmts_ptr, *fmts_end_ptr; 1573 u32 fmt_count; 1574 int i; 1575 1576 suit_hdr = (const struct rtw89_fw_logsuit_hdr *)suit->data; 1577 fmt_count = le32_to_cpu(suit_hdr->count); 1578 log->fmt_ids = suit_hdr->ids; 1579 fmts_ptr = &suit_hdr->ids[fmt_count]; 1580 fmts_end_ptr = suit->data + suit->size; 1581 log->fmts = kcalloc(fmt_count, sizeof(char *), GFP_KERNEL); 1582 if (!log->fmts) 1583 return -ENOMEM; 1584 1585 for (i = 0; i < fmt_count; i++) { 1586 fmts_ptr = memchr_inv(fmts_ptr, 0, fmts_end_ptr - fmts_ptr); 1587 if (!fmts_ptr) 1588 break; 1589 1590 (*log->fmts)[i] = fmts_ptr; 1591 log->last_fmt_id = le32_to_cpu(log->fmt_ids[i]); 1592 log->fmt_count++; 1593 fmts_ptr += strlen(fmts_ptr); 1594 } 1595 1596 return 0; 1597 } 1598 1599 int rtw89_fw_log_prepare(struct rtw89_dev *rtwdev) 1600 { 1601 struct rtw89_fw_log *log = &rtwdev->fw.log; 1602 struct rtw89_fw_suit *suit = &log->suit; 1603 1604 if (!suit || !suit->data) { 1605 rtw89_debug(rtwdev, RTW89_DBG_FW, "no log format file\n"); 1606 return -EINVAL; 1607 } 1608 if (log->fmts) 1609 return 0; 1610 1611 return rtw89_fw_log_create_fmts_dict(rtwdev); 1612 } 1613 1614 static void rtw89_fw_log_dump_data(struct rtw89_dev *rtwdev, 1615 const struct rtw89_fw_c2h_log_fmt *log_fmt, 1616 u32 fmt_idx, u8 para_int, bool raw_data) 1617 { 1618 const char *(*fmts)[] = rtwdev->fw.log.fmts; 1619 char str_buf[RTW89_C2H_FW_LOG_STR_BUF_SIZE]; 1620 u32 args[RTW89_C2H_FW_LOG_MAX_PARA_NUM] = {0}; 1621 int i; 1622 1623 if (log_fmt->argc > RTW89_C2H_FW_LOG_MAX_PARA_NUM) { 1624 rtw89_warn(rtwdev, "C2H log: Arg count is unexpected %d\n", 1625 log_fmt->argc); 1626 return; 1627 } 1628 1629 if (para_int) 1630 for (i = 0 ; i < log_fmt->argc; i++) 1631 args[i] = le32_to_cpu(log_fmt->u.argv[i]); 1632 1633 if (raw_data) { 1634 if (para_int) 1635 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1636 "fw_enc(%d, %d, %d) %*ph", le32_to_cpu(log_fmt->fmt_id), 1637 para_int, log_fmt->argc, (int)sizeof(args), args); 1638 else 1639 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, 1640 "fw_enc(%d, %d, %d, %s)", le32_to_cpu(log_fmt->fmt_id), 1641 para_int, log_fmt->argc, log_fmt->u.raw); 1642 } else { 1643 snprintf(str_buf, RTW89_C2H_FW_LOG_STR_BUF_SIZE, (*fmts)[fmt_idx], 1644 args[0x0], args[0x1], args[0x2], args[0x3], args[0x4], 1645 args[0x5], args[0x6], args[0x7], args[0x8], args[0x9], 1646 args[0xa], args[0xb], args[0xc], args[0xd], args[0xe], 1647 args[0xf]); 1648 } 1649 1650 rtw89_info(rtwdev, "C2H log: %s", str_buf); 1651 } 1652 1653 void rtw89_fw_log_dump(struct rtw89_dev *rtwdev, u8 *buf, u32 len) 1654 { 1655 const struct rtw89_fw_c2h_log_fmt *log_fmt; 1656 u8 para_int; 1657 u32 fmt_idx; 1658 1659 if (len < RTW89_C2H_HEADER_LEN) { 1660 rtw89_err(rtwdev, "c2h log length is wrong!\n"); 1661 return; 1662 } 1663 1664 buf += RTW89_C2H_HEADER_LEN; 1665 len -= 
RTW89_C2H_HEADER_LEN; 1666 log_fmt = (const struct rtw89_fw_c2h_log_fmt *)buf; 1667 1668 if (len < RTW89_C2H_FW_FORMATTED_LOG_MIN_LEN) 1669 goto plain_log; 1670 1671 if (log_fmt->signature != cpu_to_le16(RTW89_C2H_FW_LOG_SIGNATURE)) 1672 goto plain_log; 1673 1674 if (!rtwdev->fw.log.fmts) 1675 return; 1676 1677 para_int = u8_get_bits(log_fmt->feature, RTW89_C2H_FW_LOG_FEATURE_PARA_INT); 1678 fmt_idx = rtw89_fw_log_get_fmt_idx(rtwdev, le32_to_cpu(log_fmt->fmt_id)); 1679 1680 if (!para_int && log_fmt->argc != 0 && fmt_idx != 0) 1681 rtw89_info(rtwdev, "C2H log: %s%s", 1682 (*rtwdev->fw.log.fmts)[fmt_idx], log_fmt->u.raw); 1683 else if (fmt_idx != 0 && para_int) 1684 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, false); 1685 else 1686 rtw89_fw_log_dump_data(rtwdev, log_fmt, fmt_idx, para_int, true); 1687 return; 1688 1689 plain_log: 1690 rtw89_info(rtwdev, "C2H log: %.*s", len, buf); 1691 1692 } 1693 1694 #define H2C_CAM_LEN 60 1695 int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 1696 struct rtw89_sta *rtwsta, const u8 *scan_mac_addr) 1697 { 1698 struct sk_buff *skb; 1699 int ret; 1700 1701 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CAM_LEN); 1702 if (!skb) { 1703 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 1704 return -ENOMEM; 1705 } 1706 skb_put(skb, H2C_CAM_LEN); 1707 rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, rtwsta, scan_mac_addr, skb->data); 1708 rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, rtwsta, skb->data); 1709 1710 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1711 H2C_CAT_MAC, 1712 H2C_CL_MAC_ADDR_CAM_UPDATE, 1713 H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1, 1714 H2C_CAM_LEN); 1715 1716 ret = rtw89_h2c_tx(rtwdev, skb, false); 1717 if (ret) { 1718 rtw89_err(rtwdev, "failed to send h2c\n"); 1719 goto fail; 1720 } 1721 1722 return 0; 1723 fail: 1724 dev_kfree_skb_any(skb); 1725 1726 return ret; 1727 } 1728 1729 #define H2C_DCTL_SEC_CAM_LEN 68 1730 int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev, 1731 struct rtw89_vif *rtwvif, 1732 struct rtw89_sta *rtwsta) 1733 { 1734 struct sk_buff *skb; 1735 int ret; 1736 1737 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN); 1738 if (!skb) { 1739 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1740 return -ENOMEM; 1741 } 1742 skb_put(skb, H2C_DCTL_SEC_CAM_LEN); 1743 1744 rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data); 1745 1746 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1747 H2C_CAT_MAC, 1748 H2C_CL_MAC_FR_EXCHG, 1749 H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0, 1750 H2C_DCTL_SEC_CAM_LEN); 1751 1752 ret = rtw89_h2c_tx(rtwdev, skb, false); 1753 if (ret) { 1754 rtw89_err(rtwdev, "failed to send h2c\n"); 1755 goto fail; 1756 } 1757 1758 return 0; 1759 fail: 1760 dev_kfree_skb_any(skb); 1761 1762 return ret; 1763 } 1764 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v1); 1765 1766 int rtw89_fw_h2c_dctl_sec_cam_v2(struct rtw89_dev *rtwdev, 1767 struct rtw89_vif *rtwvif, 1768 struct rtw89_sta *rtwsta) 1769 { 1770 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1771 u32 len = sizeof(*h2c); 1772 struct sk_buff *skb; 1773 int ret; 1774 1775 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1776 if (!skb) { 1777 rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n"); 1778 return -ENOMEM; 1779 } 1780 skb_put(skb, len); 1781 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1782 1783 rtw89_cam_fill_dctl_sec_cam_info_v2(rtwdev, rtwvif, rtwsta, h2c); 1784 1785 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1786 H2C_CAT_MAC, 1787 H2C_CL_MAC_FR_EXCHG, 
1788 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1789 len); 1790 1791 ret = rtw89_h2c_tx(rtwdev, skb, false); 1792 if (ret) { 1793 rtw89_err(rtwdev, "failed to send h2c\n"); 1794 goto fail; 1795 } 1796 1797 return 0; 1798 fail: 1799 dev_kfree_skb_any(skb); 1800 1801 return ret; 1802 } 1803 EXPORT_SYMBOL(rtw89_fw_h2c_dctl_sec_cam_v2); 1804 1805 int rtw89_fw_h2c_default_dmac_tbl_v2(struct rtw89_dev *rtwdev, 1806 struct rtw89_vif *rtwvif, 1807 struct rtw89_sta *rtwsta) 1808 { 1809 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 1810 struct rtw89_h2c_dctlinfo_ud_v2 *h2c; 1811 u32 len = sizeof(*h2c); 1812 struct sk_buff *skb; 1813 int ret; 1814 1815 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1816 if (!skb) { 1817 rtw89_err(rtwdev, "failed to alloc skb for dctl v2\n"); 1818 return -ENOMEM; 1819 } 1820 skb_put(skb, len); 1821 h2c = (struct rtw89_h2c_dctlinfo_ud_v2 *)skb->data; 1822 1823 h2c->c0 = le32_encode_bits(mac_id, DCTLINFO_V2_C0_MACID) | 1824 le32_encode_bits(1, DCTLINFO_V2_C0_OP); 1825 1826 h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_ALL); 1827 h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_ALL); 1828 h2c->m2 = cpu_to_le32(DCTLINFO_V2_W2_ALL); 1829 h2c->m3 = cpu_to_le32(DCTLINFO_V2_W3_ALL); 1830 h2c->m4 = cpu_to_le32(DCTLINFO_V2_W4_ALL); 1831 h2c->m5 = cpu_to_le32(DCTLINFO_V2_W5_ALL); 1832 h2c->m6 = cpu_to_le32(DCTLINFO_V2_W6_ALL); 1833 h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_ALL); 1834 h2c->m8 = cpu_to_le32(DCTLINFO_V2_W8_ALL); 1835 h2c->m9 = cpu_to_le32(DCTLINFO_V2_W9_ALL); 1836 h2c->m10 = cpu_to_le32(DCTLINFO_V2_W10_ALL); 1837 h2c->m11 = cpu_to_le32(DCTLINFO_V2_W11_ALL); 1838 h2c->m12 = cpu_to_le32(DCTLINFO_V2_W12_ALL); 1839 1840 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1841 H2C_CAT_MAC, 1842 H2C_CL_MAC_FR_EXCHG, 1843 H2C_FUNC_MAC_DCTLINFO_UD_V2, 0, 0, 1844 len); 1845 1846 ret = rtw89_h2c_tx(rtwdev, skb, false); 1847 if (ret) { 1848 rtw89_err(rtwdev, "failed to send h2c\n"); 1849 goto fail; 1850 } 1851 1852 return 0; 1853 fail: 1854 dev_kfree_skb_any(skb); 1855 1856 return ret; 1857 } 1858 EXPORT_SYMBOL(rtw89_fw_h2c_default_dmac_tbl_v2); 1859 1860 int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 1861 bool valid, struct ieee80211_ampdu_params *params) 1862 { 1863 const struct rtw89_chip_info *chip = rtwdev->chip; 1864 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 1865 struct rtw89_h2c_ba_cam *h2c; 1866 u8 macid = rtwsta->mac_id; 1867 u32 len = sizeof(*h2c); 1868 struct sk_buff *skb; 1869 u8 entry_idx; 1870 int ret; 1871 1872 ret = valid ? 1873 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 1874 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 1875 if (ret) { 1876 /* it still works even if we don't have static BA CAM, because 1877 * hardware can create dynamic BA CAM automatically. 1878 */ 1879 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 1880 "failed to %s entry tid=%d for h2c ba cam\n", 1881 valid ? 
"alloc" : "free", params->tid); 1882 return 0; 1883 } 1884 1885 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1886 if (!skb) { 1887 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 1888 return -ENOMEM; 1889 } 1890 skb_put(skb, len); 1891 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 1892 1893 h2c->w0 = le32_encode_bits(macid, RTW89_H2C_BA_CAM_W0_MACID); 1894 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) 1895 h2c->w1 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1); 1896 else 1897 h2c->w0 |= le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W0_ENTRY_IDX); 1898 if (!valid) 1899 goto end; 1900 h2c->w0 |= le32_encode_bits(valid, RTW89_H2C_BA_CAM_W0_VALID) | 1901 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_W0_TID); 1902 if (params->buf_size > 64) 1903 h2c->w0 |= le32_encode_bits(4, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1904 else 1905 h2c->w0 |= le32_encode_bits(0, RTW89_H2C_BA_CAM_W0_BMAP_SIZE); 1906 /* If init req is set, hw will set the ssn */ 1907 h2c->w0 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_INIT_REQ) | 1908 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_W0_SSN); 1909 1910 if (chip->bacam_ver == RTW89_BACAM_V0_EXT) { 1911 h2c->w1 |= le32_encode_bits(1, RTW89_H2C_BA_CAM_W1_STD_EN) | 1912 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BA_CAM_W1_BAND); 1913 } 1914 1915 end: 1916 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1917 H2C_CAT_MAC, 1918 H2C_CL_BA_CAM, 1919 H2C_FUNC_MAC_BA_CAM, 0, 1, 1920 len); 1921 1922 ret = rtw89_h2c_tx(rtwdev, skb, false); 1923 if (ret) { 1924 rtw89_err(rtwdev, "failed to send h2c\n"); 1925 goto fail; 1926 } 1927 1928 return 0; 1929 fail: 1930 dev_kfree_skb_any(skb); 1931 1932 return ret; 1933 } 1934 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam); 1935 1936 static int rtw89_fw_h2c_init_ba_cam_v0_ext(struct rtw89_dev *rtwdev, 1937 u8 entry_idx, u8 uid) 1938 { 1939 struct rtw89_h2c_ba_cam *h2c; 1940 u32 len = sizeof(*h2c); 1941 struct sk_buff *skb; 1942 int ret; 1943 1944 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 1945 if (!skb) { 1946 rtw89_err(rtwdev, "failed to alloc skb for dynamic h2c ba cam\n"); 1947 return -ENOMEM; 1948 } 1949 skb_put(skb, len); 1950 h2c = (struct rtw89_h2c_ba_cam *)skb->data; 1951 1952 h2c->w0 = le32_encode_bits(1, RTW89_H2C_BA_CAM_W0_VALID); 1953 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_W1_ENTRY_IDX_V1) | 1954 le32_encode_bits(uid, RTW89_H2C_BA_CAM_W1_UID) | 1955 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_BAND) | 1956 le32_encode_bits(0, RTW89_H2C_BA_CAM_W1_STD_EN); 1957 1958 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 1959 H2C_CAT_MAC, 1960 H2C_CL_BA_CAM, 1961 H2C_FUNC_MAC_BA_CAM, 0, 1, 1962 len); 1963 1964 ret = rtw89_h2c_tx(rtwdev, skb, false); 1965 if (ret) { 1966 rtw89_err(rtwdev, "failed to send h2c\n"); 1967 goto fail; 1968 } 1969 1970 return 0; 1971 fail: 1972 dev_kfree_skb_any(skb); 1973 1974 return ret; 1975 } 1976 1977 void rtw89_fw_h2c_init_dynamic_ba_cam_v0_ext(struct rtw89_dev *rtwdev) 1978 { 1979 const struct rtw89_chip_info *chip = rtwdev->chip; 1980 u8 entry_idx = chip->bacam_num; 1981 u8 uid = 0; 1982 int i; 1983 1984 for (i = 0; i < chip->bacam_dynamic_num; i++) { 1985 rtw89_fw_h2c_init_ba_cam_v0_ext(rtwdev, entry_idx, uid); 1986 entry_idx++; 1987 uid++; 1988 } 1989 } 1990 1991 int rtw89_fw_h2c_ba_cam_v1(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta, 1992 bool valid, struct ieee80211_ampdu_params *params) 1993 { 1994 const struct rtw89_chip_info *chip = rtwdev->chip; 1995 struct rtw89_vif *rtwvif = rtwsta->rtwvif; 1996 struct rtw89_h2c_ba_cam_v1 *h2c; 1997 u8 macid = 
rtwsta->mac_id; 1998 u32 len = sizeof(*h2c); 1999 struct sk_buff *skb; 2000 u8 entry_idx; 2001 u8 bmap_size; 2002 int ret; 2003 2004 ret = valid ? 2005 rtw89_core_acquire_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx) : 2006 rtw89_core_release_sta_ba_entry(rtwdev, rtwsta, params->tid, &entry_idx); 2007 if (ret) { 2008 /* it still works even if we don't have static BA CAM, because 2009 * hardware can create dynamic BA CAM automatically. 2010 */ 2011 rtw89_debug(rtwdev, RTW89_DBG_TXRX, 2012 "failed to %s entry tid=%d for h2c ba cam\n", 2013 valid ? "alloc" : "free", params->tid); 2014 return 0; 2015 } 2016 2017 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2018 if (!skb) { 2019 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n"); 2020 return -ENOMEM; 2021 } 2022 skb_put(skb, len); 2023 h2c = (struct rtw89_h2c_ba_cam_v1 *)skb->data; 2024 2025 if (params->buf_size > 512) 2026 bmap_size = 10; 2027 else if (params->buf_size > 256) 2028 bmap_size = 8; 2029 else if (params->buf_size > 64) 2030 bmap_size = 4; 2031 else 2032 bmap_size = 0; 2033 2034 h2c->w0 = le32_encode_bits(valid, RTW89_H2C_BA_CAM_V1_W0_VALID) | 2035 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W0_INIT_REQ) | 2036 le32_encode_bits(macid, RTW89_H2C_BA_CAM_V1_W0_MACID_MASK) | 2037 le32_encode_bits(params->tid, RTW89_H2C_BA_CAM_V1_W0_TID_MASK) | 2038 le32_encode_bits(bmap_size, RTW89_H2C_BA_CAM_V1_W0_BMAP_SIZE_MASK) | 2039 le32_encode_bits(params->ssn, RTW89_H2C_BA_CAM_V1_W0_SSN_MASK); 2040 2041 entry_idx += chip->bacam_dynamic_num; /* std entry right after dynamic ones */ 2042 h2c->w1 = le32_encode_bits(entry_idx, RTW89_H2C_BA_CAM_V1_W1_ENTRY_IDX_MASK) | 2043 le32_encode_bits(1, RTW89_H2C_BA_CAM_V1_W1_STD_ENTRY_EN) | 2044 le32_encode_bits(!!rtwvif->mac_idx, RTW89_H2C_BA_CAM_V1_W1_BAND_SEL); 2045 2046 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2047 H2C_CAT_MAC, 2048 H2C_CL_BA_CAM, 2049 H2C_FUNC_MAC_BA_CAM_V1, 0, 1, 2050 len); 2051 2052 ret = rtw89_h2c_tx(rtwdev, skb, false); 2053 if (ret) { 2054 rtw89_err(rtwdev, "failed to send h2c\n"); 2055 goto fail; 2056 } 2057 2058 return 0; 2059 fail: 2060 dev_kfree_skb_any(skb); 2061 2062 return ret; 2063 } 2064 EXPORT_SYMBOL(rtw89_fw_h2c_ba_cam_v1); 2065 2066 int rtw89_fw_h2c_init_ba_cam_users(struct rtw89_dev *rtwdev, u8 users, 2067 u8 offset, u8 mac_idx) 2068 { 2069 struct rtw89_h2c_ba_cam_init *h2c; 2070 u32 len = sizeof(*h2c); 2071 struct sk_buff *skb; 2072 int ret; 2073 2074 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2075 if (!skb) { 2076 rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam init\n"); 2077 return -ENOMEM; 2078 } 2079 skb_put(skb, len); 2080 h2c = (struct rtw89_h2c_ba_cam_init *)skb->data; 2081 2082 h2c->w0 = le32_encode_bits(users, RTW89_H2C_BA_CAM_INIT_USERS_MASK) | 2083 le32_encode_bits(offset, RTW89_H2C_BA_CAM_INIT_OFFSET_MASK) | 2084 le32_encode_bits(mac_idx, RTW89_H2C_BA_CAM_INIT_BAND_SEL); 2085 2086 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2087 H2C_CAT_MAC, 2088 H2C_CL_BA_CAM, 2089 H2C_FUNC_MAC_BA_CAM_INIT, 0, 1, 2090 len); 2091 2092 ret = rtw89_h2c_tx(rtwdev, skb, false); 2093 if (ret) { 2094 rtw89_err(rtwdev, "failed to send h2c\n"); 2095 goto fail; 2096 } 2097 2098 return 0; 2099 fail: 2100 dev_kfree_skb_any(skb); 2101 2102 return ret; 2103 } 2104 2105 #define H2C_LOG_CFG_LEN 12 2106 int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable) 2107 { 2108 struct sk_buff *skb; 2109 u32 comp = 0; 2110 int ret; 2111 2112 if (enable) 2113 comp = BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) | 2114 
BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) | 2115 BIT(RTW89_FW_LOG_COMP_SCAN); 2116 2117 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LOG_CFG_LEN); 2118 if (!skb) { 2119 rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n"); 2120 return -ENOMEM; 2121 } 2122 2123 skb_put(skb, H2C_LOG_CFG_LEN); 2124 SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_LOUD); 2125 SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H)); 2126 SET_LOG_CFG_COMP(skb->data, comp); 2127 SET_LOG_CFG_COMP_EXT(skb->data, 0); 2128 2129 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2130 H2C_CAT_MAC, 2131 H2C_CL_FW_INFO, 2132 H2C_FUNC_LOG_CFG, 0, 0, 2133 H2C_LOG_CFG_LEN); 2134 2135 ret = rtw89_h2c_tx(rtwdev, skb, false); 2136 if (ret) { 2137 rtw89_err(rtwdev, "failed to send h2c\n"); 2138 goto fail; 2139 } 2140 2141 return 0; 2142 fail: 2143 dev_kfree_skb_any(skb); 2144 2145 return ret; 2146 } 2147 2148 static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev, 2149 struct rtw89_vif *rtwvif, 2150 enum rtw89_fw_pkt_ofld_type type, 2151 u8 *id) 2152 { 2153 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 2154 struct rtw89_pktofld_info *info; 2155 struct sk_buff *skb; 2156 int ret; 2157 2158 info = kzalloc(sizeof(*info), GFP_KERNEL); 2159 if (!info) 2160 return -ENOMEM; 2161 2162 switch (type) { 2163 case RTW89_PKT_OFLD_TYPE_PS_POLL: 2164 skb = ieee80211_pspoll_get(rtwdev->hw, vif); 2165 break; 2166 case RTW89_PKT_OFLD_TYPE_PROBE_RSP: 2167 skb = ieee80211_proberesp_get(rtwdev->hw, vif); 2168 break; 2169 case RTW89_PKT_OFLD_TYPE_NULL_DATA: 2170 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, false); 2171 break; 2172 case RTW89_PKT_OFLD_TYPE_QOS_NULL: 2173 skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true); 2174 break; 2175 default: 2176 goto err; 2177 } 2178 2179 if (!skb) 2180 goto err; 2181 2182 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb); 2183 kfree_skb(skb); 2184 2185 if (ret) 2186 goto err; 2187 2188 list_add_tail(&info->list, &rtwvif->general_pkt_list); 2189 *id = info->id; 2190 return 0; 2191 2192 err: 2193 kfree(info); 2194 return -ENOMEM; 2195 } 2196 2197 void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev, 2198 struct rtw89_vif *rtwvif, bool notify_fw) 2199 { 2200 struct list_head *pkt_list = &rtwvif->general_pkt_list; 2201 struct rtw89_pktofld_info *info, *tmp; 2202 2203 list_for_each_entry_safe(info, tmp, pkt_list, list) { 2204 if (notify_fw) 2205 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 2206 else 2207 rtw89_core_release_bit_map(rtwdev->pkt_offload, info->id); 2208 list_del(&info->list); 2209 kfree(info); 2210 } 2211 } 2212 2213 void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw) 2214 { 2215 struct rtw89_vif *rtwvif; 2216 2217 rtw89_for_each_rtwvif(rtwdev, rtwvif) 2218 rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw); 2219 } 2220 2221 #define H2C_GENERAL_PKT_LEN 6 2222 #define H2C_GENERAL_PKT_ID_UND 0xff 2223 int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, 2224 struct rtw89_vif *rtwvif, u8 macid) 2225 { 2226 u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND; 2227 u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND; 2228 u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND; 2229 struct sk_buff *skb; 2230 int ret; 2231 2232 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2233 RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll); 2234 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2235 RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null); 2236 rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 2237 RTW89_PKT_OFLD_TYPE_QOS_NULL, 
&pkt_id_qos_null); 2238 2239 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN); 2240 if (!skb) { 2241 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2242 return -ENOMEM; 2243 } 2244 skb_put(skb, H2C_GENERAL_PKT_LEN); 2245 SET_GENERAL_PKT_MACID(skb->data, macid); 2246 SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2247 SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll); 2248 SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null); 2249 SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null); 2250 SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND); 2251 2252 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2253 H2C_CAT_MAC, 2254 H2C_CL_FW_INFO, 2255 H2C_FUNC_MAC_GENERAL_PKT, 0, 1, 2256 H2C_GENERAL_PKT_LEN); 2257 2258 ret = rtw89_h2c_tx(rtwdev, skb, false); 2259 if (ret) { 2260 rtw89_err(rtwdev, "failed to send h2c\n"); 2261 goto fail; 2262 } 2263 2264 return 0; 2265 fail: 2266 dev_kfree_skb_any(skb); 2267 2268 return ret; 2269 } 2270 2271 #define H2C_LPS_PARM_LEN 8 2272 int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev, 2273 struct rtw89_lps_parm *lps_param) 2274 { 2275 struct sk_buff *skb; 2276 int ret; 2277 2278 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LPS_PARM_LEN); 2279 if (!skb) { 2280 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2281 return -ENOMEM; 2282 } 2283 skb_put(skb, H2C_LPS_PARM_LEN); 2284 2285 SET_LPS_PARM_MACID(skb->data, lps_param->macid); 2286 SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode); 2287 SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm); 2288 SET_LPS_PARM_RLBM(skb->data, 1); 2289 SET_LPS_PARM_SMARTPS(skb->data, 1); 2290 SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1); 2291 SET_LPS_PARM_VOUAPSD(skb->data, 0); 2292 SET_LPS_PARM_VIUAPSD(skb->data, 0); 2293 SET_LPS_PARM_BEUAPSD(skb->data, 0); 2294 SET_LPS_PARM_BKUAPSD(skb->data, 0); 2295 2296 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2297 H2C_CAT_MAC, 2298 H2C_CL_MAC_PS, 2299 H2C_FUNC_MAC_LPS_PARM, 0, 1, 2300 H2C_LPS_PARM_LEN); 2301 2302 ret = rtw89_h2c_tx(rtwdev, skb, false); 2303 if (ret) { 2304 rtw89_err(rtwdev, "failed to send h2c\n"); 2305 goto fail; 2306 } 2307 2308 return 0; 2309 fail: 2310 dev_kfree_skb_any(skb); 2311 2312 return ret; 2313 } 2314 2315 int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 2316 { 2317 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2318 rtwvif->sub_entity_idx); 2319 const struct rtw89_chip_info *chip = rtwdev->chip; 2320 struct rtw89_h2c_lps_ch_info *h2c; 2321 u32 len = sizeof(*h2c); 2322 struct sk_buff *skb; 2323 int ret; 2324 2325 if (chip->chip_gen != RTW89_CHIP_BE) 2326 return 0; 2327 2328 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2329 if (!skb) { 2330 rtw89_err(rtwdev, "failed to alloc skb for h2c lps_ch_info\n"); 2331 return -ENOMEM; 2332 } 2333 skb_put(skb, len); 2334 h2c = (struct rtw89_h2c_lps_ch_info *)skb->data; 2335 2336 h2c->info[0].central_ch = chan->channel; 2337 h2c->info[0].pri_ch = chan->primary_channel; 2338 h2c->info[0].band = chan->band_type; 2339 h2c->info[0].bw = chan->band_width; 2340 h2c->mlo_dbcc_mode_lps = cpu_to_le32(MLO_2_PLUS_0_1RF); 2341 2342 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2343 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, 2344 H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); 2345 2346 ret = rtw89_h2c_tx(rtwdev, skb, false); 2347 if (ret) { 2348 rtw89_err(rtwdev, "failed to send h2c\n"); 2349 goto fail; 2350 } 2351 2352 return 0; 2353 fail: 2354 dev_kfree_skb_any(skb); 2355 2356 return ret; 2357 } 2358 2359 #define H2C_P2P_ACT_LEN 20 
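/* rtw89_fw_h2c_p2p_act() below builds the P2P/NoA H2C command: it carries the
 * vif's mac_id, the NoA descriptor id and requested action, whether this role
 * is a P2P client (GC), and, when a NoA descriptor is supplied, its start
 * time, interval, duration, count and the oppPS/CTWindow byte, then hands the
 * command to the firmware via rtw89_h2c_tx().
 */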
2360 int rtw89_fw_h2c_p2p_act(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 2361 struct ieee80211_p2p_noa_desc *desc, 2362 u8 act, u8 noa_id) 2363 { 2364 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2365 bool p2p_type_gc = rtwvif->wifi_role == RTW89_WIFI_ROLE_P2P_CLIENT; 2366 u8 ctwindow_oppps = vif->bss_conf.p2p_noa_attr.oppps_ctwindow; 2367 struct sk_buff *skb; 2368 u8 *cmd; 2369 int ret; 2370 2371 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_P2P_ACT_LEN); 2372 if (!skb) { 2373 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 2374 return -ENOMEM; 2375 } 2376 skb_put(skb, H2C_P2P_ACT_LEN); 2377 cmd = skb->data; 2378 2379 RTW89_SET_FWCMD_P2P_MACID(cmd, rtwvif->mac_id); 2380 RTW89_SET_FWCMD_P2P_P2PID(cmd, 0); 2381 RTW89_SET_FWCMD_P2P_NOAID(cmd, noa_id); 2382 RTW89_SET_FWCMD_P2P_ACT(cmd, act); 2383 RTW89_SET_FWCMD_P2P_TYPE(cmd, p2p_type_gc); 2384 RTW89_SET_FWCMD_P2P_ALL_SLEP(cmd, 0); 2385 if (desc) { 2386 RTW89_SET_FWCMD_NOA_START_TIME(cmd, desc->start_time); 2387 RTW89_SET_FWCMD_NOA_INTERVAL(cmd, desc->interval); 2388 RTW89_SET_FWCMD_NOA_DURATION(cmd, desc->duration); 2389 RTW89_SET_FWCMD_NOA_COUNT(cmd, desc->count); 2390 RTW89_SET_FWCMD_NOA_CTWINDOW(cmd, ctwindow_oppps); 2391 } 2392 2393 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2394 H2C_CAT_MAC, H2C_CL_MAC_PS, 2395 H2C_FUNC_P2P_ACT, 0, 0, 2396 H2C_P2P_ACT_LEN); 2397 2398 ret = rtw89_h2c_tx(rtwdev, skb, false); 2399 if (ret) { 2400 rtw89_err(rtwdev, "failed to send h2c\n"); 2401 goto fail; 2402 } 2403 2404 return 0; 2405 fail: 2406 dev_kfree_skb_any(skb); 2407 2408 return ret; 2409 } 2410 2411 static void __rtw89_fw_h2c_set_tx_path(struct rtw89_dev *rtwdev, 2412 struct sk_buff *skb) 2413 { 2414 const struct rtw89_chip_info *chip = rtwdev->chip; 2415 struct rtw89_hal *hal = &rtwdev->hal; 2416 u8 ntx_path; 2417 u8 map_b; 2418 2419 if (chip->rf_path_num == 1) { 2420 ntx_path = RF_A; 2421 map_b = 0; 2422 } else { 2423 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B; 2424 map_b = hal->antenna_tx == RF_AB ? 1 : 0; 2425 } 2426 2427 SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path); 2428 SET_CMC_TBL_PATH_MAP_A(skb->data, 0); 2429 SET_CMC_TBL_PATH_MAP_B(skb->data, map_b); 2430 SET_CMC_TBL_PATH_MAP_C(skb->data, 0); 2431 SET_CMC_TBL_PATH_MAP_D(skb->data, 0); 2432 } 2433 2434 #define H2C_CMC_TBL_LEN 68 2435 int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, 2436 struct rtw89_vif *rtwvif, 2437 struct rtw89_sta *rtwsta) 2438 { 2439 const struct rtw89_chip_info *chip = rtwdev->chip; 2440 u8 macid = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2441 struct sk_buff *skb; 2442 int ret; 2443 2444 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2445 if (!skb) { 2446 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2447 return -ENOMEM; 2448 } 2449 skb_put(skb, H2C_CMC_TBL_LEN); 2450 SET_CTRL_INFO_MACID(skb->data, macid); 2451 SET_CTRL_INFO_OPERATION(skb->data, 1); 2452 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2453 SET_CMC_TBL_TXPWR_MODE(skb->data, 0); 2454 __rtw89_fw_h2c_set_tx_path(rtwdev, skb); 2455 SET_CMC_TBL_ANTSEL_A(skb->data, 0); 2456 SET_CMC_TBL_ANTSEL_B(skb->data, 0); 2457 SET_CMC_TBL_ANTSEL_C(skb->data, 0); 2458 SET_CMC_TBL_ANTSEL_D(skb->data, 0); 2459 } 2460 SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0); 2461 SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0); 2462 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2463 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2464 2465 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2466 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2467 chip->h2c_cctl_func_id, 0, 1, 2468 H2C_CMC_TBL_LEN); 2469 2470 ret = rtw89_h2c_tx(rtwdev, skb, false); 2471 if (ret) { 2472 rtw89_err(rtwdev, "failed to send h2c\n"); 2473 goto fail; 2474 } 2475 2476 return 0; 2477 fail: 2478 dev_kfree_skb_any(skb); 2479 2480 return ret; 2481 } 2482 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl); 2483 2484 int rtw89_fw_h2c_default_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2485 struct rtw89_vif *rtwvif, 2486 struct rtw89_sta *rtwsta) 2487 { 2488 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2489 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2490 u32 len = sizeof(*h2c); 2491 struct sk_buff *skb; 2492 int ret; 2493 2494 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2495 if (!skb) { 2496 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2497 return -ENOMEM; 2498 } 2499 skb_put(skb, len); 2500 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2501 2502 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2503 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2504 2505 h2c->w0 = le32_encode_bits(4, CCTLINFO_G7_W0_DATARATE); 2506 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_ALL); 2507 2508 h2c->w1 = le32_encode_bits(4, CCTLINFO_G7_W1_DATA_RTY_LOWEST_RATE) | 2509 le32_encode_bits(0xa, CCTLINFO_G7_W1_RTSRATE) | 2510 le32_encode_bits(4, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2511 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_ALL); 2512 2513 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_ALL); 2514 2515 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_ALL); 2516 2517 h2c->w4 = le32_encode_bits(0xFFFF, CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2518 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_ALL); 2519 2520 h2c->w5 = le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2521 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2522 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2523 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2524 le32_encode_bits(2, CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2525 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_ALL); 2526 2527 h2c->w6 = le32_encode_bits(0xb, CCTLINFO_G7_W6_RESP_REF_RATE); 2528 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ALL); 2529 2530 h2c->w7 = le32_encode_bits(1, CCTLINFO_G7_W7_NC) | 2531 le32_encode_bits(1, CCTLINFO_G7_W7_NR) | 2532 le32_encode_bits(1, CCTLINFO_G7_W7_CB) | 2533 le32_encode_bits(0x1, CCTLINFO_G7_W7_CSI_PARA_EN) | 2534 le32_encode_bits(0xb, CCTLINFO_G7_W7_CSI_FIX_RATE); 2535 h2c->m7 = cpu_to_le32(CCTLINFO_G7_W7_ALL); 2536 2537 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_ALL); 2538 2539 h2c->w14 = le32_encode_bits(0, CCTLINFO_G7_W14_VO_CURR_RATE) | 2540 le32_encode_bits(0, 
CCTLINFO_G7_W14_VI_CURR_RATE) | 2541 le32_encode_bits(0, CCTLINFO_G7_W14_BE_CURR_RATE_L); 2542 h2c->m14 = cpu_to_le32(CCTLINFO_G7_W14_ALL); 2543 2544 h2c->w15 = le32_encode_bits(0, CCTLINFO_G7_W15_BE_CURR_RATE_H) | 2545 le32_encode_bits(0, CCTLINFO_G7_W15_BK_CURR_RATE) | 2546 le32_encode_bits(0, CCTLINFO_G7_W15_MGNT_CURR_RATE); 2547 h2c->m15 = cpu_to_le32(CCTLINFO_G7_W15_ALL); 2548 2549 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2550 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2551 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2552 len); 2553 2554 ret = rtw89_h2c_tx(rtwdev, skb, false); 2555 if (ret) { 2556 rtw89_err(rtwdev, "failed to send h2c\n"); 2557 goto fail; 2558 } 2559 2560 return 0; 2561 fail: 2562 dev_kfree_skb_any(skb); 2563 2564 return ret; 2565 } 2566 EXPORT_SYMBOL(rtw89_fw_h2c_default_cmac_tbl_g7); 2567 2568 static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, 2569 struct ieee80211_sta *sta, u8 *pads) 2570 { 2571 bool ppe_th; 2572 u8 ppe16, ppe8; 2573 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2574 u8 ppe_thres_hdr = sta->deflink.he_cap.ppe_thres[0]; 2575 u8 ru_bitmap; 2576 u8 n, idx, sh; 2577 u16 ppe; 2578 int i; 2579 2580 ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, 2581 sta->deflink.he_cap.he_cap_elem.phy_cap_info[6]); 2582 if (!ppe_th) { 2583 u8 pad; 2584 2585 pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK, 2586 sta->deflink.he_cap.he_cap_elem.phy_cap_info[9]); 2587 2588 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 2589 pads[i] = pad; 2590 2591 return; 2592 } 2593 2594 ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr); 2595 n = hweight8(ru_bitmap); 2596 n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2597 2598 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2599 if (!(ru_bitmap & BIT(i))) { 2600 pads[i] = 1; 2601 continue; 2602 } 2603 2604 idx = n >> 3; 2605 sh = n & 7; 2606 n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2; 2607 2608 ppe = le16_to_cpu(*((__le16 *)&sta->deflink.he_cap.ppe_thres[idx])); 2609 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2610 sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2611 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2612 2613 if (ppe16 != 7 && ppe8 == 7) 2614 pads[i] = 2; 2615 else if (ppe8 != 7) 2616 pads[i] = 1; 2617 else 2618 pads[i] = 0; 2619 } 2620 } 2621 2622 int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev, 2623 struct ieee80211_vif *vif, 2624 struct ieee80211_sta *sta) 2625 { 2626 const struct rtw89_chip_info *chip = rtwdev->chip; 2627 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2628 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2629 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 2630 rtwvif->sub_entity_idx); 2631 struct sk_buff *skb; 2632 u8 pads[RTW89_PPE_BW_NUM]; 2633 u8 mac_id = rtwsta ? 
rtwsta->mac_id : rtwvif->mac_id; 2634 u16 lowest_rate; 2635 int ret; 2636 2637 memset(pads, 0, sizeof(pads)); 2638 if (sta && sta->deflink.he_cap.has_he) 2639 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2640 2641 if (vif->p2p) 2642 lowest_rate = RTW89_HW_RATE_OFDM6; 2643 else if (chan->band_type == RTW89_BAND_2G) 2644 lowest_rate = RTW89_HW_RATE_CCK1; 2645 else 2646 lowest_rate = RTW89_HW_RATE_OFDM6; 2647 2648 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2649 if (!skb) { 2650 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2651 return -ENOMEM; 2652 } 2653 skb_put(skb, H2C_CMC_TBL_LEN); 2654 SET_CTRL_INFO_MACID(skb->data, mac_id); 2655 SET_CTRL_INFO_OPERATION(skb->data, 1); 2656 SET_CMC_TBL_DISRTSFB(skb->data, 1); 2657 SET_CMC_TBL_DISDATAFB(skb->data, 1); 2658 SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, lowest_rate); 2659 SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0); 2660 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0); 2661 if (vif->type == NL80211_IFTYPE_STATION) 2662 SET_CMC_TBL_ULDL(skb->data, 1); 2663 else 2664 SET_CMC_TBL_ULDL(skb->data, 0); 2665 SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port); 2666 if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD_V1) { 2667 SET_CMC_TBL_NOMINAL_PKT_PADDING_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2668 SET_CMC_TBL_NOMINAL_PKT_PADDING40_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2669 SET_CMC_TBL_NOMINAL_PKT_PADDING80_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2670 SET_CMC_TBL_NOMINAL_PKT_PADDING160_V1(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2671 } else if (chip->h2c_cctl_func_id == H2C_FUNC_MAC_CCTLINFO_UD) { 2672 SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]); 2673 SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]); 2674 SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]); 2675 SET_CMC_TBL_NOMINAL_PKT_PADDING160(skb->data, pads[RTW89_CHANNEL_WIDTH_160]); 2676 } 2677 if (sta) 2678 SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, 2679 sta->deflink.he_cap.has_he); 2680 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) 2681 SET_CMC_TBL_DATA_DCM(skb->data, 0); 2682 2683 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2684 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2685 chip->h2c_cctl_func_id, 0, 1, 2686 H2C_CMC_TBL_LEN); 2687 2688 ret = rtw89_h2c_tx(rtwdev, skb, false); 2689 if (ret) { 2690 rtw89_err(rtwdev, "failed to send h2c\n"); 2691 goto fail; 2692 } 2693 2694 return 0; 2695 fail: 2696 dev_kfree_skb_any(skb); 2697 2698 return ret; 2699 } 2700 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl); 2701 2702 static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, 2703 struct ieee80211_sta *sta, u8 *pads) 2704 { 2705 u8 nss = min(sta->deflink.rx_nss, rtwdev->hal.tx_nss) - 1; 2706 u16 ppe_thres_hdr; 2707 u8 ppe16, ppe8; 2708 u8 n, idx, sh; 2709 u8 ru_bitmap; 2710 bool ppe_th; 2711 u16 ppe; 2712 int i; 2713 2714 ppe_th = !!u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2715 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT); 2716 if (!ppe_th) { 2717 u8 pad; 2718 2719 pad = u8_get_bits(sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[5], 2720 IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK); 2721 2722 for (i = 0; i < RTW89_PPE_BW_NUM; i++) 2723 pads[i] = pad; 2724 2725 return; 2726 } 2727 2728 ppe_thres_hdr = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres); 2729 ru_bitmap = u16_get_bits(ppe_thres_hdr, 2730 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2731 n = hweight8(ru_bitmap); 2732 n = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE + 2733 (n 
* IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2) * nss; 2734 2735 for (i = 0; i < RTW89_PPE_BW_NUM; i++) { 2736 if (!(ru_bitmap & BIT(i))) { 2737 pads[i] = 1; 2738 continue; 2739 } 2740 2741 idx = n >> 3; 2742 sh = n & 7; 2743 n += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2744 2745 ppe = get_unaligned_le16(sta->deflink.eht_cap.eht_ppe_thres + idx); 2746 ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2747 sh += IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE; 2748 ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; 2749 2750 if (ppe16 != 7 && ppe8 == 7) 2751 pads[i] = 2; 2752 else if (ppe8 != 7) 2753 pads[i] = 1; 2754 else 2755 pads[i] = 0; 2756 } 2757 } 2758 2759 int rtw89_fw_h2c_assoc_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2760 struct ieee80211_vif *vif, 2761 struct ieee80211_sta *sta) 2762 { 2763 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); 2764 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 2765 struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); 2766 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 2767 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2768 u8 pads[RTW89_PPE_BW_NUM]; 2769 u32 len = sizeof(*h2c); 2770 struct sk_buff *skb; 2771 u16 lowest_rate; 2772 int ret; 2773 2774 memset(pads, 0, sizeof(pads)); 2775 if (sta) { 2776 if (sta->deflink.eht_cap.has_eht) 2777 __get_sta_eht_pkt_padding(rtwdev, sta, pads); 2778 else if (sta->deflink.he_cap.has_he) 2779 __get_sta_he_pkt_padding(rtwdev, sta, pads); 2780 } 2781 2782 if (vif->p2p) 2783 lowest_rate = RTW89_HW_RATE_OFDM6; 2784 else if (chan->band_type == RTW89_BAND_2G) 2785 lowest_rate = RTW89_HW_RATE_CCK1; 2786 else 2787 lowest_rate = RTW89_HW_RATE_OFDM6; 2788 2789 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2790 if (!skb) { 2791 rtw89_err(rtwdev, "failed to alloc skb for cmac g7\n"); 2792 return -ENOMEM; 2793 } 2794 skb_put(skb, len); 2795 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2796 2797 h2c->c0 = le32_encode_bits(mac_id, CCTLINFO_G7_C0_MACID) | 2798 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2799 2800 h2c->w0 = le32_encode_bits(1, CCTLINFO_G7_W0_DISRTSFB) | 2801 le32_encode_bits(1, CCTLINFO_G7_W0_DISDATAFB); 2802 h2c->m0 = cpu_to_le32(CCTLINFO_G7_W0_DISRTSFB | 2803 CCTLINFO_G7_W0_DISDATAFB); 2804 2805 h2c->w1 = le32_encode_bits(lowest_rate, CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2806 h2c->m1 = cpu_to_le32(CCTLINFO_G7_W1_RTS_RTY_LOWEST_RATE); 2807 2808 h2c->w2 = le32_encode_bits(0, CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2809 h2c->m2 = cpu_to_le32(CCTLINFO_G7_W2_DATA_TXCNT_LMT_SEL); 2810 2811 h2c->w3 = le32_encode_bits(0, CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 2812 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_RTS_TXCNT_LMT_SEL); 2813 2814 h2c->w4 = le32_encode_bits(rtwvif->port, CCTLINFO_G7_W4_MULTI_PORT_ID); 2815 h2c->m4 = cpu_to_le32(CCTLINFO_G7_W4_MULTI_PORT_ID); 2816 2817 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 2818 h2c->w4 |= le32_encode_bits(0, CCTLINFO_G7_W4_DATA_DCM); 2819 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_DATA_DCM); 2820 } 2821 2822 if (vif->bss_conf.eht_support) { 2823 u16 punct = vif->bss_conf.chanreq.oper.punctured; 2824 2825 h2c->w4 |= le32_encode_bits(~punct, 2826 CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2827 h2c->m4 |= cpu_to_le32(CCTLINFO_G7_W4_ACT_SUBCH_CBW); 2828 } 2829 2830 h2c->w5 = le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_20], 2831 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0) | 2832 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_40], 2833 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1) | 2834 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_80], 2835 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2) | 2836 
le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_160], 2837 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3) | 2838 le32_encode_bits(pads[RTW89_CHANNEL_WIDTH_320], 2839 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2840 h2c->m5 = cpu_to_le32(CCTLINFO_G7_W5_NOMINAL_PKT_PADDING0 | 2841 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING1 | 2842 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING2 | 2843 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING3 | 2844 CCTLINFO_G7_W5_NOMINAL_PKT_PADDING4); 2845 2846 h2c->w6 = le32_encode_bits(vif->type == NL80211_IFTYPE_STATION ? 1 : 0, 2847 CCTLINFO_G7_W6_ULDL); 2848 h2c->m6 = cpu_to_le32(CCTLINFO_G7_W6_ULDL); 2849 2850 if (sta) { 2851 h2c->w8 = le32_encode_bits(sta->deflink.he_cap.has_he, 2852 CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 2853 h2c->m8 = cpu_to_le32(CCTLINFO_G7_W8_BSR_QUEUE_SIZE_FORMAT); 2854 } 2855 2856 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2857 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2858 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 1, 2859 len); 2860 2861 ret = rtw89_h2c_tx(rtwdev, skb, false); 2862 if (ret) { 2863 rtw89_err(rtwdev, "failed to send h2c\n"); 2864 goto fail; 2865 } 2866 2867 return 0; 2868 fail: 2869 dev_kfree_skb_any(skb); 2870 2871 return ret; 2872 } 2873 EXPORT_SYMBOL(rtw89_fw_h2c_assoc_cmac_tbl_g7); 2874 2875 int rtw89_fw_h2c_ampdu_cmac_tbl_g7(struct rtw89_dev *rtwdev, 2876 struct ieee80211_vif *vif, 2877 struct ieee80211_sta *sta) 2878 { 2879 struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; 2880 struct rtw89_h2c_cctlinfo_ud_g7 *h2c; 2881 u32 len = sizeof(*h2c); 2882 struct sk_buff *skb; 2883 u16 agg_num = 0; 2884 u8 ba_bmap = 0; 2885 int ret; 2886 u8 tid; 2887 2888 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 2889 if (!skb) { 2890 rtw89_err(rtwdev, "failed to alloc skb for ampdu cmac g7\n"); 2891 return -ENOMEM; 2892 } 2893 skb_put(skb, len); 2894 h2c = (struct rtw89_h2c_cctlinfo_ud_g7 *)skb->data; 2895 2896 for_each_set_bit(tid, rtwsta->ampdu_map, IEEE80211_NUM_TIDS) { 2897 if (agg_num == 0) 2898 agg_num = rtwsta->ampdu_params[tid].agg_num; 2899 else 2900 agg_num = min(agg_num, rtwsta->ampdu_params[tid].agg_num); 2901 } 2902 2903 if (agg_num <= 0x20) 2904 ba_bmap = 3; 2905 else if (agg_num > 0x20 && agg_num <= 0x40) 2906 ba_bmap = 0; 2907 else if (agg_num > 0x40 && agg_num <= 0x80) 2908 ba_bmap = 1; 2909 else if (agg_num > 0x80 && agg_num <= 0x100) 2910 ba_bmap = 2; 2911 else if (agg_num > 0x100 && agg_num <= 0x200) 2912 ba_bmap = 4; 2913 else if (agg_num > 0x200 && agg_num <= 0x400) 2914 ba_bmap = 5; 2915 2916 h2c->c0 = le32_encode_bits(rtwsta->mac_id, CCTLINFO_G7_C0_MACID) | 2917 le32_encode_bits(1, CCTLINFO_G7_C0_OP); 2918 2919 h2c->w3 = le32_encode_bits(ba_bmap, CCTLINFO_G7_W3_BA_BMAP); 2920 h2c->m3 = cpu_to_le32(CCTLINFO_G7_W3_BA_BMAP); 2921 2922 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 2923 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG, 2924 H2C_FUNC_MAC_CCTLINFO_UD_G7, 0, 0, 2925 len); 2926 2927 ret = rtw89_h2c_tx(rtwdev, skb, false); 2928 if (ret) { 2929 rtw89_err(rtwdev, "failed to send h2c\n"); 2930 goto fail; 2931 } 2932 2933 return 0; 2934 fail: 2935 dev_kfree_skb_any(skb); 2936 2937 return ret; 2938 } 2939 EXPORT_SYMBOL(rtw89_fw_h2c_ampdu_cmac_tbl_g7); 2940 2941 int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev, 2942 struct rtw89_sta *rtwsta) 2943 { 2944 const struct rtw89_chip_info *chip = rtwdev->chip; 2945 struct sk_buff *skb; 2946 int ret; 2947 2948 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN); 2949 if (!skb) { 2950 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n"); 2951 return -ENOMEM; 2952 } 2953 skb_put(skb, H2C_CMC_TBL_LEN); 2954 
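/* Per-MACID CMAC control table update: the A-MPDU max tx time is programmed
 * only when cctl_tx_time is set, and the data-frame retry limit only when
 * cctl_tx_retry_limit is set.
 */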
SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
2955 SET_CTRL_INFO_OPERATION(skb->data, 1);
2956 if (rtwsta->cctl_tx_time) {
2957 SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
2958 SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
2959 }
2960 if (rtwsta->cctl_tx_retry_limit) {
2961 SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
2962 SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
2963 }
2964
2965 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
2966 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
2967 chip->h2c_cctl_func_id, 0, 1,
2968 H2C_CMC_TBL_LEN);
2969
2970 ret = rtw89_h2c_tx(rtwdev, skb, false);
2971 if (ret) {
2972 rtw89_err(rtwdev, "failed to send h2c\n");
2973 goto fail;
2974 }
2975
2976 return 0;
2977 fail:
2978 dev_kfree_skb_any(skb);
2979
2980 return ret;
2981 }
2982
2983 int rtw89_fw_h2c_txpath_cmac_tbl(struct rtw89_dev *rtwdev,
2984 struct rtw89_sta *rtwsta)
2985 {
2986 const struct rtw89_chip_info *chip = rtwdev->chip;
2987 struct sk_buff *skb;
2988 int ret;
2989
2990 if (chip->h2c_cctl_func_id != H2C_FUNC_MAC_CCTLINFO_UD)
2991 return 0;
2992
2993 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_CMC_TBL_LEN);
2994 if (!skb) {
2995 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
2996 return -ENOMEM;
2997 }
2998 skb_put(skb, H2C_CMC_TBL_LEN);
2999 SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
3000 SET_CTRL_INFO_OPERATION(skb->data, 1);
3001
3002 __rtw89_fw_h2c_set_tx_path(rtwdev, skb);
3003
3004 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3005 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3006 H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
3007 H2C_CMC_TBL_LEN);
3008
3009 ret = rtw89_h2c_tx(rtwdev, skb, false);
3010 if (ret) {
3011 rtw89_err(rtwdev, "failed to send h2c\n");
3012 goto fail;
3013 }
3014
3015 return 0;
3016 fail:
3017 dev_kfree_skb_any(skb);
3018
3019 return ret;
3020 }
3021
3022 int rtw89_fw_h2c_update_beacon(struct rtw89_dev *rtwdev,
3023 struct rtw89_vif *rtwvif)
3024 {
3025 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev,
3026 rtwvif->sub_entity_idx);
3027 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
3028 struct rtw89_h2c_bcn_upd *h2c;
3029 struct sk_buff *skb_beacon;
3030 struct ieee80211_hdr *hdr;
3031 u32 len = sizeof(*h2c);
3032 struct sk_buff *skb;
3033 int bcn_total_len;
3034 u16 beacon_rate;
3035 u16 tim_offset;
3036 void *noa_data;
3037 u8 noa_len;
3038 int ret;
3039
3040 if (vif->p2p)
3041 beacon_rate = RTW89_HW_RATE_OFDM6;
3042 else if (chan->band_type == RTW89_BAND_2G)
3043 beacon_rate = RTW89_HW_RATE_CCK1;
3044 else
3045 beacon_rate = RTW89_HW_RATE_OFDM6;
3046
3047 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3048 NULL, 0);
3049 if (!skb_beacon) {
3050 rtw89_err(rtwdev, "failed to get beacon skb\n");
3051 return -ENOMEM;
3052 }
3053
3054 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
3055 if (noa_len &&
3056 (noa_len <= skb_tailroom(skb_beacon) ||
3057 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3058 skb_put_data(skb_beacon, noa_data, noa_len);
3059 }
3060
3061 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3062 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3063
3064 bcn_total_len = len + skb_beacon->len;
3065 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3066 if (!skb) {
3067 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3068 dev_kfree_skb_any(skb_beacon);
3069 return -ENOMEM;
3070 }
3071 skb_put(skb, len);
3072 h2c = (struct rtw89_h2c_bcn_upd *)skb->data;
3073
3074 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_W0_PORT) |
3075 le32_encode_bits(0, RTW89_H2C_BCN_UPD_W0_MBSSID) |
3076 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_W0_BAND) |
3077 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_W0_GRP_IE_OFST);
3078 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_W1_MACID) |
3079 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_W1_SSN_SEL) |
3080 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_W1_SSN_MODE) |
3081 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_W1_RATE);
3082
3083 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3084 dev_kfree_skb_any(skb_beacon);
3085
3086 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3087 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3088 H2C_FUNC_MAC_BCN_UPD, 0, 1,
3089 bcn_total_len);
3090
3091 ret = rtw89_h2c_tx(rtwdev, skb, false);
3092 if (ret) {
3093 rtw89_err(rtwdev, "failed to send h2c\n");
3094 dev_kfree_skb_any(skb);
3095 return ret;
3096 }
3097
3098 return 0;
3099 }
3100 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon);
3101
3102 int rtw89_fw_h2c_update_beacon_be(struct rtw89_dev *rtwdev,
3103 struct rtw89_vif *rtwvif)
3104 {
3105 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
3106 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
3107 struct rtw89_h2c_bcn_upd_be *h2c;
3108 struct sk_buff *skb_beacon;
3109 struct ieee80211_hdr *hdr;
3110 u32 len = sizeof(*h2c);
3111 struct sk_buff *skb;
3112 int bcn_total_len;
3113 u16 beacon_rate;
3114 u16 tim_offset;
3115 void *noa_data;
3116 u8 noa_len;
3117 int ret;
3118
3119 if (vif->p2p)
3120 beacon_rate = RTW89_HW_RATE_OFDM6;
3121 else if (chan->band_type == RTW89_BAND_2G)
3122 beacon_rate = RTW89_HW_RATE_CCK1;
3123 else
3124 beacon_rate = RTW89_HW_RATE_OFDM6;
3125
3126 skb_beacon = ieee80211_beacon_get_tim(rtwdev->hw, vif, &tim_offset,
3127 NULL, 0);
3128 if (!skb_beacon) {
3129 rtw89_err(rtwdev, "failed to get beacon skb\n");
3130 return -ENOMEM;
3131 }
3132
3133 noa_len = rtw89_p2p_noa_fetch(rtwvif, &noa_data);
3134 if (noa_len &&
3135 (noa_len <= skb_tailroom(skb_beacon) ||
3136 pskb_expand_head(skb_beacon, 0, noa_len, GFP_KERNEL) == 0)) {
3137 skb_put_data(skb_beacon, noa_data, noa_len);
3138 }
3139
3140 hdr = (struct ieee80211_hdr *)skb_beacon->data;
3141 tim_offset -= ieee80211_hdrlen(hdr->frame_control);
3142
3143 bcn_total_len = len + skb_beacon->len;
3144 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, bcn_total_len);
3145 if (!skb) {
3146 rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
3147 dev_kfree_skb_any(skb_beacon);
3148 return -ENOMEM;
3149 }
3150 skb_put(skb, len);
3151 h2c = (struct rtw89_h2c_bcn_upd_be *)skb->data;
3152
3153 h2c->w0 = le32_encode_bits(rtwvif->port, RTW89_H2C_BCN_UPD_BE_W0_PORT) |
3154 le32_encode_bits(0, RTW89_H2C_BCN_UPD_BE_W0_MBSSID) |
3155 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_BCN_UPD_BE_W0_BAND) |
3156 le32_encode_bits(tim_offset | BIT(7), RTW89_H2C_BCN_UPD_BE_W0_GRP_IE_OFST);
3157 h2c->w1 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCN_UPD_BE_W1_MACID) |
3158 le32_encode_bits(RTW89_MGMT_HW_SSN_SEL, RTW89_H2C_BCN_UPD_BE_W1_SSN_SEL) |
3159 le32_encode_bits(RTW89_MGMT_HW_SEQ_MODE, RTW89_H2C_BCN_UPD_BE_W1_SSN_MODE) |
3160 le32_encode_bits(beacon_rate, RTW89_H2C_BCN_UPD_BE_W1_RATE);
3161
3162 skb_put_data(skb, skb_beacon->data, skb_beacon->len);
3163 dev_kfree_skb_any(skb_beacon);
3164
3165 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
3166 H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
3167 H2C_FUNC_MAC_BCN_UPD_BE, 0, 1,
3168 bcn_total_len);
3169
3170 ret = rtw89_h2c_tx(rtwdev, skb, false);
3171 if (ret) {
3172 rtw89_err(rtwdev, "failed to send 
h2c\n"); 3173 goto fail; 3174 } 3175 3176 return 0; 3177 3178 fail: 3179 dev_kfree_skb_any(skb); 3180 3181 return ret; 3182 } 3183 EXPORT_SYMBOL(rtw89_fw_h2c_update_beacon_be); 3184 3185 #define H2C_ROLE_MAINTAIN_LEN 4 3186 int rtw89_fw_h2c_role_maintain(struct rtw89_dev *rtwdev, 3187 struct rtw89_vif *rtwvif, 3188 struct rtw89_sta *rtwsta, 3189 enum rtw89_upd_mode upd_mode) 3190 { 3191 struct sk_buff *skb; 3192 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 3193 u8 self_role; 3194 int ret; 3195 3196 if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE) { 3197 if (rtwsta) 3198 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3199 else 3200 self_role = rtwvif->self_role; 3201 } else { 3202 self_role = rtwvif->self_role; 3203 } 3204 3205 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ROLE_MAINTAIN_LEN); 3206 if (!skb) { 3207 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3208 return -ENOMEM; 3209 } 3210 skb_put(skb, H2C_ROLE_MAINTAIN_LEN); 3211 SET_FWROLE_MAINTAIN_MACID(skb->data, mac_id); 3212 SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, self_role); 3213 SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode); 3214 SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role); 3215 3216 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3217 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3218 H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1, 3219 H2C_ROLE_MAINTAIN_LEN); 3220 3221 ret = rtw89_h2c_tx(rtwdev, skb, false); 3222 if (ret) { 3223 rtw89_err(rtwdev, "failed to send h2c\n"); 3224 goto fail; 3225 } 3226 3227 return 0; 3228 fail: 3229 dev_kfree_skb_any(skb); 3230 3231 return ret; 3232 } 3233 3234 static enum rtw89_fw_sta_type 3235 rtw89_fw_get_sta_type(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3236 struct rtw89_sta *rtwsta) 3237 { 3238 struct ieee80211_sta *sta = rtwsta_to_sta_safe(rtwsta); 3239 struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); 3240 3241 if (!sta) 3242 goto by_vif; 3243 3244 if (sta->deflink.eht_cap.has_eht) 3245 return RTW89_FW_BE_STA; 3246 else if (sta->deflink.he_cap.has_he) 3247 return RTW89_FW_AX_STA; 3248 else 3249 return RTW89_FW_N_AC_STA; 3250 3251 by_vif: 3252 if (vif->bss_conf.eht_support) 3253 return RTW89_FW_BE_STA; 3254 else if (vif->bss_conf.he_support) 3255 return RTW89_FW_AX_STA; 3256 else 3257 return RTW89_FW_N_AC_STA; 3258 } 3259 3260 int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3261 struct rtw89_sta *rtwsta, bool dis_conn) 3262 { 3263 struct sk_buff *skb; 3264 u8 mac_id = rtwsta ? rtwsta->mac_id : rtwvif->mac_id; 3265 u8 self_role = rtwvif->self_role; 3266 enum rtw89_fw_sta_type sta_type; 3267 u8 net_type = rtwvif->net_type; 3268 struct rtw89_h2c_join_v1 *h2c_v1; 3269 struct rtw89_h2c_join *h2c; 3270 u32 len = sizeof(*h2c); 3271 bool format_v1 = false; 3272 int ret; 3273 3274 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 3275 len = sizeof(*h2c_v1); 3276 format_v1 = true; 3277 } 3278 3279 if (net_type == RTW89_NET_TYPE_AP_MODE && rtwsta) { 3280 self_role = RTW89_SELF_ROLE_AP_CLIENT; 3281 net_type = dis_conn ? 
RTW89_NET_TYPE_NO_LINK : net_type; 3282 } 3283 3284 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3285 if (!skb) { 3286 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3287 return -ENOMEM; 3288 } 3289 skb_put(skb, len); 3290 h2c = (struct rtw89_h2c_join *)skb->data; 3291 3292 h2c->w0 = le32_encode_bits(mac_id, RTW89_H2C_JOININFO_W0_MACID) | 3293 le32_encode_bits(dis_conn, RTW89_H2C_JOININFO_W0_OP) | 3294 le32_encode_bits(rtwvif->mac_idx, RTW89_H2C_JOININFO_W0_BAND) | 3295 le32_encode_bits(rtwvif->wmm, RTW89_H2C_JOININFO_W0_WMM) | 3296 le32_encode_bits(rtwvif->trigger, RTW89_H2C_JOININFO_W0_TGR) | 3297 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_ISHESTA) | 3298 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DLBW) | 3299 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_TF_MAC_PAD) | 3300 le32_encode_bits(0, RTW89_H2C_JOININFO_W0_DL_T_PE) | 3301 le32_encode_bits(rtwvif->port, RTW89_H2C_JOININFO_W0_PORT_ID) | 3302 le32_encode_bits(net_type, RTW89_H2C_JOININFO_W0_NET_TYPE) | 3303 le32_encode_bits(rtwvif->wifi_role, RTW89_H2C_JOININFO_W0_WIFI_ROLE) | 3304 le32_encode_bits(self_role, RTW89_H2C_JOININFO_W0_SELF_ROLE); 3305 3306 if (!format_v1) 3307 goto done; 3308 3309 h2c_v1 = (struct rtw89_h2c_join_v1 *)skb->data; 3310 3311 sta_type = rtw89_fw_get_sta_type(rtwdev, rtwvif, rtwsta); 3312 3313 h2c_v1->w1 = le32_encode_bits(sta_type, RTW89_H2C_JOININFO_W1_STA_TYPE); 3314 h2c_v1->w2 = 0; 3315 3316 done: 3317 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3318 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3319 H2C_FUNC_MAC_JOININFO, 0, 1, 3320 len); 3321 3322 ret = rtw89_h2c_tx(rtwdev, skb, false); 3323 if (ret) { 3324 rtw89_err(rtwdev, "failed to send h2c\n"); 3325 goto fail; 3326 } 3327 3328 return 0; 3329 fail: 3330 dev_kfree_skb_any(skb); 3331 3332 return ret; 3333 } 3334 3335 int rtw89_fw_h2c_notify_dbcc(struct rtw89_dev *rtwdev, bool en) 3336 { 3337 struct rtw89_h2c_notify_dbcc *h2c; 3338 u32 len = sizeof(*h2c); 3339 struct sk_buff *skb; 3340 int ret; 3341 3342 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3343 if (!skb) { 3344 rtw89_err(rtwdev, "failed to alloc skb for h2c notify dbcc\n"); 3345 return -ENOMEM; 3346 } 3347 skb_put(skb, len); 3348 h2c = (struct rtw89_h2c_notify_dbcc *)skb->data; 3349 3350 h2c->w0 = le32_encode_bits(en, RTW89_H2C_NOTIFY_DBCC_EN); 3351 3352 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3353 H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT, 3354 H2C_FUNC_NOTIFY_DBCC, 0, 1, 3355 len); 3356 3357 ret = rtw89_h2c_tx(rtwdev, skb, false); 3358 if (ret) { 3359 rtw89_err(rtwdev, "failed to send h2c\n"); 3360 goto fail; 3361 } 3362 3363 return 0; 3364 fail: 3365 dev_kfree_skb_any(skb); 3366 3367 return ret; 3368 } 3369 3370 int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp, 3371 bool pause) 3372 { 3373 struct rtw89_fw_macid_pause_sleep_grp *h2c_new; 3374 struct rtw89_fw_macid_pause_grp *h2c; 3375 __le32 set = cpu_to_le32(BIT(sh)); 3376 u8 h2c_macid_pause_id; 3377 struct sk_buff *skb; 3378 u32 len; 3379 int ret; 3380 3381 if (RTW89_CHK_FW_FEATURE(MACID_PAUSE_SLEEP, &rtwdev->fw)) { 3382 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE_SLEEP; 3383 len = sizeof(*h2c_new); 3384 } else { 3385 h2c_macid_pause_id = H2C_FUNC_MAC_MACID_PAUSE; 3386 len = sizeof(*h2c); 3387 } 3388 3389 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3390 if (!skb) { 3391 rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n"); 3392 return -ENOMEM; 3393 } 3394 skb_put(skb, len); 3395 3396 if (h2c_macid_pause_id == H2C_FUNC_MAC_MACID_PAUSE_SLEEP) { 3397 h2c_new = (struct 
rtw89_fw_macid_pause_sleep_grp *)skb->data; 3398 3399 h2c_new->n[0].pause_mask_grp[grp] = set; 3400 h2c_new->n[0].sleep_mask_grp[grp] = set; 3401 if (pause) { 3402 h2c_new->n[0].pause_grp[grp] = set; 3403 h2c_new->n[0].sleep_grp[grp] = set; 3404 } 3405 } else { 3406 h2c = (struct rtw89_fw_macid_pause_grp *)skb->data; 3407 3408 h2c->mask_grp[grp] = set; 3409 if (pause) 3410 h2c->pause_grp[grp] = set; 3411 } 3412 3413 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3414 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3415 h2c_macid_pause_id, 1, 0, 3416 len); 3417 3418 ret = rtw89_h2c_tx(rtwdev, skb, false); 3419 if (ret) { 3420 rtw89_err(rtwdev, "failed to send h2c\n"); 3421 goto fail; 3422 } 3423 3424 return 0; 3425 fail: 3426 dev_kfree_skb_any(skb); 3427 3428 return ret; 3429 } 3430 3431 #define H2C_EDCA_LEN 12 3432 int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3433 u8 ac, u32 val) 3434 { 3435 struct sk_buff *skb; 3436 int ret; 3437 3438 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_EDCA_LEN); 3439 if (!skb) { 3440 rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n"); 3441 return -ENOMEM; 3442 } 3443 skb_put(skb, H2C_EDCA_LEN); 3444 RTW89_SET_EDCA_SEL(skb->data, 0); 3445 RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx); 3446 RTW89_SET_EDCA_WMM(skb->data, 0); 3447 RTW89_SET_EDCA_AC(skb->data, ac); 3448 RTW89_SET_EDCA_PARAM(skb->data, val); 3449 3450 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3451 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3452 H2C_FUNC_USR_EDCA, 0, 1, 3453 H2C_EDCA_LEN); 3454 3455 ret = rtw89_h2c_tx(rtwdev, skb, false); 3456 if (ret) { 3457 rtw89_err(rtwdev, "failed to send h2c\n"); 3458 goto fail; 3459 } 3460 3461 return 0; 3462 fail: 3463 dev_kfree_skb_any(skb); 3464 3465 return ret; 3466 } 3467 3468 #define H2C_TSF32_TOGL_LEN 4 3469 int rtw89_fw_h2c_tsf32_toggle(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 3470 bool en) 3471 { 3472 struct sk_buff *skb; 3473 u16 early_us = en ? 
2000 : 0; 3474 u8 *cmd; 3475 int ret; 3476 3477 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_TSF32_TOGL_LEN); 3478 if (!skb) { 3479 rtw89_err(rtwdev, "failed to alloc skb for h2c p2p act\n"); 3480 return -ENOMEM; 3481 } 3482 skb_put(skb, H2C_TSF32_TOGL_LEN); 3483 cmd = skb->data; 3484 3485 RTW89_SET_FWCMD_TSF32_TOGL_BAND(cmd, rtwvif->mac_idx); 3486 RTW89_SET_FWCMD_TSF32_TOGL_EN(cmd, en); 3487 RTW89_SET_FWCMD_TSF32_TOGL_PORT(cmd, rtwvif->port); 3488 RTW89_SET_FWCMD_TSF32_TOGL_EARLY(cmd, early_us); 3489 3490 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3491 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3492 H2C_FUNC_TSF32_TOGL, 0, 0, 3493 H2C_TSF32_TOGL_LEN); 3494 3495 ret = rtw89_h2c_tx(rtwdev, skb, false); 3496 if (ret) { 3497 rtw89_err(rtwdev, "failed to send h2c\n"); 3498 goto fail; 3499 } 3500 3501 return 0; 3502 fail: 3503 dev_kfree_skb_any(skb); 3504 3505 return ret; 3506 } 3507 3508 #define H2C_OFLD_CFG_LEN 8 3509 int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev) 3510 { 3511 static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00}; 3512 struct sk_buff *skb; 3513 int ret; 3514 3515 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_OFLD_CFG_LEN); 3516 if (!skb) { 3517 rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n"); 3518 return -ENOMEM; 3519 } 3520 skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN); 3521 3522 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3523 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3524 H2C_FUNC_OFLD_CFG, 0, 1, 3525 H2C_OFLD_CFG_LEN); 3526 3527 ret = rtw89_h2c_tx(rtwdev, skb, false); 3528 if (ret) { 3529 rtw89_err(rtwdev, "failed to send h2c\n"); 3530 goto fail; 3531 } 3532 3533 return 0; 3534 fail: 3535 dev_kfree_skb_any(skb); 3536 3537 return ret; 3538 } 3539 3540 int rtw89_fw_h2c_set_bcn_fltr_cfg(struct rtw89_dev *rtwdev, 3541 struct ieee80211_vif *vif, 3542 bool connect) 3543 { 3544 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 3545 struct ieee80211_bss_conf *bss_conf = vif ? 
&vif->bss_conf : NULL; 3546 s32 thold = RTW89_DEFAULT_CQM_THOLD; 3547 u32 hyst = RTW89_DEFAULT_CQM_HYST; 3548 struct rtw89_h2c_bcnfltr *h2c; 3549 u32 len = sizeof(*h2c); 3550 struct sk_buff *skb; 3551 int ret; 3552 3553 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3554 return -EINVAL; 3555 3556 if (!rtwvif || !bss_conf || rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3557 return -EINVAL; 3558 3559 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3560 if (!skb) { 3561 rtw89_err(rtwdev, "failed to alloc skb for h2c bcn filter\n"); 3562 return -ENOMEM; 3563 } 3564 3565 skb_put(skb, len); 3566 h2c = (struct rtw89_h2c_bcnfltr *)skb->data; 3567 3568 if (bss_conf->cqm_rssi_hyst) 3569 hyst = bss_conf->cqm_rssi_hyst; 3570 if (bss_conf->cqm_rssi_thold) 3571 thold = bss_conf->cqm_rssi_thold; 3572 3573 h2c->w0 = le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_RSSI) | 3574 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_BCN) | 3575 le32_encode_bits(connect, RTW89_H2C_BCNFLTR_W0_MON_EN) | 3576 le32_encode_bits(RTW89_BCN_FLTR_OFFLOAD_MODE_DEFAULT, 3577 RTW89_H2C_BCNFLTR_W0_MODE) | 3578 le32_encode_bits(RTW89_BCN_LOSS_CNT, RTW89_H2C_BCNFLTR_W0_BCN_LOSS_CNT) | 3579 le32_encode_bits(hyst, RTW89_H2C_BCNFLTR_W0_RSSI_HYST) | 3580 le32_encode_bits(thold + MAX_RSSI, 3581 RTW89_H2C_BCNFLTR_W0_RSSI_THRESHOLD) | 3582 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_BCNFLTR_W0_MAC_ID); 3583 3584 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3585 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3586 H2C_FUNC_CFG_BCNFLTR, 0, 1, len); 3587 3588 ret = rtw89_h2c_tx(rtwdev, skb, false); 3589 if (ret) { 3590 rtw89_err(rtwdev, "failed to send h2c\n"); 3591 goto fail; 3592 } 3593 3594 return 0; 3595 fail: 3596 dev_kfree_skb_any(skb); 3597 3598 return ret; 3599 } 3600 3601 int rtw89_fw_h2c_rssi_offload(struct rtw89_dev *rtwdev, 3602 struct rtw89_rx_phy_ppdu *phy_ppdu) 3603 { 3604 struct rtw89_h2c_ofld_rssi *h2c; 3605 u32 len = sizeof(*h2c); 3606 struct sk_buff *skb; 3607 s8 rssi; 3608 int ret; 3609 3610 if (!RTW89_CHK_FW_FEATURE(BEACON_FILTER, &rtwdev->fw)) 3611 return -EINVAL; 3612 3613 if (!phy_ppdu) 3614 return -EINVAL; 3615 3616 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3617 if (!skb) { 3618 rtw89_err(rtwdev, "failed to alloc skb for h2c rssi\n"); 3619 return -ENOMEM; 3620 } 3621 3622 rssi = phy_ppdu->rssi_avg >> RSSI_FACTOR; 3623 skb_put(skb, len); 3624 h2c = (struct rtw89_h2c_ofld_rssi *)skb->data; 3625 3626 h2c->w0 = le32_encode_bits(phy_ppdu->mac_id, RTW89_H2C_OFLD_RSSI_W0_MACID) | 3627 le32_encode_bits(1, RTW89_H2C_OFLD_RSSI_W0_NUM); 3628 h2c->w1 = le32_encode_bits(rssi, RTW89_H2C_OFLD_RSSI_W1_VAL); 3629 3630 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3631 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3632 H2C_FUNC_OFLD_RSSI, 0, 1, len); 3633 3634 ret = rtw89_h2c_tx(rtwdev, skb, false); 3635 if (ret) { 3636 rtw89_err(rtwdev, "failed to send h2c\n"); 3637 goto fail; 3638 } 3639 3640 return 0; 3641 fail: 3642 dev_kfree_skb_any(skb); 3643 3644 return ret; 3645 } 3646 3647 int rtw89_fw_h2c_tp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) 3648 { 3649 struct rtw89_traffic_stats *stats = &rtwvif->stats; 3650 struct rtw89_h2c_ofld *h2c; 3651 u32 len = sizeof(*h2c); 3652 struct sk_buff *skb; 3653 int ret; 3654 3655 if (rtwvif->net_type != RTW89_NET_TYPE_INFRA) 3656 return -EINVAL; 3657 3658 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3659 if (!skb) { 3660 rtw89_err(rtwdev, "failed to alloc skb for h2c tp\n"); 3661 return -ENOMEM; 3662 } 3663 3664 skb_put(skb, len); 3665 h2c = (struct rtw89_h2c_ofld 
*)skb->data; 3666 3667 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_OFLD_W0_MAC_ID) | 3668 le32_encode_bits(stats->tx_throughput, RTW89_H2C_OFLD_W0_TX_TP) | 3669 le32_encode_bits(stats->rx_throughput, RTW89_H2C_OFLD_W0_RX_TP); 3670 3671 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3672 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 3673 H2C_FUNC_OFLD_TP, 0, 1, len); 3674 3675 ret = rtw89_h2c_tx(rtwdev, skb, false); 3676 if (ret) { 3677 rtw89_err(rtwdev, "failed to send h2c\n"); 3678 goto fail; 3679 } 3680 3681 return 0; 3682 fail: 3683 dev_kfree_skb_any(skb); 3684 3685 return ret; 3686 } 3687 3688 int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi) 3689 { 3690 const struct rtw89_chip_info *chip = rtwdev->chip; 3691 struct rtw89_h2c_ra_v1 *h2c_v1; 3692 struct rtw89_h2c_ra *h2c; 3693 u32 len = sizeof(*h2c); 3694 bool format_v1 = false; 3695 struct sk_buff *skb; 3696 int ret; 3697 3698 if (chip->chip_gen == RTW89_CHIP_BE) { 3699 len = sizeof(*h2c_v1); 3700 format_v1 = true; 3701 } 3702 3703 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3704 if (!skb) { 3705 rtw89_err(rtwdev, "failed to alloc skb for h2c join\n"); 3706 return -ENOMEM; 3707 } 3708 skb_put(skb, len); 3709 h2c = (struct rtw89_h2c_ra *)skb->data; 3710 rtw89_debug(rtwdev, RTW89_DBG_RA, 3711 "ra cmd msk: %llx ", ra->ra_mask); 3712 3713 h2c->w0 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_W0_MODE) | 3714 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_W0_BW_CAP) | 3715 le32_encode_bits(ra->macid, RTW89_H2C_RA_W0_MACID) | 3716 le32_encode_bits(ra->dcm_cap, RTW89_H2C_RA_W0_DCM) | 3717 le32_encode_bits(ra->er_cap, RTW89_H2C_RA_W0_ER) | 3718 le32_encode_bits(ra->init_rate_lv, RTW89_H2C_RA_W0_INIT_RATE_LV) | 3719 le32_encode_bits(ra->upd_all, RTW89_H2C_RA_W0_UPD_ALL) | 3720 le32_encode_bits(ra->en_sgi, RTW89_H2C_RA_W0_SGI) | 3721 le32_encode_bits(ra->ldpc_cap, RTW89_H2C_RA_W0_LDPC) | 3722 le32_encode_bits(ra->stbc_cap, RTW89_H2C_RA_W0_STBC) | 3723 le32_encode_bits(ra->ss_num, RTW89_H2C_RA_W0_SS_NUM) | 3724 le32_encode_bits(ra->giltf, RTW89_H2C_RA_W0_GILTF) | 3725 le32_encode_bits(ra->upd_bw_nss_mask, RTW89_H2C_RA_W0_UPD_BW_NSS_MASK) | 3726 le32_encode_bits(ra->upd_mask, RTW89_H2C_RA_W0_UPD_MASK); 3727 h2c->w1 = le32_encode_bits(ra->ra_mask, RTW89_H2C_RA_W1_RAMASK_LO32); 3728 h2c->w2 = le32_encode_bits(ra->ra_mask >> 32, RTW89_H2C_RA_W2_RAMASK_HI32); 3729 h2c->w3 = le32_encode_bits(ra->fix_giltf_en, RTW89_H2C_RA_W3_FIX_GILTF_EN) | 3730 le32_encode_bits(ra->fix_giltf, RTW89_H2C_RA_W3_FIX_GILTF); 3731 3732 if (!format_v1) 3733 goto csi; 3734 3735 h2c_v1 = (struct rtw89_h2c_ra_v1 *)h2c; 3736 h2c_v1->w4 = le32_encode_bits(ra->mode_ctrl, RTW89_H2C_RA_V1_W4_MODE_EHT) | 3737 le32_encode_bits(ra->bw_cap, RTW89_H2C_RA_V1_W4_BW_EHT); 3738 3739 csi: 3740 if (!csi) 3741 goto done; 3742 3743 h2c->w2 |= le32_encode_bits(1, RTW89_H2C_RA_W2_BFEE_CSI_CTL); 3744 h2c->w3 |= le32_encode_bits(ra->band_num, RTW89_H2C_RA_W3_BAND_NUM) | 3745 le32_encode_bits(ra->cr_tbl_sel, RTW89_H2C_RA_W3_CR_TBL_SEL) | 3746 le32_encode_bits(ra->fixed_csi_rate_en, RTW89_H2C_RA_W3_FIXED_CSI_RATE_EN) | 3747 le32_encode_bits(ra->ra_csi_rate_en, RTW89_H2C_RA_W3_RA_CSI_RATE_EN) | 3748 le32_encode_bits(ra->csi_mcs_ss_idx, RTW89_H2C_RA_W3_FIXED_CSI_MCS_SS_IDX) | 3749 le32_encode_bits(ra->csi_mode, RTW89_H2C_RA_W3_FIXED_CSI_MODE) | 3750 le32_encode_bits(ra->csi_gi_ltf, RTW89_H2C_RA_W3_FIXED_CSI_GI_LTF) | 3751 le32_encode_bits(ra->csi_bw, RTW89_H2C_RA_W3_FIXED_CSI_BW); 3752 3753 done: 3754 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3755 
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA, 3756 H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0, 3757 len); 3758 3759 ret = rtw89_h2c_tx(rtwdev, skb, false); 3760 if (ret) { 3761 rtw89_err(rtwdev, "failed to send h2c\n"); 3762 goto fail; 3763 } 3764 3765 return 0; 3766 fail: 3767 dev_kfree_skb_any(skb); 3768 3769 return ret; 3770 } 3771 3772 int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev, u8 type) 3773 { 3774 struct rtw89_btc *btc = &rtwdev->btc; 3775 struct rtw89_btc_dm *dm = &btc->dm; 3776 struct rtw89_btc_init_info *init_info = &dm->init_info.init; 3777 struct rtw89_btc_module *module = &init_info->module; 3778 struct rtw89_btc_ant_info *ant = &module->ant; 3779 struct rtw89_h2c_cxinit *h2c; 3780 u32 len = sizeof(*h2c); 3781 struct sk_buff *skb; 3782 int ret; 3783 3784 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3785 if (!skb) { 3786 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n"); 3787 return -ENOMEM; 3788 } 3789 skb_put(skb, len); 3790 h2c = (struct rtw89_h2c_cxinit *)skb->data; 3791 3792 h2c->hdr.type = type; 3793 h2c->hdr.len = len - H2C_LEN_CXDRVHDR; 3794 3795 h2c->ant_type = ant->type; 3796 h2c->ant_num = ant->num; 3797 h2c->ant_iso = ant->isolation; 3798 h2c->ant_info = 3799 u8_encode_bits(ant->single_pos, RTW89_H2C_CXINIT_ANT_INFO_POS) | 3800 u8_encode_bits(ant->diversity, RTW89_H2C_CXINIT_ANT_INFO_DIVERSITY) | 3801 u8_encode_bits(ant->btg_pos, RTW89_H2C_CXINIT_ANT_INFO_BTG_POS) | 3802 u8_encode_bits(ant->stream_cnt, RTW89_H2C_CXINIT_ANT_INFO_STREAM_CNT); 3803 3804 h2c->mod_rfe = module->rfe_type; 3805 h2c->mod_cv = module->cv; 3806 h2c->mod_info = 3807 u8_encode_bits(module->bt_solo, RTW89_H2C_CXINIT_MOD_INFO_BT_SOLO) | 3808 u8_encode_bits(module->bt_pos, RTW89_H2C_CXINIT_MOD_INFO_BT_POS) | 3809 u8_encode_bits(module->switch_type, RTW89_H2C_CXINIT_MOD_INFO_SW_TYPE) | 3810 u8_encode_bits(module->wa_type, RTW89_H2C_CXINIT_MOD_INFO_WA_TYPE); 3811 h2c->mod_adie_kt = module->kt_ver_adie; 3812 h2c->wl_gch = init_info->wl_guard_ch; 3813 3814 h2c->info = 3815 u8_encode_bits(init_info->wl_only, RTW89_H2C_CXINIT_INFO_WL_ONLY) | 3816 u8_encode_bits(init_info->wl_init_ok, RTW89_H2C_CXINIT_INFO_WL_INITOK) | 3817 u8_encode_bits(init_info->dbcc_en, RTW89_H2C_CXINIT_INFO_DBCC_EN) | 3818 u8_encode_bits(init_info->cx_other, RTW89_H2C_CXINIT_INFO_CX_OTHER) | 3819 u8_encode_bits(init_info->bt_only, RTW89_H2C_CXINIT_INFO_BT_ONLY); 3820 3821 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3822 H2C_CAT_OUTSRC, BTFC_SET, 3823 SET_DRV_INFO, 0, 0, 3824 len); 3825 3826 ret = rtw89_h2c_tx(rtwdev, skb, false); 3827 if (ret) { 3828 rtw89_err(rtwdev, "failed to send h2c\n"); 3829 goto fail; 3830 } 3831 3832 return 0; 3833 fail: 3834 dev_kfree_skb_any(skb); 3835 3836 return ret; 3837 } 3838 3839 int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type) 3840 { 3841 struct rtw89_btc *btc = &rtwdev->btc; 3842 struct rtw89_btc_dm *dm = &btc->dm; 3843 struct rtw89_btc_init_info_v7 *init_info = &dm->init_info.init_v7; 3844 struct rtw89_h2c_cxinit_v7 *h2c; 3845 u32 len = sizeof(*h2c); 3846 struct sk_buff *skb; 3847 int ret; 3848 3849 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3850 if (!skb) { 3851 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init_v7\n"); 3852 return -ENOMEM; 3853 } 3854 skb_put(skb, len); 3855 h2c = (struct rtw89_h2c_cxinit_v7 *)skb->data; 3856 3857 h2c->hdr.type = type; 3858 h2c->hdr.ver = btc->ver->fcxinit; 3859 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 3860 h2c->init = *init_info; 3861 3862 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3863 H2C_CAT_OUTSRC, 
BTFC_SET, 3864 SET_DRV_INFO, 0, 0, 3865 len); 3866 3867 ret = rtw89_h2c_tx(rtwdev, skb, false); 3868 if (ret) { 3869 rtw89_err(rtwdev, "failed to send h2c\n"); 3870 goto fail; 3871 } 3872 3873 return 0; 3874 fail: 3875 dev_kfree_skb_any(skb); 3876 3877 return ret; 3878 } 3879 3880 #define PORT_DATA_OFFSET 4 3881 #define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12 3882 #define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \ 3883 (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR) 3884 3885 int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type) 3886 { 3887 struct rtw89_btc *btc = &rtwdev->btc; 3888 const struct rtw89_btc_ver *ver = btc->ver; 3889 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 3890 struct rtw89_btc_wl_role_info *role_info = &wl->role_info; 3891 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 3892 struct rtw89_btc_wl_active_role *active = role_info->active_role; 3893 struct sk_buff *skb; 3894 u32 len; 3895 u8 offset = 0; 3896 u8 *cmd; 3897 int ret; 3898 int i; 3899 3900 len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num); 3901 3902 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3903 if (!skb) { 3904 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 3905 return -ENOMEM; 3906 } 3907 skb_put(skb, len); 3908 cmd = skb->data; 3909 3910 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 3911 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 3912 3913 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 3914 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 3915 3916 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 3917 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 3918 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 3919 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 3920 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 3921 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 3922 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 3923 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 3924 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 3925 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 3926 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 3927 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 3928 3929 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 3930 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 3931 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 3932 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 3933 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 3934 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 3935 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 3936 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 3937 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 3938 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 3939 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 3940 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 3941 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 3942 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 3943 } 3944 3945 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 3946 H2C_CAT_OUTSRC, BTFC_SET, 3947 SET_DRV_INFO, 0, 0, 3948 len); 3949 3950 ret = rtw89_h2c_tx(rtwdev, skb, false); 3951 if (ret) { 3952 rtw89_err(rtwdev, "failed to send h2c\n"); 3953 goto fail; 3954 } 3955 3956 return 0; 3957 fail: 3958 dev_kfree_skb_any(skb); 3959 3960 
return ret; 3961 } 3962 3963 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \ 3964 (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 3965 3966 int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type) 3967 { 3968 struct rtw89_btc *btc = &rtwdev->btc; 3969 const struct rtw89_btc_ver *ver = btc->ver; 3970 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 3971 struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1; 3972 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 3973 struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1; 3974 struct sk_buff *skb; 3975 u32 len; 3976 u8 *cmd, offset; 3977 int ret; 3978 int i; 3979 3980 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num); 3981 3982 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 3983 if (!skb) { 3984 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 3985 return -ENOMEM; 3986 } 3987 skb_put(skb, len); 3988 cmd = skb->data; 3989 3990 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 3991 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 3992 3993 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 3994 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 3995 3996 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 3997 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 3998 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 3999 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4000 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4001 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4002 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4003 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4004 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4005 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4006 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4007 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4008 4009 offset = PORT_DATA_OFFSET; 4010 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4011 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i, offset); 4012 RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i, offset); 4013 RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i, offset); 4014 RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i, offset); 4015 RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i, offset); 4016 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i, offset); 4017 RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i, offset); 4018 RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i, offset); 4019 RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i, offset); 4020 RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i, offset); 4021 RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i, offset); 4022 RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i, offset); 4023 RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i, offset); 4024 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset); 4025 } 4026 4027 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4028 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4029 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4030 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4031 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4032 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, offset); 4033 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, 
offset); 4034 4035 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4036 H2C_CAT_OUTSRC, BTFC_SET, 4037 SET_DRV_INFO, 0, 0, 4038 len); 4039 4040 ret = rtw89_h2c_tx(rtwdev, skb, false); 4041 if (ret) { 4042 rtw89_err(rtwdev, "failed to send h2c\n"); 4043 goto fail; 4044 } 4045 4046 return 0; 4047 fail: 4048 dev_kfree_skb_any(skb); 4049 4050 return ret; 4051 } 4052 4053 #define H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(max_role_num) \ 4054 (4 + 8 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR) 4055 4056 int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type) 4057 { 4058 struct rtw89_btc *btc = &rtwdev->btc; 4059 const struct rtw89_btc_ver *ver = btc->ver; 4060 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4061 struct rtw89_btc_wl_role_info_v2 *role_info = &wl->role_info_v2; 4062 struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role; 4063 struct rtw89_btc_wl_active_role_v2 *active = role_info->active_role_v2; 4064 struct sk_buff *skb; 4065 u32 len; 4066 u8 *cmd, offset; 4067 int ret; 4068 int i; 4069 4070 len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V2(ver->max_role_num); 4071 4072 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4073 if (!skb) { 4074 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n"); 4075 return -ENOMEM; 4076 } 4077 skb_put(skb, len); 4078 cmd = skb->data; 4079 4080 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4081 RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR); 4082 4083 RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt); 4084 RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode); 4085 4086 RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none); 4087 RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station); 4088 RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap); 4089 RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap); 4090 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc); 4091 RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master); 4092 RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh); 4093 RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter); 4094 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device); 4095 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc); 4096 RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go); 4097 RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan); 4098 4099 offset = PORT_DATA_OFFSET; 4100 for (i = 0; i < RTW89_PORT_NUM; i++, active++) { 4101 RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED_V2(cmd, active->connected, i, offset); 4102 RTW89_SET_FWCMD_CXROLE_ACT_PID_V2(cmd, active->pid, i, offset); 4103 RTW89_SET_FWCMD_CXROLE_ACT_PHY_V2(cmd, active->phy, i, offset); 4104 RTW89_SET_FWCMD_CXROLE_ACT_NOA_V2(cmd, active->noa, i, offset); 4105 RTW89_SET_FWCMD_CXROLE_ACT_BAND_V2(cmd, active->band, i, offset); 4106 RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS_V2(cmd, active->client_ps, i, offset); 4107 RTW89_SET_FWCMD_CXROLE_ACT_BW_V2(cmd, active->bw, i, offset); 4108 RTW89_SET_FWCMD_CXROLE_ACT_ROLE_V2(cmd, active->role, i, offset); 4109 RTW89_SET_FWCMD_CXROLE_ACT_CH_V2(cmd, active->ch, i, offset); 4110 RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR_V2(cmd, active->noa_duration, i, offset); 4111 } 4112 4113 offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN; 4114 RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset); 4115 RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset); 4116 RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset); 4117 RTW89_SET_FWCMD_CXROLE_DBCC_CHG(cmd, role_info->dbcc_chg, offset); 4118 RTW89_SET_FWCMD_CXROLE_DBCC_2G_PHY(cmd, role_info->dbcc_2g_phy, 
offset); 4119 RTW89_SET_FWCMD_CXROLE_LINK_MODE_CHG(cmd, role_info->link_mode_chg, offset); 4120 4121 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4122 H2C_CAT_OUTSRC, BTFC_SET, 4123 SET_DRV_INFO, 0, 0, 4124 len); 4125 4126 ret = rtw89_h2c_tx(rtwdev, skb, false); 4127 if (ret) { 4128 rtw89_err(rtwdev, "failed to send h2c\n"); 4129 goto fail; 4130 } 4131 4132 return 0; 4133 fail: 4134 dev_kfree_skb_any(skb); 4135 4136 return ret; 4137 } 4138 4139 int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type) 4140 { 4141 struct rtw89_btc *btc = &rtwdev->btc; 4142 struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8; 4143 struct rtw89_h2c_cxrole_v8 *h2c; 4144 u32 len = sizeof(*h2c); 4145 struct sk_buff *skb; 4146 int ret; 4147 4148 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4149 if (!skb) { 4150 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4151 return -ENOMEM; 4152 } 4153 skb_put(skb, len); 4154 h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data; 4155 4156 h2c->hdr.type = type; 4157 h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7; 4158 memcpy(&h2c->_u8, role, sizeof(h2c->_u8)); 4159 h2c->_u32.role_map = cpu_to_le32(role->role_map); 4160 h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type); 4161 h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration); 4162 4163 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4164 H2C_CAT_OUTSRC, BTFC_SET, 4165 SET_DRV_INFO, 0, 0, 4166 len); 4167 4168 ret = rtw89_h2c_tx(rtwdev, skb, false); 4169 if (ret) { 4170 rtw89_err(rtwdev, "failed to send h2c\n"); 4171 goto fail; 4172 } 4173 4174 return 0; 4175 fail: 4176 dev_kfree_skb_any(skb); 4177 4178 return ret; 4179 } 4180 4181 #define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR) 4182 int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type) 4183 { 4184 struct rtw89_btc *btc = &rtwdev->btc; 4185 const struct rtw89_btc_ver *ver = btc->ver; 4186 struct rtw89_btc_ctrl *ctrl = &btc->ctrl.ctrl; 4187 struct sk_buff *skb; 4188 u8 *cmd; 4189 int ret; 4190 4191 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_CTRL); 4192 if (!skb) { 4193 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4194 return -ENOMEM; 4195 } 4196 skb_put(skb, H2C_LEN_CXDRVINFO_CTRL); 4197 cmd = skb->data; 4198 4199 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4200 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR); 4201 4202 RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual); 4203 RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt); 4204 RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun); 4205 if (ver->fcxctrl == 0) 4206 RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step); 4207 4208 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4209 H2C_CAT_OUTSRC, BTFC_SET, 4210 SET_DRV_INFO, 0, 0, 4211 H2C_LEN_CXDRVINFO_CTRL); 4212 4213 ret = rtw89_h2c_tx(rtwdev, skb, false); 4214 if (ret) { 4215 rtw89_err(rtwdev, "failed to send h2c\n"); 4216 goto fail; 4217 } 4218 4219 return 0; 4220 fail: 4221 dev_kfree_skb_any(skb); 4222 4223 return ret; 4224 } 4225 4226 int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type) 4227 { 4228 struct rtw89_btc *btc = &rtwdev->btc; 4229 struct rtw89_btc_ctrl_v7 *ctrl = &btc->ctrl.ctrl_v7; 4230 struct rtw89_h2c_cxctrl_v7 *h2c; 4231 u32 len = sizeof(*h2c); 4232 struct sk_buff *skb; 4233 int ret; 4234 4235 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4236 if (!skb) { 4237 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4238 return -ENOMEM; 4239 } 4240 skb_put(skb, len); 4241 h2c = 
(struct rtw89_h2c_cxctrl_v7 *)skb->data; 4242 4243 h2c->hdr.type = type; 4244 h2c->hdr.ver = btc->ver->fcxctrl; 4245 h2c->hdr.len = sizeof(*h2c) - H2C_LEN_CXDRVHDR_V7; 4246 h2c->ctrl = *ctrl; 4247 4248 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4249 H2C_CAT_OUTSRC, BTFC_SET, 4250 SET_DRV_INFO, 0, 0, len); 4251 4252 ret = rtw89_h2c_tx(rtwdev, skb, false); 4253 if (ret) { 4254 rtw89_err(rtwdev, "failed to send h2c\n"); 4255 goto fail; 4256 } 4257 4258 return 0; 4259 fail: 4260 dev_kfree_skb_any(skb); 4261 4262 return ret; 4263 } 4264 4265 #define H2C_LEN_CXDRVINFO_TRX (28 + H2C_LEN_CXDRVHDR) 4266 int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type) 4267 { 4268 struct rtw89_btc *btc = &rtwdev->btc; 4269 struct rtw89_btc_trx_info *trx = &btc->dm.trx_info; 4270 struct sk_buff *skb; 4271 u8 *cmd; 4272 int ret; 4273 4274 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_TRX); 4275 if (!skb) { 4276 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_trx\n"); 4277 return -ENOMEM; 4278 } 4279 skb_put(skb, H2C_LEN_CXDRVINFO_TRX); 4280 cmd = skb->data; 4281 4282 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4283 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_TRX - H2C_LEN_CXDRVHDR); 4284 4285 RTW89_SET_FWCMD_CXTRX_TXLV(cmd, trx->tx_lvl); 4286 RTW89_SET_FWCMD_CXTRX_RXLV(cmd, trx->rx_lvl); 4287 RTW89_SET_FWCMD_CXTRX_WLRSSI(cmd, trx->wl_rssi); 4288 RTW89_SET_FWCMD_CXTRX_BTRSSI(cmd, trx->bt_rssi); 4289 RTW89_SET_FWCMD_CXTRX_TXPWR(cmd, trx->tx_power); 4290 RTW89_SET_FWCMD_CXTRX_RXGAIN(cmd, trx->rx_gain); 4291 RTW89_SET_FWCMD_CXTRX_BTTXPWR(cmd, trx->bt_tx_power); 4292 RTW89_SET_FWCMD_CXTRX_BTRXGAIN(cmd, trx->bt_rx_gain); 4293 RTW89_SET_FWCMD_CXTRX_CN(cmd, trx->cn); 4294 RTW89_SET_FWCMD_CXTRX_NHM(cmd, trx->nhm); 4295 RTW89_SET_FWCMD_CXTRX_BTPROFILE(cmd, trx->bt_profile); 4296 RTW89_SET_FWCMD_CXTRX_RSVD2(cmd, trx->rsvd2); 4297 RTW89_SET_FWCMD_CXTRX_TXRATE(cmd, trx->tx_rate); 4298 RTW89_SET_FWCMD_CXTRX_RXRATE(cmd, trx->rx_rate); 4299 RTW89_SET_FWCMD_CXTRX_TXTP(cmd, trx->tx_tp); 4300 RTW89_SET_FWCMD_CXTRX_RXTP(cmd, trx->rx_tp); 4301 RTW89_SET_FWCMD_CXTRX_RXERRRA(cmd, trx->rx_err_ratio); 4302 4303 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4304 H2C_CAT_OUTSRC, BTFC_SET, 4305 SET_DRV_INFO, 0, 0, 4306 H2C_LEN_CXDRVINFO_TRX); 4307 4308 ret = rtw89_h2c_tx(rtwdev, skb, false); 4309 if (ret) { 4310 rtw89_err(rtwdev, "failed to send h2c\n"); 4311 goto fail; 4312 } 4313 4314 return 0; 4315 fail: 4316 dev_kfree_skb_any(skb); 4317 4318 return ret; 4319 } 4320 4321 #define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR) 4322 int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev, u8 type) 4323 { 4324 struct rtw89_btc *btc = &rtwdev->btc; 4325 struct rtw89_btc_wl_info *wl = &btc->cx.wl; 4326 struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info; 4327 struct sk_buff *skb; 4328 u8 *cmd; 4329 int ret; 4330 4331 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_RFK); 4332 if (!skb) { 4333 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n"); 4334 return -ENOMEM; 4335 } 4336 skb_put(skb, H2C_LEN_CXDRVINFO_RFK); 4337 cmd = skb->data; 4338 4339 RTW89_SET_FWCMD_CXHDR_TYPE(cmd, type); 4340 RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR); 4341 4342 RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state); 4343 RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map); 4344 RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map); 4345 RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band); 4346 RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type); 4347 4348
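/* Wrap the coex RFK payload with the common H2C descriptor: category
 * OUTSRC, class BTFC_SET, function SET_DRV_INFO. The two zeros in the
 * following call sit in the same positions that carry the rack/dack
 * flags in rtw89_fw_h2c_raw_with_hdr() later in this file.
 */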
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4349 H2C_CAT_OUTSRC, BTFC_SET, 4350 SET_DRV_INFO, 0, 0, 4351 H2C_LEN_CXDRVINFO_RFK); 4352 4353 ret = rtw89_h2c_tx(rtwdev, skb, false); 4354 if (ret) { 4355 rtw89_err(rtwdev, "failed to send h2c\n"); 4356 goto fail; 4357 } 4358 4359 return 0; 4360 fail: 4361 dev_kfree_skb_any(skb); 4362 4363 return ret; 4364 } 4365 4366 #define H2C_LEN_PKT_OFLD 4 4367 int rtw89_fw_h2c_del_pkt_offload(struct rtw89_dev *rtwdev, u8 id) 4368 { 4369 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4370 struct sk_buff *skb; 4371 unsigned int cond; 4372 u8 *cmd; 4373 int ret; 4374 4375 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD); 4376 if (!skb) { 4377 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4378 return -ENOMEM; 4379 } 4380 skb_put(skb, H2C_LEN_PKT_OFLD); 4381 cmd = skb->data; 4382 4383 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, id); 4384 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_DEL); 4385 4386 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4387 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4388 H2C_FUNC_PACKET_OFLD, 1, 1, 4389 H2C_LEN_PKT_OFLD); 4390 4391 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(id, RTW89_PKT_OFLD_OP_DEL); 4392 4393 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4394 if (ret < 0) { 4395 rtw89_debug(rtwdev, RTW89_DBG_FW, 4396 "failed to del pkt ofld: id %d, ret %d\n", 4397 id, ret); 4398 return ret; 4399 } 4400 4401 rtw89_core_release_bit_map(rtwdev->pkt_offload, id); 4402 return 0; 4403 } 4404 4405 int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id, 4406 struct sk_buff *skb_ofld) 4407 { 4408 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4409 struct sk_buff *skb; 4410 unsigned int cond; 4411 u8 *cmd; 4412 u8 alloc_id; 4413 int ret; 4414 4415 alloc_id = rtw89_core_acquire_bit_map(rtwdev->pkt_offload, 4416 RTW89_MAX_PKT_OFLD_NUM); 4417 if (alloc_id == RTW89_MAX_PKT_OFLD_NUM) 4418 return -ENOSPC; 4419 4420 *id = alloc_id; 4421 4422 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len); 4423 if (!skb) { 4424 rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n"); 4425 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4426 return -ENOMEM; 4427 } 4428 skb_put(skb, H2C_LEN_PKT_OFLD); 4429 cmd = skb->data; 4430 4431 RTW89_SET_FWCMD_PACKET_OFLD_PKT_IDX(cmd, alloc_id); 4432 RTW89_SET_FWCMD_PACKET_OFLD_PKT_OP(cmd, RTW89_PKT_OFLD_OP_ADD); 4433 RTW89_SET_FWCMD_PACKET_OFLD_PKT_LENGTH(cmd, skb_ofld->len); 4434 skb_put_data(skb, skb_ofld->data, skb_ofld->len); 4435 4436 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4437 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4438 H2C_FUNC_PACKET_OFLD, 1, 1, 4439 H2C_LEN_PKT_OFLD + skb_ofld->len); 4440 4441 cond = RTW89_FW_OFLD_WAIT_COND_PKT_OFLD(alloc_id, RTW89_PKT_OFLD_OP_ADD); 4442 4443 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4444 if (ret < 0) { 4445 rtw89_debug(rtwdev, RTW89_DBG_FW, 4446 "failed to add pkt ofld: id %d, ret %d\n", 4447 alloc_id, ret); 4448 rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id); 4449 return ret; 4450 } 4451 4452 return 0; 4453 } 4454 4455 int rtw89_fw_h2c_scan_list_offload(struct rtw89_dev *rtwdev, int ch_num, 4456 struct list_head *chan_list) 4457 { 4458 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4459 struct rtw89_h2c_chinfo_elem *elem; 4460 struct rtw89_mac_chinfo *ch_info; 4461 struct rtw89_h2c_chinfo *h2c; 4462 struct sk_buff *skb; 4463 unsigned int cond; 4464 int skb_len; 4465 int ret; 4466 4467 static_assert(sizeof(*elem) == 
RTW89_MAC_CHINFO_SIZE); 4468 4469 skb_len = struct_size(h2c, elem, ch_num); 4470 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4471 if (!skb) { 4472 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4473 return -ENOMEM; 4474 } 4475 skb_put(skb, sizeof(*h2c)); 4476 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4477 4478 h2c->ch_num = ch_num; 4479 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4480 4481 list_for_each_entry(ch_info, chan_list, list) { 4482 elem = (struct rtw89_h2c_chinfo_elem *)skb_put(skb, sizeof(*elem)); 4483 4484 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_W0_PERIOD) | 4485 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_W0_DWELL) | 4486 le32_encode_bits(ch_info->central_ch, RTW89_H2C_CHINFO_W0_CENTER_CH) | 4487 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_W0_PRI_CH); 4488 4489 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_W1_BW) | 4490 le32_encode_bits(ch_info->notify_action, RTW89_H2C_CHINFO_W1_ACTION) | 4491 le32_encode_bits(ch_info->num_pkt, RTW89_H2C_CHINFO_W1_NUM_PKT) | 4492 le32_encode_bits(ch_info->tx_pkt, RTW89_H2C_CHINFO_W1_TX) | 4493 le32_encode_bits(ch_info->pause_data, RTW89_H2C_CHINFO_W1_PAUSE_DATA) | 4494 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_W1_BAND) | 4495 le32_encode_bits(ch_info->probe_id, RTW89_H2C_CHINFO_W1_PKT_ID) | 4496 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_W1_DFS) | 4497 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_W1_TX_NULL) | 4498 le32_encode_bits(ch_info->rand_seq_num, RTW89_H2C_CHINFO_W1_RANDOM); 4499 4500 elem->w2 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_W2_PKT0) | 4501 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_W2_PKT1) | 4502 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_W2_PKT2) | 4503 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_W2_PKT3); 4504 4505 elem->w3 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_W3_PKT4) | 4506 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_W3_PKT5) | 4507 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_W3_PKT6) | 4508 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_W3_PKT7); 4509 } 4510 4511 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4512 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4513 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4514 4515 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4516 4517 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4518 if (ret) { 4519 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4520 return ret; 4521 } 4522 4523 return 0; 4524 } 4525 4526 int rtw89_fw_h2c_scan_list_offload_be(struct rtw89_dev *rtwdev, int ch_num, 4527 struct list_head *chan_list) 4528 { 4529 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4530 struct rtw89_h2c_chinfo_elem_be *elem; 4531 struct rtw89_mac_chinfo_be *ch_info; 4532 struct rtw89_h2c_chinfo *h2c; 4533 struct sk_buff *skb; 4534 unsigned int cond; 4535 int skb_len; 4536 int ret; 4537 4538 static_assert(sizeof(*elem) == RTW89_MAC_CHINFO_SIZE); 4539 4540 skb_len = struct_size(h2c, elem, ch_num); 4541 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, skb_len); 4542 if (!skb) { 4543 rtw89_err(rtwdev, "failed to alloc skb for h2c scan list\n"); 4544 return -ENOMEM; 4545 } 4546 4547 skb_put(skb, sizeof(*h2c)); 4548 h2c = (struct rtw89_h2c_chinfo *)skb->data; 4549 4550 h2c->ch_num = ch_num; 4551 h2c->elem_size = sizeof(*elem) / 4; /* in unit of 4 bytes */ 4552 h2c->arg = u8_encode_bits(RTW89_PHY_0, RTW89_H2C_CHINFO_ARG_MAC_IDX_MASK); 4553 4554 
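/* Append one fixed-size channel element per entry in chan_list right
 * behind the chinfo header; elem_size filled above gives the
 * per-element stride in 4-byte words.
 */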
list_for_each_entry(ch_info, chan_list, list) { 4555 elem = (struct rtw89_h2c_chinfo_elem_be *)skb_put(skb, sizeof(*elem)); 4556 4557 elem->w0 = le32_encode_bits(ch_info->period, RTW89_H2C_CHINFO_BE_W0_PERIOD) | 4558 le32_encode_bits(ch_info->dwell_time, RTW89_H2C_CHINFO_BE_W0_DWELL) | 4559 le32_encode_bits(ch_info->central_ch, 4560 RTW89_H2C_CHINFO_BE_W0_CENTER_CH) | 4561 le32_encode_bits(ch_info->pri_ch, RTW89_H2C_CHINFO_BE_W0_PRI_CH); 4562 4563 elem->w1 = le32_encode_bits(ch_info->bw, RTW89_H2C_CHINFO_BE_W1_BW) | 4564 le32_encode_bits(ch_info->ch_band, RTW89_H2C_CHINFO_BE_W1_CH_BAND) | 4565 le32_encode_bits(ch_info->dfs_ch, RTW89_H2C_CHINFO_BE_W1_DFS) | 4566 le32_encode_bits(ch_info->pause_data, 4567 RTW89_H2C_CHINFO_BE_W1_PAUSE_DATA) | 4568 le32_encode_bits(ch_info->tx_null, RTW89_H2C_CHINFO_BE_W1_TX_NULL) | 4569 le32_encode_bits(ch_info->rand_seq_num, 4570 RTW89_H2C_CHINFO_BE_W1_RANDOM) | 4571 le32_encode_bits(ch_info->notify_action, 4572 RTW89_H2C_CHINFO_BE_W1_NOTIFY) | 4573 le32_encode_bits(ch_info->probe_id != 0xff ? 1 : 0, 4574 RTW89_H2C_CHINFO_BE_W1_PROBE) | 4575 le32_encode_bits(ch_info->leave_crit, 4576 RTW89_H2C_CHINFO_BE_W1_EARLY_LEAVE_CRIT) | 4577 le32_encode_bits(ch_info->chkpt_timer, 4578 RTW89_H2C_CHINFO_BE_W1_CHKPT_TIMER); 4579 4580 elem->w2 = le32_encode_bits(ch_info->leave_time, 4581 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TIME) | 4582 le32_encode_bits(ch_info->leave_th, 4583 RTW89_H2C_CHINFO_BE_W2_EARLY_LEAVE_TH) | 4584 le32_encode_bits(ch_info->tx_pkt_ctrl, 4585 RTW89_H2C_CHINFO_BE_W2_TX_PKT_CTRL); 4586 4587 elem->w3 = le32_encode_bits(ch_info->pkt_id[0], RTW89_H2C_CHINFO_BE_W3_PKT0) | 4588 le32_encode_bits(ch_info->pkt_id[1], RTW89_H2C_CHINFO_BE_W3_PKT1) | 4589 le32_encode_bits(ch_info->pkt_id[2], RTW89_H2C_CHINFO_BE_W3_PKT2) | 4590 le32_encode_bits(ch_info->pkt_id[3], RTW89_H2C_CHINFO_BE_W3_PKT3); 4591 4592 elem->w4 = le32_encode_bits(ch_info->pkt_id[4], RTW89_H2C_CHINFO_BE_W4_PKT4) | 4593 le32_encode_bits(ch_info->pkt_id[5], RTW89_H2C_CHINFO_BE_W4_PKT5) | 4594 le32_encode_bits(ch_info->pkt_id[6], RTW89_H2C_CHINFO_BE_W4_PKT6) | 4595 le32_encode_bits(ch_info->pkt_id[7], RTW89_H2C_CHINFO_BE_W4_PKT7); 4596 4597 elem->w5 = le32_encode_bits(ch_info->sw_def, RTW89_H2C_CHINFO_BE_W5_SW_DEF) | 4598 le32_encode_bits(ch_info->fw_probe0_ssids, 4599 RTW89_H2C_CHINFO_BE_W5_FW_PROBE0_SSIDS); 4600 4601 elem->w6 = le32_encode_bits(ch_info->fw_probe0_shortssids, 4602 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_SHORTSSIDS) | 4603 le32_encode_bits(ch_info->fw_probe0_bssids, 4604 RTW89_H2C_CHINFO_BE_W6_FW_PROBE0_BSSIDS); 4605 } 4606 4607 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4608 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4609 H2C_FUNC_ADD_SCANOFLD_CH, 1, 1, skb_len); 4610 4611 cond = RTW89_SCANOFLD_WAIT_COND_ADD_CH; 4612 4613 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4614 if (ret) { 4615 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to add scan ofld ch\n"); 4616 return ret; 4617 } 4618 4619 return 0; 4620 } 4621 4622 int rtw89_fw_h2c_scan_offload(struct rtw89_dev *rtwdev, 4623 struct rtw89_scan_option *option, 4624 struct rtw89_vif *rtwvif) 4625 { 4626 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4627 struct rtw89_chan *op = &rtwdev->scan_info.op_chan; 4628 struct rtw89_h2c_scanofld *h2c; 4629 u32 len = sizeof(*h2c); 4630 struct sk_buff *skb; 4631 unsigned int cond; 4632 int ret; 4633 4634 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4635 if (!skb) { 4636 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4637 return -ENOMEM; 4638 } 4639 skb_put(skb, len); 
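/* The whole command body was reserved by skb_put() above; the words
 * below are filled in place through the rtw89_h2c_scanofld layout.
 */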
4640 h2c = (struct rtw89_h2c_scanofld *)skb->data; 4641 4642 h2c->w0 = le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_W0_MACID) | 4643 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_W0_PORT_ID) | 4644 le32_encode_bits(RTW89_PHY_0, RTW89_H2C_SCANOFLD_W0_BAND) | 4645 le32_encode_bits(option->enable, RTW89_H2C_SCANOFLD_W0_OPERATION); 4646 4647 h2c->w1 = le32_encode_bits(true, RTW89_H2C_SCANOFLD_W1_NOTIFY_END) | 4648 le32_encode_bits(option->target_ch_mode, 4649 RTW89_H2C_SCANOFLD_W1_TARGET_CH_MODE) | 4650 le32_encode_bits(RTW89_SCAN_IMMEDIATE, 4651 RTW89_H2C_SCANOFLD_W1_START_MODE) | 4652 le32_encode_bits(RTW89_SCAN_ONCE, RTW89_H2C_SCANOFLD_W1_SCAN_TYPE); 4653 4654 if (option->target_ch_mode) { 4655 h2c->w1 |= le32_encode_bits(op->band_width, 4656 RTW89_H2C_SCANOFLD_W1_TARGET_CH_BW) | 4657 le32_encode_bits(op->primary_channel, 4658 RTW89_H2C_SCANOFLD_W1_TARGET_PRI_CH) | 4659 le32_encode_bits(op->channel, 4660 RTW89_H2C_SCANOFLD_W1_TARGET_CENTRAL_CH); 4661 h2c->w0 |= le32_encode_bits(op->band_type, 4662 RTW89_H2C_SCANOFLD_W0_TARGET_CH_BAND); 4663 } 4664 4665 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4666 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4667 H2C_FUNC_SCANOFLD, 1, 1, 4668 len); 4669 4670 if (option->enable) 4671 cond = RTW89_SCANOFLD_WAIT_COND_START; 4672 else 4673 cond = RTW89_SCANOFLD_WAIT_COND_STOP; 4674 4675 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4676 if (ret) { 4677 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan ofld\n"); 4678 return ret; 4679 } 4680 4681 return 0; 4682 } 4683 4684 static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev, 4685 struct rtw89_scan_option *option) 4686 { 4687 struct ieee80211_supported_band *sband; 4688 struct ieee80211_channel *chan; 4689 u8 i, idx; 4690 4691 sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ]; 4692 4693 for (i = 0; i < sband->n_channels; i++) { 4694 chan = &sband->channels[i]; 4695 if (chan->flags & IEEE80211_CHAN_DISABLED) { 4696 idx = (chan->hw_value - 1) / 4; 4697 option->prohib_chan |= BIT(idx); 4698 } 4699 } 4700 } 4701 4702 int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, 4703 struct rtw89_scan_option *option, 4704 struct rtw89_vif *rtwvif) 4705 { 4706 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 4707 struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; 4708 struct rtw89_h2c_scanofld_be_macc_role *macc_role; 4709 struct rtw89_chan *op = &scan_info->op_chan; 4710 struct rtw89_h2c_scanofld_be_opch *opch; 4711 struct rtw89_pktofld_info *pkt_info; 4712 struct rtw89_h2c_scanofld_be *h2c; 4713 struct sk_buff *skb; 4714 u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role; 4715 u8 opch_size = sizeof(*opch) * option->num_opch; 4716 u8 probe_id[NUM_NL80211_BANDS]; 4717 unsigned int cond; 4718 void *ptr; 4719 int ret; 4720 u32 len; 4721 u8 i; 4722 4723 rtw89_scan_get_6g_disabled_chan(rtwdev, option); 4724 4725 len = sizeof(*h2c) + macc_role_size + opch_size; 4726 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4727 if (!skb) { 4728 rtw89_err(rtwdev, "failed to alloc skb for h2c scan offload\n"); 4729 return -ENOMEM; 4730 } 4731 4732 skb_put(skb, len); 4733 h2c = (struct rtw89_h2c_scanofld_be *)skb->data; 4734 ptr = skb->data; 4735 4736 memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id)); 4737 4738 list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) { 4739 if (pkt_info->wildcard_6ghz) { 4740 /* Provide wildcard as template */ 4741 probe_id[NL80211_BAND_6GHZ] = pkt_info->id; 4742 break; 4743 } 4744 } 4745 4746 h2c->w0 = 
le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) | 4747 le32_encode_bits(option->scan_mode, 4748 RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) | 4749 le32_encode_bits(option->repeat, RTW89_H2C_SCANOFLD_BE_W0_REPEAT) | 4750 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_NOTIFY_END) | 4751 le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_LEARN_CH) | 4752 le32_encode_bits(rtwvif->mac_id, RTW89_H2C_SCANOFLD_BE_W0_MACID) | 4753 le32_encode_bits(rtwvif->port, RTW89_H2C_SCANOFLD_BE_W0_PORT) | 4754 le32_encode_bits(option->band, RTW89_H2C_SCANOFLD_BE_W0_BAND); 4755 4756 h2c->w1 = le32_encode_bits(option->num_macc_role, RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE) | 4757 le32_encode_bits(option->num_opch, RTW89_H2C_SCANOFLD_BE_W1_NUM_OP) | 4758 le32_encode_bits(option->norm_pd, RTW89_H2C_SCANOFLD_BE_W1_NORM_PD); 4759 4760 h2c->w2 = le32_encode_bits(option->slow_pd, RTW89_H2C_SCANOFLD_BE_W2_SLOW_PD) | 4761 le32_encode_bits(option->norm_cy, RTW89_H2C_SCANOFLD_BE_W2_NORM_CY) | 4762 le32_encode_bits(option->opch_end, RTW89_H2C_SCANOFLD_BE_W2_OPCH_END); 4763 4764 h2c->w3 = le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SSID) | 4765 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_SHORT_SSID) | 4766 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W3_NUM_BSSID) | 4767 le32_encode_bits(probe_id[NL80211_BAND_2GHZ], RTW89_H2C_SCANOFLD_BE_W3_PROBEID); 4768 4769 h2c->w4 = le32_encode_bits(probe_id[NL80211_BAND_5GHZ], 4770 RTW89_H2C_SCANOFLD_BE_W4_PROBE_5G) | 4771 le32_encode_bits(probe_id[NL80211_BAND_6GHZ], 4772 RTW89_H2C_SCANOFLD_BE_W4_PROBE_6G) | 4773 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_W4_DELAY_START); 4774 4775 h2c->w5 = le32_encode_bits(option->mlo_mode, RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE); 4776 4777 h2c->w6 = le32_encode_bits(option->prohib_chan, 4778 RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); 4779 h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, 4780 RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); 4781 ptr += sizeof(*h2c); 4782 4783 for (i = 0; i < option->num_macc_role; i++) { 4784 macc_role = (struct rtw89_h2c_scanofld_be_macc_role *)&h2c->role[i]; 4785 macc_role->w0 = 4786 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_BAND) | 4787 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_PORT) | 4788 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_MACID) | 4789 le32_encode_bits(0, RTW89_H2C_SCANOFLD_BE_MACC_ROLE_W0_OPCH_END); 4790 ptr += sizeof(*macc_role); 4791 } 4792 4793 for (i = 0; i < option->num_opch; i++) { 4794 opch = ptr; 4795 opch->w0 = le32_encode_bits(rtwvif->mac_id, 4796 RTW89_H2C_SCANOFLD_BE_OPCH_W0_MACID) | 4797 le32_encode_bits(option->band, 4798 RTW89_H2C_SCANOFLD_BE_OPCH_W0_BAND) | 4799 le32_encode_bits(rtwvif->port, 4800 RTW89_H2C_SCANOFLD_BE_OPCH_W0_PORT) | 4801 le32_encode_bits(RTW89_SCAN_OPMODE_INTV, 4802 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY) | 4803 le32_encode_bits(true, 4804 RTW89_H2C_SCANOFLD_BE_OPCH_W0_TXNULL) | 4805 le32_encode_bits(RTW89_OFF_CHAN_TIME / 10, 4806 RTW89_H2C_SCANOFLD_BE_OPCH_W0_POLICY_VAL); 4807 4808 opch->w1 = le32_encode_bits(RTW89_CHANNEL_TIME, 4809 RTW89_H2C_SCANOFLD_BE_OPCH_W1_DURATION) | 4810 le32_encode_bits(op->band_type, 4811 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CH_BAND) | 4812 le32_encode_bits(op->band_width, 4813 RTW89_H2C_SCANOFLD_BE_OPCH_W1_BW) | 4814 le32_encode_bits(0x3, 4815 RTW89_H2C_SCANOFLD_BE_OPCH_W1_NOTIFY) | 4816 le32_encode_bits(op->primary_channel, 4817 RTW89_H2C_SCANOFLD_BE_OPCH_W1_PRI_CH) | 4818 le32_encode_bits(op->channel, 4819 RTW89_H2C_SCANOFLD_BE_OPCH_W1_CENTRAL_CH); 4820 4821 opch->w2 = le32_encode_bits(0, 4822 
RTW89_H2C_SCANOFLD_BE_OPCH_W2_PKTS_CTRL) | 4823 le32_encode_bits(0, 4824 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SW_DEF) | 4825 le32_encode_bits(2, 4826 RTW89_H2C_SCANOFLD_BE_OPCH_W2_SS); 4827 4828 opch->w3 = le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 4829 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT0) | 4830 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 4831 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT1) | 4832 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 4833 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT2) | 4834 le32_encode_bits(RTW89_SCANOFLD_PKT_NONE, 4835 RTW89_H2C_SCANOFLD_BE_OPCH_W3_PKT3); 4836 ptr += sizeof(*opch); 4837 } 4838 4839 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4840 H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD, 4841 H2C_FUNC_SCANOFLD_BE, 1, 1, 4842 len); 4843 4844 if (option->enable) 4845 cond = RTW89_SCANOFLD_BE_WAIT_COND_START; 4846 else 4847 cond = RTW89_SCANOFLD_BE_WAIT_COND_STOP; 4848 4849 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 4850 if (ret) { 4851 rtw89_debug(rtwdev, RTW89_DBG_FW, "failed to scan be ofld\n"); 4852 return ret; 4853 } 4854 4855 return 0; 4856 } 4857 4858 int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev, 4859 struct rtw89_fw_h2c_rf_reg_info *info, 4860 u16 len, u8 page) 4861 { 4862 struct sk_buff *skb; 4863 u8 class = info->rf_path == RF_PATH_A ? 4864 H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B; 4865 int ret; 4866 4867 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4868 if (!skb) { 4869 rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n"); 4870 return -ENOMEM; 4871 } 4872 skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len); 4873 4874 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4875 H2C_CAT_OUTSRC, class, page, 0, 0, 4876 len); 4877 4878 ret = rtw89_h2c_tx(rtwdev, skb, false); 4879 if (ret) { 4880 rtw89_err(rtwdev, "failed to send h2c\n"); 4881 goto fail; 4882 } 4883 4884 return 0; 4885 fail: 4886 dev_kfree_skb_any(skb); 4887 4888 return ret; 4889 } 4890 4891 int rtw89_fw_h2c_rf_ntfy_mcc(struct rtw89_dev *rtwdev) 4892 { 4893 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 4894 struct rtw89_fw_h2c_rf_get_mccch *mccch; 4895 struct sk_buff *skb; 4896 int ret; 4897 u8 idx; 4898 4899 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, sizeof(*mccch)); 4900 if (!skb) { 4901 rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n"); 4902 return -ENOMEM; 4903 } 4904 skb_put(skb, sizeof(*mccch)); 4905 mccch = (struct rtw89_fw_h2c_rf_get_mccch *)skb->data; 4906 4907 idx = rfk_mcc->table_idx; 4908 mccch->ch_0 = cpu_to_le32(rfk_mcc->ch[0]); 4909 mccch->ch_1 = cpu_to_le32(rfk_mcc->ch[1]); 4910 mccch->band_0 = cpu_to_le32(rfk_mcc->band[0]); 4911 mccch->band_1 = cpu_to_le32(rfk_mcc->band[1]); 4912 mccch->current_channel = cpu_to_le32(rfk_mcc->ch[idx]); 4913 mccch->current_band_type = cpu_to_le32(rfk_mcc->band[idx]); 4914 4915 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4916 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_NOTIFY, 4917 H2C_FUNC_OUTSRC_RF_GET_MCCCH, 0, 0, 4918 sizeof(*mccch)); 4919 4920 ret = rtw89_h2c_tx(rtwdev, skb, false); 4921 if (ret) { 4922 rtw89_err(rtwdev, "failed to send h2c\n"); 4923 goto fail; 4924 } 4925 4926 return 0; 4927 fail: 4928 dev_kfree_skb_any(skb); 4929 4930 return ret; 4931 } 4932 EXPORT_SYMBOL(rtw89_fw_h2c_rf_ntfy_mcc); 4933 4934 int rtw89_fw_h2c_rf_pre_ntfy(struct rtw89_dev *rtwdev, 4935 enum rtw89_phy_idx phy_idx) 4936 { 4937 struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; 4938 struct rtw89_fw_h2c_rfk_pre_info *h2c; 4939 u8 tbl_sel = rfk_mcc->table_idx; 4940 u32 len = sizeof(*h2c); 4941 struct sk_buff *skb; 4942 u8 tbl, path; 4943 
u32 val32; 4944 int ret; 4945 4946 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 4947 if (!skb) { 4948 rtw89_err(rtwdev, "failed to alloc skb for h2c rfk_pre_ntfy\n"); 4949 return -ENOMEM; 4950 } 4951 skb_put(skb, len); 4952 h2c = (struct rtw89_fw_h2c_rfk_pre_info *)skb->data; 4953 4954 h2c->mlo_mode = cpu_to_le32(rtwdev->mlo_dbcc_mode); 4955 4956 BUILD_BUG_ON(NUM_OF_RTW89_FW_RFK_TBL > RTW89_RFK_CHS_NR); 4957 4958 for (tbl = 0; tbl < NUM_OF_RTW89_FW_RFK_TBL; tbl++) { 4959 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 4960 h2c->dbcc.ch[path][tbl] = cpu_to_le32(rfk_mcc->ch[tbl]); 4961 h2c->dbcc.band[path][tbl] = cpu_to_le32(rfk_mcc->band[tbl]); 4962 } 4963 } 4964 4965 for (path = 0; path < NUM_OF_RTW89_FW_RFK_PATH; path++) { 4966 h2c->tbl.cur_ch[path] = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 4967 h2c->tbl.cur_band[path] = cpu_to_le32(rfk_mcc->band[tbl_sel]); 4968 } 4969 4970 h2c->phy_idx = cpu_to_le32(phy_idx); 4971 h2c->cur_band = cpu_to_le32(rfk_mcc->band[tbl_sel]); 4972 h2c->cur_bw = cpu_to_le32(rfk_mcc->bw[tbl_sel]); 4973 h2c->cur_center_ch = cpu_to_le32(rfk_mcc->ch[tbl_sel]); 4974 4975 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL, B_COEF_SEL_IQC_V1); 4976 h2c->ktbl_sel0 = cpu_to_le32(val32); 4977 val32 = rtw89_phy_read32_mask(rtwdev, R_COEF_SEL_C1, B_COEF_SEL_IQC_V1); 4978 h2c->ktbl_sel1 = cpu_to_le32(val32); 4979 val32 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_CFGCH, RFREG_MASK); 4980 h2c->rfmod0 = cpu_to_le32(val32); 4981 val32 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_CFGCH, RFREG_MASK); 4982 h2c->rfmod1 = cpu_to_le32(val32); 4983 4984 if (rtw89_is_mlo_1_1(rtwdev)) 4985 h2c->mlo_1_1 = cpu_to_le32(1); 4986 4987 h2c->rfe_type = cpu_to_le32(rtwdev->efuse.rfe_type); 4988 4989 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 4990 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 4991 H2C_FUNC_RFK_PRE_NOTIFY, 0, 0, 4992 len); 4993 4994 ret = rtw89_h2c_tx(rtwdev, skb, false); 4995 if (ret) { 4996 rtw89_err(rtwdev, "failed to send h2c\n"); 4997 goto fail; 4998 } 4999 5000 return 0; 5001 fail: 5002 dev_kfree_skb_any(skb); 5003 5004 return ret; 5005 } 5006 5007 int rtw89_fw_h2c_rf_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, 5008 enum rtw89_tssi_mode tssi_mode) 5009 { 5010 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5011 RTW89_SUB_ENTITY_0); 5012 struct rtw89_hal *hal = &rtwdev->hal; 5013 struct rtw89_h2c_rf_tssi *h2c; 5014 u32 len = sizeof(*h2c); 5015 struct sk_buff *skb; 5016 int ret; 5017 5018 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5019 if (!skb) { 5020 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TSSI\n"); 5021 return -ENOMEM; 5022 } 5023 skb_put(skb, len); 5024 h2c = (struct rtw89_h2c_rf_tssi *)skb->data; 5025 5026 h2c->len = cpu_to_le16(len); 5027 h2c->phy = phy_idx; 5028 h2c->ch = chan->channel; 5029 h2c->bw = chan->band_width; 5030 h2c->band = chan->band_type; 5031 h2c->hwtx_en = true; 5032 h2c->cv = hal->cv; 5033 h2c->tssi_mode = tssi_mode; 5034 5035 rtw89_phy_rfk_tssi_fill_fwcmd_efuse_to_de(rtwdev, phy_idx, chan, h2c); 5036 rtw89_phy_rfk_tssi_fill_fwcmd_tmeter_tbl(rtwdev, phy_idx, chan, h2c); 5037 5038 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5039 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5040 H2C_FUNC_RFK_TSSI_OFFLOAD, 0, 0, len); 5041 5042 ret = rtw89_h2c_tx(rtwdev, skb, false); 5043 if (ret) { 5044 rtw89_err(rtwdev, "failed to send h2c\n"); 5045 goto fail; 5046 } 5047 5048 return 0; 5049 fail: 5050 dev_kfree_skb_any(skb); 5051 5052 return ret; 5053 } 5054 5055 int rtw89_fw_h2c_rf_iqk(struct rtw89_dev *rtwdev, enum 
rtw89_phy_idx phy_idx) 5056 { 5057 struct rtw89_h2c_rf_iqk *h2c; 5058 u32 len = sizeof(*h2c); 5059 struct sk_buff *skb; 5060 int ret; 5061 5062 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5063 if (!skb) { 5064 rtw89_err(rtwdev, "failed to alloc skb for h2c RF IQK\n"); 5065 return -ENOMEM; 5066 } 5067 skb_put(skb, len); 5068 h2c = (struct rtw89_h2c_rf_iqk *)skb->data; 5069 5070 h2c->phy_idx = cpu_to_le32(phy_idx); 5071 h2c->dbcc = cpu_to_le32(rtwdev->dbcc_en); 5072 5073 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5074 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5075 H2C_FUNC_RFK_IQK_OFFLOAD, 0, 0, len); 5076 5077 ret = rtw89_h2c_tx(rtwdev, skb, false); 5078 if (ret) { 5079 rtw89_err(rtwdev, "failed to send h2c\n"); 5080 goto fail; 5081 } 5082 5083 return 0; 5084 fail: 5085 dev_kfree_skb_any(skb); 5086 5087 return ret; 5088 } 5089 5090 int rtw89_fw_h2c_rf_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5091 { 5092 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5093 RTW89_SUB_ENTITY_0); 5094 struct rtw89_h2c_rf_dpk *h2c; 5095 u32 len = sizeof(*h2c); 5096 struct sk_buff *skb; 5097 int ret; 5098 5099 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5100 if (!skb) { 5101 rtw89_err(rtwdev, "failed to alloc skb for h2c RF DPK\n"); 5102 return -ENOMEM; 5103 } 5104 skb_put(skb, len); 5105 h2c = (struct rtw89_h2c_rf_dpk *)skb->data; 5106 5107 h2c->len = len; 5108 h2c->phy = phy_idx; 5109 h2c->dpk_enable = true; 5110 h2c->kpath = RF_AB; 5111 h2c->cur_band = chan->band_type; 5112 h2c->cur_bw = chan->band_width; 5113 h2c->cur_ch = chan->channel; 5114 h2c->dpk_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5115 5116 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5117 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5118 H2C_FUNC_RFK_DPK_OFFLOAD, 0, 0, len); 5119 5120 ret = rtw89_h2c_tx(rtwdev, skb, false); 5121 if (ret) { 5122 rtw89_err(rtwdev, "failed to send h2c\n"); 5123 goto fail; 5124 } 5125 5126 return 0; 5127 fail: 5128 dev_kfree_skb_any(skb); 5129 5130 return ret; 5131 } 5132 5133 int rtw89_fw_h2c_rf_txgapk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5134 { 5135 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5136 RTW89_SUB_ENTITY_0); 5137 struct rtw89_hal *hal = &rtwdev->hal; 5138 struct rtw89_h2c_rf_txgapk *h2c; 5139 u32 len = sizeof(*h2c); 5140 struct sk_buff *skb; 5141 int ret; 5142 5143 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5144 if (!skb) { 5145 rtw89_err(rtwdev, "failed to alloc skb for h2c RF TXGAPK\n"); 5146 return -ENOMEM; 5147 } 5148 skb_put(skb, len); 5149 h2c = (struct rtw89_h2c_rf_txgapk *)skb->data; 5150 5151 h2c->len = len; 5152 h2c->ktype = 2; 5153 h2c->phy = phy_idx; 5154 h2c->kpath = RF_AB; 5155 h2c->band = chan->band_type; 5156 h2c->bw = chan->band_width; 5157 h2c->ch = chan->channel; 5158 h2c->cv = hal->cv; 5159 5160 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5161 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5162 H2C_FUNC_RFK_TXGAPK_OFFLOAD, 0, 0, len); 5163 5164 ret = rtw89_h2c_tx(rtwdev, skb, false); 5165 if (ret) { 5166 rtw89_err(rtwdev, "failed to send h2c\n"); 5167 goto fail; 5168 } 5169 5170 return 0; 5171 fail: 5172 dev_kfree_skb_any(skb); 5173 5174 return ret; 5175 } 5176 5177 int rtw89_fw_h2c_rf_dack(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5178 { 5179 struct rtw89_h2c_rf_dack *h2c; 5180 u32 len = sizeof(*h2c); 5181 struct sk_buff *skb; 5182 int ret; 5183 5184 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5185 if (!skb) { 5186 rtw89_err(rtwdev, "failed to alloc skb for h2c RF 
DACK\n"); 5187 return -ENOMEM; 5188 } 5189 skb_put(skb, len); 5190 h2c = (struct rtw89_h2c_rf_dack *)skb->data; 5191 5192 h2c->len = cpu_to_le32(len); 5193 h2c->phy = cpu_to_le32(phy_idx); 5194 h2c->type = cpu_to_le32(0); 5195 5196 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5197 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5198 H2C_FUNC_RFK_DACK_OFFLOAD, 0, 0, len); 5199 5200 ret = rtw89_h2c_tx(rtwdev, skb, false); 5201 if (ret) { 5202 rtw89_err(rtwdev, "failed to send h2c\n"); 5203 goto fail; 5204 } 5205 5206 return 0; 5207 fail: 5208 dev_kfree_skb_any(skb); 5209 5210 return ret; 5211 } 5212 5213 int rtw89_fw_h2c_rf_rxdck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) 5214 { 5215 const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, 5216 RTW89_SUB_ENTITY_0); 5217 struct rtw89_h2c_rf_rxdck *h2c; 5218 u32 len = sizeof(*h2c); 5219 struct sk_buff *skb; 5220 int ret; 5221 5222 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5223 if (!skb) { 5224 rtw89_err(rtwdev, "failed to alloc skb for h2c RF RXDCK\n"); 5225 return -ENOMEM; 5226 } 5227 skb_put(skb, len); 5228 h2c = (struct rtw89_h2c_rf_rxdck *)skb->data; 5229 5230 h2c->len = len; 5231 h2c->phy = phy_idx; 5232 h2c->is_afe = false; 5233 h2c->kpath = RF_AB; 5234 h2c->cur_band = chan->band_type; 5235 h2c->cur_bw = chan->band_width; 5236 h2c->cur_ch = chan->channel; 5237 h2c->rxdck_dbg_en = rtw89_debug_is_enabled(rtwdev, RTW89_DBG_RFK); 5238 5239 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5240 H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RF_FW_RFK, 5241 H2C_FUNC_RFK_RXDCK_OFFLOAD, 0, 0, len); 5242 5243 ret = rtw89_h2c_tx(rtwdev, skb, false); 5244 if (ret) { 5245 rtw89_err(rtwdev, "failed to send h2c\n"); 5246 goto fail; 5247 } 5248 5249 return 0; 5250 fail: 5251 dev_kfree_skb_any(skb); 5252 5253 return ret; 5254 } 5255 5256 int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev, 5257 u8 h2c_class, u8 h2c_func, u8 *buf, u16 len, 5258 bool rack, bool dack) 5259 { 5260 struct sk_buff *skb; 5261 int ret; 5262 5263 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 5264 if (!skb) { 5265 rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n"); 5266 return -ENOMEM; 5267 } 5268 skb_put_data(skb, buf, len); 5269 5270 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 5271 H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack, 5272 len); 5273 5274 ret = rtw89_h2c_tx(rtwdev, skb, false); 5275 if (ret) { 5276 rtw89_err(rtwdev, "failed to send h2c\n"); 5277 goto fail; 5278 } 5279 5280 return 0; 5281 fail: 5282 dev_kfree_skb_any(skb); 5283 5284 return ret; 5285 } 5286 5287 int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len) 5288 { 5289 struct sk_buff *skb; 5290 int ret; 5291 5292 skb = rtw89_fw_h2c_alloc_skb_no_hdr(rtwdev, len); 5293 if (!skb) { 5294 rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n"); 5295 return -ENOMEM; 5296 } 5297 skb_put_data(skb, buf, len); 5298 5299 ret = rtw89_h2c_tx(rtwdev, skb, false); 5300 if (ret) { 5301 rtw89_err(rtwdev, "failed to send h2c\n"); 5302 goto fail; 5303 } 5304 5305 return 0; 5306 fail: 5307 dev_kfree_skb_any(skb); 5308 5309 return ret; 5310 } 5311 5312 void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev) 5313 { 5314 struct rtw89_early_h2c *early_h2c; 5315 5316 lockdep_assert_held(&rtwdev->mutex); 5317 5318 list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) { 5319 rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len); 5320 } 5321 } 5322 5323 void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev) 5324 { 5325 struct rtw89_early_h2c *early_h2c, *tmp; 5326 5327 
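/* Tear the early H2C list down under the driver mutex so this cannot
 * race with rtw89_fw_send_all_early_h2c(), which walks the same list
 * with the mutex held.
 */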
mutex_lock(&rtwdev->mutex); 5328 list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) { 5329 list_del(&early_h2c->list); 5330 kfree(early_h2c->h2c); 5331 kfree(early_h2c); 5332 } 5333 mutex_unlock(&rtwdev->mutex); 5334 } 5335 5336 static void rtw89_fw_c2h_parse_attr(struct sk_buff *c2h) 5337 { 5338 const struct rtw89_c2h_hdr *hdr = (const struct rtw89_c2h_hdr *)c2h->data; 5339 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5340 5341 attr->category = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CATEGORY); 5342 attr->class = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_CLASS); 5343 attr->func = le32_get_bits(hdr->w0, RTW89_C2H_HDR_W0_FUNC); 5344 attr->len = le32_get_bits(hdr->w1, RTW89_C2H_HDR_W1_LEN); 5345 } 5346 5347 static bool rtw89_fw_c2h_chk_atomic(struct rtw89_dev *rtwdev, 5348 struct sk_buff *c2h) 5349 { 5350 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(c2h); 5351 u8 category = attr->category; 5352 u8 class = attr->class; 5353 u8 func = attr->func; 5354 5355 switch (category) { 5356 default: 5357 return false; 5358 case RTW89_C2H_CAT_MAC: 5359 return rtw89_mac_c2h_chk_atomic(rtwdev, c2h, class, func); 5360 case RTW89_C2H_CAT_OUTSRC: 5361 return rtw89_phy_c2h_chk_atomic(rtwdev, class, func); 5362 } 5363 } 5364 5365 void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h) 5366 { 5367 rtw89_fw_c2h_parse_attr(c2h); 5368 if (!rtw89_fw_c2h_chk_atomic(rtwdev, c2h)) 5369 goto enqueue; 5370 5371 rtw89_fw_c2h_cmd_handle(rtwdev, c2h); 5372 dev_kfree_skb_any(c2h); 5373 return; 5374 5375 enqueue: 5376 skb_queue_tail(&rtwdev->c2h_queue, c2h); 5377 ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work); 5378 } 5379 5380 static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev, 5381 struct sk_buff *skb) 5382 { 5383 struct rtw89_fw_c2h_attr *attr = RTW89_SKB_C2H_CB(skb); 5384 u8 category = attr->category; 5385 u8 class = attr->class; 5386 u8 func = attr->func; 5387 u16 len = attr->len; 5388 bool dump = true; 5389 5390 if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags)) 5391 return; 5392 5393 switch (category) { 5394 case RTW89_C2H_CAT_TEST: 5395 break; 5396 case RTW89_C2H_CAT_MAC: 5397 rtw89_mac_c2h_handle(rtwdev, skb, len, class, func); 5398 if (class == RTW89_MAC_C2H_CLASS_INFO && 5399 func == RTW89_MAC_C2H_FUNC_C2H_LOG) 5400 dump = false; 5401 break; 5402 case RTW89_C2H_CAT_OUTSRC: 5403 if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN && 5404 class <= RTW89_PHY_C2H_CLASS_BTC_MAX) 5405 rtw89_btc_c2h_handle(rtwdev, skb, len, class, func); 5406 else 5407 rtw89_phy_c2h_handle(rtwdev, skb, len, class, func); 5408 break; 5409 } 5410 5411 if (dump) 5412 rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len); 5413 } 5414 5415 void rtw89_fw_c2h_work(struct work_struct *work) 5416 { 5417 struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev, 5418 c2h_work); 5419 struct sk_buff *skb, *tmp; 5420 5421 skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) { 5422 skb_unlink(skb, &rtwdev->c2h_queue); 5423 mutex_lock(&rtwdev->mutex); 5424 rtw89_fw_c2h_cmd_handle(rtwdev, skb); 5425 mutex_unlock(&rtwdev->mutex); 5426 dev_kfree_skb_any(skb); 5427 } 5428 } 5429 5430 static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev, 5431 struct rtw89_mac_h2c_info *info) 5432 { 5433 const struct rtw89_chip_info *chip = rtwdev->chip; 5434 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5435 const u32 *h2c_reg = chip->h2c_regs; 5436 u8 i, val, len; 5437 int ret; 5438 5439 ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false, 5440 rtwdev, chip->h2c_ctrl_reg); 5441 if 
(ret) { 5442 rtw89_warn(rtwdev, "FW does not process h2c registers\n"); 5443 return ret; 5444 } 5445 5446 len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN, 5447 sizeof(info->u.h2creg[0])); 5448 5449 u32p_replace_bits(&info->u.hdr.w0, info->id, RTW89_H2CREG_HDR_FUNC_MASK); 5450 u32p_replace_bits(&info->u.hdr.w0, len, RTW89_H2CREG_HDR_LEN_MASK); 5451 5452 for (i = 0; i < RTW89_H2CREG_MAX; i++) 5453 rtw89_write32(rtwdev, h2c_reg[i], info->u.h2creg[i]); 5454 5455 fw_info->h2c_counter++; 5456 rtw89_write8_mask(rtwdev, chip->h2c_counter_reg.addr, 5457 chip->h2c_counter_reg.mask, fw_info->h2c_counter); 5458 rtw89_write8(rtwdev, chip->h2c_ctrl_reg, B_AX_H2CREG_TRIGGER); 5459 5460 return 0; 5461 } 5462 5463 static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev, 5464 struct rtw89_mac_c2h_info *info) 5465 { 5466 const struct rtw89_chip_info *chip = rtwdev->chip; 5467 struct rtw89_fw_info *fw_info = &rtwdev->fw; 5468 const u32 *c2h_reg = chip->c2h_regs; 5469 u32 ret; 5470 u8 i, val; 5471 5472 info->id = RTW89_FWCMD_C2HREG_FUNC_NULL; 5473 5474 ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1, 5475 RTW89_C2H_TIMEOUT, false, rtwdev, 5476 chip->c2h_ctrl_reg); 5477 if (ret) { 5478 rtw89_warn(rtwdev, "c2h reg timeout\n"); 5479 return ret; 5480 } 5481 5482 for (i = 0; i < RTW89_C2HREG_MAX; i++) 5483 info->u.c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]); 5484 5485 rtw89_write8(rtwdev, chip->c2h_ctrl_reg, 0); 5486 5487 info->id = u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_FUNC_MASK); 5488 info->content_len = 5489 (u32_get_bits(info->u.hdr.w0, RTW89_C2HREG_HDR_LEN_MASK) << 2) - 5490 RTW89_C2HREG_HDR_LEN; 5491 5492 fw_info->c2h_counter++; 5493 rtw89_write8_mask(rtwdev, chip->c2h_counter_reg.addr, 5494 chip->c2h_counter_reg.mask, fw_info->c2h_counter); 5495 5496 return 0; 5497 } 5498 5499 int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev, 5500 struct rtw89_mac_h2c_info *h2c_info, 5501 struct rtw89_mac_c2h_info *c2h_info) 5502 { 5503 u32 ret; 5504 5505 if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE) 5506 lockdep_assert_held(&rtwdev->mutex); 5507 5508 if (!h2c_info && !c2h_info) 5509 return -EINVAL; 5510 5511 if (!h2c_info) 5512 goto recv_c2h; 5513 5514 ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info); 5515 if (ret) 5516 return ret; 5517 5518 recv_c2h: 5519 if (!c2h_info) 5520 return 0; 5521 5522 ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info); 5523 if (ret) 5524 return ret; 5525 5526 return 0; 5527 } 5528 5529 void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev) 5530 { 5531 if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) { 5532 rtw89_err(rtwdev, "[ERR]pwr is off\n"); 5533 return; 5534 } 5535 5536 rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0)); 5537 rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1)); 5538 rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2)); 5539 rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3)); 5540 rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n", 5541 rtw89_read32(rtwdev, R_AX_HALT_C2H)); 5542 rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n", 5543 rtw89_read32(rtwdev, R_AX_SER_DBG_INFO)); 5544 5545 rtw89_fw_prog_cnt_dump(rtwdev); 5546 } 5547 5548 static void rtw89_release_pkt_list(struct rtw89_dev *rtwdev) 5549 { 5550 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5551 struct rtw89_pktofld_info *info, *tmp; 5552 u8 idx; 5553 5554 for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) { 5555 if (!(rtwdev->chip->support_bands & BIT(idx))) 5556 continue; 
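/* For each offloaded probe template of this band: ask the firmware to
 * delete the packet if it is still marked in pkt_offload, then free
 * the local bookkeeping entry.
 */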
5557 5558 list_for_each_entry_safe(info, tmp, &pkt_list[idx], list) { 5559 if (test_bit(info->id, rtwdev->pkt_offload)) 5560 rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id); 5561 list_del(&info->list); 5562 kfree(info); 5563 } 5564 } 5565 } 5566 5567 static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev, 5568 struct rtw89_vif *rtwvif, 5569 struct rtw89_pktofld_info *info, 5570 enum nl80211_band band, u8 ssid_idx) 5571 { 5572 struct cfg80211_scan_request *req = rtwvif->scan_req; 5573 5574 if (band != NL80211_BAND_6GHZ) 5575 return false; 5576 5577 if (req->ssids[ssid_idx].ssid_len) { 5578 memcpy(info->ssid, req->ssids[ssid_idx].ssid, 5579 req->ssids[ssid_idx].ssid_len); 5580 info->ssid_len = req->ssids[ssid_idx].ssid_len; 5581 return false; 5582 } else { 5583 info->wildcard_6ghz = true; 5584 return true; 5585 } 5586 } 5587 5588 static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev, 5589 struct rtw89_vif *rtwvif, 5590 struct sk_buff *skb, u8 ssid_idx) 5591 { 5592 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5593 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5594 struct rtw89_pktofld_info *info; 5595 struct sk_buff *new; 5596 int ret = 0; 5597 u8 band; 5598 5599 for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { 5600 if (!(rtwdev->chip->support_bands & BIT(band))) 5601 continue; 5602 5603 new = skb_copy(skb, GFP_KERNEL); 5604 if (!new) { 5605 ret = -ENOMEM; 5606 goto out; 5607 } 5608 skb_put_data(new, ies->ies[band], ies->len[band]); 5609 skb_put_data(new, ies->common_ies, ies->common_ie_len); 5610 5611 info = kzalloc(sizeof(*info), GFP_KERNEL); 5612 if (!info) { 5613 ret = -ENOMEM; 5614 kfree_skb(new); 5615 goto out; 5616 } 5617 5618 rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band, 5619 ssid_idx); 5620 5621 ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new); 5622 if (ret) { 5623 kfree_skb(new); 5624 kfree(info); 5625 goto out; 5626 } 5627 5628 list_add_tail(&info->list, &scan_info->pkt_list[band]); 5629 kfree_skb(new); 5630 } 5631 out: 5632 return ret; 5633 } 5634 5635 static int rtw89_hw_scan_update_probe_req(struct rtw89_dev *rtwdev, 5636 struct rtw89_vif *rtwvif) 5637 { 5638 struct cfg80211_scan_request *req = rtwvif->scan_req; 5639 struct sk_buff *skb; 5640 u8 num = req->n_ssids, i; 5641 int ret; 5642 5643 for (i = 0; i < num; i++) { 5644 skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr, 5645 req->ssids[i].ssid, 5646 req->ssids[i].ssid_len, 5647 req->ie_len); 5648 if (!skb) 5649 return -ENOMEM; 5650 5651 ret = rtw89_append_probe_req_ie(rtwdev, rtwvif, skb, i); 5652 kfree_skb(skb); 5653 5654 if (ret) 5655 return ret; 5656 } 5657 5658 return 0; 5659 } 5660 5661 static int rtw89_update_6ghz_rnr_chan(struct rtw89_dev *rtwdev, 5662 struct cfg80211_scan_request *req, 5663 struct rtw89_mac_chinfo *ch_info) 5664 { 5665 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5666 struct list_head *pkt_list = rtwdev->scan_info.pkt_list; 5667 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 5668 struct ieee80211_scan_ies *ies = rtwvif->scan_ies; 5669 struct cfg80211_scan_6ghz_params *params; 5670 struct rtw89_pktofld_info *info, *tmp; 5671 struct ieee80211_hdr *hdr; 5672 struct sk_buff *skb; 5673 bool found; 5674 int ret = 0; 5675 u8 i; 5676 5677 if (!req->n_6ghz_params) 5678 return 0; 5679 5680 for (i = 0; i < req->n_6ghz_params; i++) { 5681 params = &req->scan_6ghz_params[i]; 5682 5683 if (req->channels[params->channel_idx]->hw_value != 5684 ch_info->pri_ch) 5685 continue; 5686 5687 found = false; 5688 
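/* Skip RNR BSSIDs that already have a directed probe request
 * offloaded on the 6 GHz packet list.
 */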
		list_for_each_entry(tmp, &pkt_list[NL80211_BAND_6GHZ], list) {
			if (ether_addr_equal(tmp->bssid, params->bssid)) {
				found = true;
				break;
			}
		}
		if (found)
			continue;

		skb = ieee80211_probereq_get(rtwdev->hw, rtwvif->mac_addr,
					     NULL, 0, req->ie_len);
		if (!skb) {
			ret = -ENOMEM;
			goto out;
		}
		skb_put_data(skb, ies->ies[NL80211_BAND_6GHZ], ies->len[NL80211_BAND_6GHZ]);
		skb_put_data(skb, ies->common_ies, ies->common_ie_len);
		hdr = (struct ieee80211_hdr *)skb->data;
		ether_addr_copy(hdr->addr3, params->bssid);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ret = -ENOMEM;
			kfree_skb(skb);
			goto out;
		}

		ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
		if (ret) {
			kfree_skb(skb);
			kfree(info);
			goto out;
		}

		ether_addr_copy(info->bssid, params->bssid);
		info->channel_6ghz = req->channels[params->channel_idx]->hw_value;
		list_add_tail(&info->list, &rtwdev->scan_info.pkt_list[NL80211_BAND_6GHZ]);

		ch_info->tx_pkt = true;
		ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G;

		kfree_skb(skb);
	}

out:
	return ret;
}

static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
				   int ssid_num,
				   struct rtw89_mac_chinfo *ch_info)
{
	struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
	struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif;
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
	struct cfg80211_scan_request *req = rtwvif->scan_req;
	struct rtw89_chan *op = &rtwdev->scan_info.op_chan;
	struct rtw89_pktofld_info *info;
	u8 band, probe_count = 0;
	int ret;

	ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK;
	ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS;
	ch_info->bw = RTW89_SCAN_WIDTH;
	ch_info->tx_pkt = true;
	ch_info->cfg_tx_pwr = false;
	ch_info->tx_pwr_idx = 0;
	ch_info->tx_null = false;
	ch_info->pause_data = false;
	ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE;

	if (ch_info->ch_band == RTW89_BAND_6G) {
		if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) ||
		    !ch_info->is_psc) {
			ch_info->tx_pkt = false;
			if (!req->duration_mandatory)
				ch_info->period -= RTW89_DWELL_TIME_6G;
		}
	}

	ret = rtw89_update_6ghz_rnr_chan(rtwdev, req, ch_info);
	if (ret)
		rtw89_warn(rtwdev, "RNR fails: %d\n", ret);

	if (ssid_num) {
		band = rtw89_hw_to_nl80211_band(ch_info->ch_band);

		list_for_each_entry(info, &scan_info->pkt_list[band], list) {
			if (info->channel_6ghz &&
			    ch_info->pri_ch != info->channel_6ghz)
				continue;
			else if (info->channel_6ghz && probe_count != 0)
				ch_info->period += RTW89_CHANNEL_TIME_6G;

			if (info->wildcard_6ghz)
				continue;

			ch_info->pkt_id[probe_count++] = info->id;
			if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
				break;
		}
		ch_info->num_pkt = probe_count;
	}

	switch (chan_type) {
	case RTW89_CHAN_OPERATE:
		ch_info->central_ch = op->channel;
		ch_info->pri_ch = op->primary_channel;
		ch_info->ch_band = op->band_type;
		ch_info->bw = op->band_width;
		ch_info->tx_null = true;
		ch_info->num_pkt = 0;
		break;
	case RTW89_CHAN_DFS:
		if (ch_info->ch_band != RTW89_BAND_6G)
			ch_info->period = max_t(u8, ch_info->period,
						RTW89_DFS_CHAN_TIME);
		ch_info->dwell_time = RTW89_DWELL_TIME;
		break;
	case
RTW89_CHAN_ACTIVE: 5804 break; 5805 default: 5806 rtw89_err(rtwdev, "Channel type out of bound\n"); 5807 } 5808 } 5809 5810 static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type, 5811 int ssid_num, 5812 struct rtw89_mac_chinfo_be *ch_info) 5813 { 5814 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 5815 struct ieee80211_vif *vif = rtwdev->scan_info.scanning_vif; 5816 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 5817 struct cfg80211_scan_request *req = rtwvif->scan_req; 5818 struct rtw89_pktofld_info *info; 5819 u8 band, probe_count = 0, i; 5820 5821 ch_info->notify_action = RTW89_SCANOFLD_DEBUG_MASK; 5822 ch_info->dfs_ch = chan_type == RTW89_CHAN_DFS; 5823 ch_info->bw = RTW89_SCAN_WIDTH; 5824 ch_info->tx_null = false; 5825 ch_info->pause_data = false; 5826 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 5827 5828 if (ssid_num) { 5829 band = rtw89_hw_to_nl80211_band(ch_info->ch_band); 5830 5831 list_for_each_entry(info, &scan_info->pkt_list[band], list) { 5832 if (info->channel_6ghz && 5833 ch_info->pri_ch != info->channel_6ghz) 5834 continue; 5835 5836 if (info->wildcard_6ghz) 5837 continue; 5838 5839 ch_info->pkt_id[probe_count++] = info->id; 5840 if (probe_count >= RTW89_SCANOFLD_MAX_SSID) 5841 break; 5842 } 5843 } 5844 5845 if (ch_info->ch_band == RTW89_BAND_6G) { 5846 if ((ssid_num == 1 && req->ssids[0].ssid_len == 0) || 5847 !ch_info->is_psc) { 5848 ch_info->probe_id = RTW89_SCANOFLD_PKT_NONE; 5849 if (!req->duration_mandatory) 5850 ch_info->period -= RTW89_DWELL_TIME_6G; 5851 } 5852 } 5853 5854 for (i = probe_count; i < RTW89_SCANOFLD_MAX_SSID; i++) 5855 ch_info->pkt_id[i] = RTW89_SCANOFLD_PKT_NONE; 5856 5857 switch (chan_type) { 5858 case RTW89_CHAN_DFS: 5859 if (ch_info->ch_band != RTW89_BAND_6G) 5860 ch_info->period = 5861 max_t(u8, ch_info->period, RTW89_DFS_CHAN_TIME); 5862 ch_info->dwell_time = RTW89_DWELL_TIME; 5863 break; 5864 case RTW89_CHAN_ACTIVE: 5865 break; 5866 default: 5867 rtw89_warn(rtwdev, "Channel type out of bound\n"); 5868 break; 5869 } 5870 } 5871 5872 int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev, 5873 struct rtw89_vif *rtwvif, bool connected) 5874 { 5875 struct cfg80211_scan_request *req = rtwvif->scan_req; 5876 struct rtw89_mac_chinfo *ch_info, *tmp; 5877 struct ieee80211_channel *channel; 5878 struct list_head chan_list; 5879 bool random_seq = req->flags & NL80211_SCAN_FLAG_RANDOM_SN; 5880 int list_len, off_chan_time = 0; 5881 enum rtw89_chan_type type; 5882 int ret = 0; 5883 u32 idx; 5884 5885 INIT_LIST_HEAD(&chan_list); 5886 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 5887 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 5888 idx++, list_len++) { 5889 channel = req->channels[idx]; 5890 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 5891 if (!ch_info) { 5892 ret = -ENOMEM; 5893 goto out; 5894 } 5895 5896 if (req->duration) 5897 ch_info->period = req->duration; 5898 else if (channel->band == NL80211_BAND_6GHZ) 5899 ch_info->period = RTW89_CHANNEL_TIME_6G + 5900 RTW89_DWELL_TIME_6G; 5901 else 5902 ch_info->period = RTW89_CHANNEL_TIME; 5903 5904 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 5905 ch_info->central_ch = channel->hw_value; 5906 ch_info->pri_ch = channel->hw_value; 5907 ch_info->rand_seq_num = random_seq; 5908 ch_info->is_psc = cfg80211_channel_is_psc(channel); 5909 5910 if (channel->flags & 5911 (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 5912 type = RTW89_CHAN_DFS; 5913 else 5914 type = RTW89_CHAN_ACTIVE; 5915 rtw89_hw_scan_add_chan(rtwdev, 
type, req->n_ssids, ch_info); 5916 5917 if (connected && 5918 off_chan_time + ch_info->period > RTW89_OFF_CHAN_TIME) { 5919 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 5920 if (!tmp) { 5921 ret = -ENOMEM; 5922 kfree(ch_info); 5923 goto out; 5924 } 5925 5926 type = RTW89_CHAN_OPERATE; 5927 tmp->period = req->duration_mandatory ? 5928 req->duration : RTW89_CHANNEL_TIME; 5929 rtw89_hw_scan_add_chan(rtwdev, type, 0, tmp); 5930 list_add_tail(&tmp->list, &chan_list); 5931 off_chan_time = 0; 5932 list_len++; 5933 } 5934 list_add_tail(&ch_info->list, &chan_list); 5935 off_chan_time += ch_info->period; 5936 } 5937 rtwdev->scan_info.last_chan_idx = idx; 5938 ret = rtw89_fw_h2c_scan_list_offload(rtwdev, list_len, &chan_list); 5939 5940 out: 5941 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 5942 list_del(&ch_info->list); 5943 kfree(ch_info); 5944 } 5945 5946 return ret; 5947 } 5948 5949 int rtw89_hw_scan_add_chan_list_be(struct rtw89_dev *rtwdev, 5950 struct rtw89_vif *rtwvif, bool connected) 5951 { 5952 struct cfg80211_scan_request *req = rtwvif->scan_req; 5953 struct rtw89_mac_chinfo_be *ch_info, *tmp; 5954 struct ieee80211_channel *channel; 5955 struct list_head chan_list; 5956 enum rtw89_chan_type type; 5957 int list_len, ret; 5958 bool random_seq; 5959 u32 idx; 5960 5961 random_seq = !!(req->flags & NL80211_SCAN_FLAG_RANDOM_SN); 5962 INIT_LIST_HEAD(&chan_list); 5963 5964 for (idx = rtwdev->scan_info.last_chan_idx, list_len = 0; 5965 idx < req->n_channels && list_len < RTW89_SCAN_LIST_LIMIT; 5966 idx++, list_len++) { 5967 channel = req->channels[idx]; 5968 ch_info = kzalloc(sizeof(*ch_info), GFP_KERNEL); 5969 if (!ch_info) { 5970 ret = -ENOMEM; 5971 goto out; 5972 } 5973 5974 if (req->duration) 5975 ch_info->period = req->duration; 5976 else if (channel->band == NL80211_BAND_6GHZ) 5977 ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME_6G; 5978 else 5979 ch_info->period = RTW89_CHANNEL_TIME; 5980 5981 ch_info->ch_band = rtw89_nl80211_to_hw_band(channel->band); 5982 ch_info->central_ch = channel->hw_value; 5983 ch_info->pri_ch = channel->hw_value; 5984 ch_info->rand_seq_num = random_seq; 5985 ch_info->is_psc = cfg80211_channel_is_psc(channel); 5986 5987 if (channel->flags & (IEEE80211_CHAN_RADAR | IEEE80211_CHAN_NO_IR)) 5988 type = RTW89_CHAN_DFS; 5989 else 5990 type = RTW89_CHAN_ACTIVE; 5991 rtw89_hw_scan_add_chan_be(rtwdev, type, req->n_ssids, ch_info); 5992 5993 list_add_tail(&ch_info->list, &chan_list); 5994 } 5995 5996 rtwdev->scan_info.last_chan_idx = idx; 5997 ret = rtw89_fw_h2c_scan_list_offload_be(rtwdev, list_len, &chan_list); 5998 5999 out: 6000 list_for_each_entry_safe(ch_info, tmp, &chan_list, list) { 6001 list_del(&ch_info->list); 6002 kfree(ch_info); 6003 } 6004 6005 return ret; 6006 } 6007 6008 static int rtw89_hw_scan_prehandle(struct rtw89_dev *rtwdev, 6009 struct rtw89_vif *rtwvif, bool connected) 6010 { 6011 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6012 int ret; 6013 6014 ret = rtw89_hw_scan_update_probe_req(rtwdev, rtwvif); 6015 if (ret) { 6016 rtw89_err(rtwdev, "Update probe request failed\n"); 6017 goto out; 6018 } 6019 ret = mac->add_chan_list(rtwdev, rtwvif, connected); 6020 out: 6021 return ret; 6022 } 6023 6024 void rtw89_hw_scan_start(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6025 struct ieee80211_scan_request *scan_req) 6026 { 6027 struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; 6028 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6029 struct cfg80211_scan_request *req = &scan_req->req; 6030 
u32 rx_fltr = rtwdev->hal.rx_fltr; 6031 u8 mac_addr[ETH_ALEN]; 6032 6033 rtw89_get_channel(rtwdev, rtwvif, &rtwdev->scan_info.op_chan); 6034 rtwdev->scan_info.scanning_vif = vif; 6035 rtwdev->scan_info.last_chan_idx = 0; 6036 rtwdev->scan_info.abort = false; 6037 rtwvif->scan_ies = &scan_req->ies; 6038 rtwvif->scan_req = req; 6039 ieee80211_stop_queues(rtwdev->hw); 6040 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, false); 6041 6042 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) 6043 get_random_mask_addr(mac_addr, req->mac_addr, 6044 req->mac_addr_mask); 6045 else 6046 ether_addr_copy(mac_addr, vif->addr); 6047 rtw89_core_scan_start(rtwdev, rtwvif, mac_addr, true); 6048 6049 rx_fltr &= ~B_AX_A_BCN_CHK_EN; 6050 rx_fltr &= ~B_AX_A_BC; 6051 rx_fltr &= ~B_AX_A_A1_MATCH; 6052 rtw89_write32_mask(rtwdev, 6053 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6054 B_AX_RX_FLTR_CFG_MASK, 6055 rx_fltr); 6056 6057 rtw89_chanctx_pause(rtwdev, RTW89_CHANCTX_PAUSE_REASON_HW_SCAN); 6058 } 6059 6060 void rtw89_hw_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6061 bool aborted) 6062 { 6063 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6064 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6065 struct rtw89_vif *rtwvif = vif_to_rtwvif_safe(vif); 6066 struct cfg80211_scan_info info = { 6067 .aborted = aborted, 6068 }; 6069 6070 if (!vif) 6071 return; 6072 6073 rtw89_write32_mask(rtwdev, 6074 rtw89_mac_reg_by_idx(rtwdev, mac->rx_fltr, RTW89_MAC_0), 6075 B_AX_RX_FLTR_CFG_MASK, 6076 rtwdev->hal.rx_fltr); 6077 6078 rtw89_core_scan_complete(rtwdev, vif, true); 6079 ieee80211_scan_completed(rtwdev->hw, &info); 6080 ieee80211_wake_queues(rtwdev->hw); 6081 rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif, true); 6082 rtw89_mac_enable_beacon_for_ap_vifs(rtwdev, true); 6083 6084 rtw89_release_pkt_list(rtwdev); 6085 rtwvif->scan_req = NULL; 6086 rtwvif->scan_ies = NULL; 6087 scan_info->last_chan_idx = 0; 6088 scan_info->scanning_vif = NULL; 6089 scan_info->abort = false; 6090 6091 rtw89_chanctx_proceed(rtwdev); 6092 } 6093 6094 void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) 6095 { 6096 struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; 6097 int ret; 6098 6099 scan_info->abort = true; 6100 6101 ret = rtw89_hw_scan_offload(rtwdev, vif, false); 6102 if (ret) 6103 rtw89_hw_scan_complete(rtwdev, vif, true); 6104 } 6105 6106 static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) 6107 { 6108 struct rtw89_vif *rtwvif; 6109 6110 rtw89_for_each_rtwvif(rtwdev, rtwvif) { 6111 /* This variable implies connected or during attempt to connect */ 6112 if (!is_zero_ether_addr(rtwvif->bssid)) 6113 return true; 6114 } 6115 6116 return false; 6117 } 6118 6119 int rtw89_hw_scan_offload(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, 6120 bool enable) 6121 { 6122 const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; 6123 struct rtw89_scan_option opt = {0}; 6124 struct rtw89_vif *rtwvif; 6125 bool connected; 6126 int ret = 0; 6127 6128 rtwvif = vif ? (struct rtw89_vif *)vif->drv_priv : NULL; 6129 if (!rtwvif) 6130 return -EINVAL; 6131 6132 connected = rtw89_is_any_vif_connected_or_connecting(rtwdev); 6133 opt.enable = enable; 6134 opt.target_ch_mode = connected; 6135 if (enable) { 6136 ret = rtw89_hw_scan_prehandle(rtwdev, rtwvif, connected); 6137 if (ret) 6138 goto out; 6139 } 6140 6141 if (rtwdev->chip->chip_gen == RTW89_CHIP_BE) { 6142 opt.operation = enable ? 
RTW89_SCAN_OP_START : RTW89_SCAN_OP_STOP; 6143 opt.scan_mode = RTW89_SCAN_MODE_SA; 6144 opt.band = RTW89_PHY_0; 6145 opt.num_macc_role = 0; 6146 opt.mlo_mode = rtwdev->mlo_dbcc_mode; 6147 opt.num_opch = connected ? 1 : 0; 6148 opt.opch_end = connected ? 0 : RTW89_CHAN_INVALID; 6149 } 6150 6151 ret = mac->scan_offload(rtwdev, &opt, rtwvif); 6152 out: 6153 return ret; 6154 } 6155 6156 #define H2C_FW_CPU_EXCEPTION_LEN 4 6157 #define H2C_FW_CPU_EXCEPTION_TYPE_DEF 0x5566 6158 int rtw89_fw_h2c_trigger_cpu_exception(struct rtw89_dev *rtwdev) 6159 { 6160 struct sk_buff *skb; 6161 int ret; 6162 6163 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_FW_CPU_EXCEPTION_LEN); 6164 if (!skb) { 6165 rtw89_err(rtwdev, 6166 "failed to alloc skb for fw cpu exception\n"); 6167 return -ENOMEM; 6168 } 6169 6170 skb_put(skb, H2C_FW_CPU_EXCEPTION_LEN); 6171 RTW89_SET_FWCMD_CPU_EXCEPTION_TYPE(skb->data, 6172 H2C_FW_CPU_EXCEPTION_TYPE_DEF); 6173 6174 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6175 H2C_CAT_TEST, 6176 H2C_CL_FW_STATUS_TEST, 6177 H2C_FUNC_CPU_EXCEPTION, 0, 0, 6178 H2C_FW_CPU_EXCEPTION_LEN); 6179 6180 ret = rtw89_h2c_tx(rtwdev, skb, false); 6181 if (ret) { 6182 rtw89_err(rtwdev, "failed to send h2c\n"); 6183 goto fail; 6184 } 6185 6186 return 0; 6187 6188 fail: 6189 dev_kfree_skb_any(skb); 6190 return ret; 6191 } 6192 6193 #define H2C_PKT_DROP_LEN 24 6194 int rtw89_fw_h2c_pkt_drop(struct rtw89_dev *rtwdev, 6195 const struct rtw89_pkt_drop_params *params) 6196 { 6197 struct sk_buff *skb; 6198 int ret; 6199 6200 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_PKT_DROP_LEN); 6201 if (!skb) { 6202 rtw89_err(rtwdev, 6203 "failed to alloc skb for packet drop\n"); 6204 return -ENOMEM; 6205 } 6206 6207 switch (params->sel) { 6208 case RTW89_PKT_DROP_SEL_MACID_BE_ONCE: 6209 case RTW89_PKT_DROP_SEL_MACID_BK_ONCE: 6210 case RTW89_PKT_DROP_SEL_MACID_VI_ONCE: 6211 case RTW89_PKT_DROP_SEL_MACID_VO_ONCE: 6212 case RTW89_PKT_DROP_SEL_BAND_ONCE: 6213 break; 6214 default: 6215 rtw89_debug(rtwdev, RTW89_DBG_FW, 6216 "H2C of pkt drop might not fully support sel: %d yet\n", 6217 params->sel); 6218 break; 6219 } 6220 6221 skb_put(skb, H2C_PKT_DROP_LEN); 6222 RTW89_SET_FWCMD_PKT_DROP_SEL(skb->data, params->sel); 6223 RTW89_SET_FWCMD_PKT_DROP_MACID(skb->data, params->macid); 6224 RTW89_SET_FWCMD_PKT_DROP_BAND(skb->data, params->mac_band); 6225 RTW89_SET_FWCMD_PKT_DROP_PORT(skb->data, params->port); 6226 RTW89_SET_FWCMD_PKT_DROP_MBSSID(skb->data, params->mbssid); 6227 RTW89_SET_FWCMD_PKT_DROP_ROLE_A_INFO_TF_TRS(skb->data, params->tf_trs); 6228 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_0(skb->data, 6229 params->macid_band_sel[0]); 6230 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_1(skb->data, 6231 params->macid_band_sel[1]); 6232 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_2(skb->data, 6233 params->macid_band_sel[2]); 6234 RTW89_SET_FWCMD_PKT_DROP_MACID_BAND_SEL_3(skb->data, 6235 params->macid_band_sel[3]); 6236 6237 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6238 H2C_CAT_MAC, 6239 H2C_CL_MAC_FW_OFLD, 6240 H2C_FUNC_PKT_DROP, 0, 0, 6241 H2C_PKT_DROP_LEN); 6242 6243 ret = rtw89_h2c_tx(rtwdev, skb, false); 6244 if (ret) { 6245 rtw89_err(rtwdev, "failed to send h2c\n"); 6246 goto fail; 6247 } 6248 6249 return 0; 6250 6251 fail: 6252 dev_kfree_skb_any(skb); 6253 return ret; 6254 } 6255 6256 #define H2C_KEEP_ALIVE_LEN 4 6257 int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6258 bool enable) 6259 { 6260 struct sk_buff *skb; 6261 u8 pkt_id = 0; 6262 int ret; 6263 6264 if (enable) { 6265 ret = 
rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, 6266 RTW89_PKT_OFLD_TYPE_NULL_DATA, 6267 &pkt_id); 6268 if (ret) 6269 return -EPERM; 6270 } 6271 6272 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_KEEP_ALIVE_LEN); 6273 if (!skb) { 6274 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6275 return -ENOMEM; 6276 } 6277 6278 skb_put(skb, H2C_KEEP_ALIVE_LEN); 6279 6280 RTW89_SET_KEEP_ALIVE_ENABLE(skb->data, enable); 6281 RTW89_SET_KEEP_ALIVE_PKT_NULL_ID(skb->data, pkt_id); 6282 RTW89_SET_KEEP_ALIVE_PERIOD(skb->data, 5); 6283 RTW89_SET_KEEP_ALIVE_MACID(skb->data, rtwvif->mac_id); 6284 6285 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6286 H2C_CAT_MAC, 6287 H2C_CL_MAC_WOW, 6288 H2C_FUNC_KEEP_ALIVE, 0, 1, 6289 H2C_KEEP_ALIVE_LEN); 6290 6291 ret = rtw89_h2c_tx(rtwdev, skb, false); 6292 if (ret) { 6293 rtw89_err(rtwdev, "failed to send h2c\n"); 6294 goto fail; 6295 } 6296 6297 return 0; 6298 6299 fail: 6300 dev_kfree_skb_any(skb); 6301 6302 return ret; 6303 } 6304 6305 #define H2C_DISCONNECT_DETECT_LEN 8 6306 int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev, 6307 struct rtw89_vif *rtwvif, bool enable) 6308 { 6309 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6310 struct sk_buff *skb; 6311 u8 macid = rtwvif->mac_id; 6312 int ret; 6313 6314 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DISCONNECT_DETECT_LEN); 6315 if (!skb) { 6316 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6317 return -ENOMEM; 6318 } 6319 6320 skb_put(skb, H2C_DISCONNECT_DETECT_LEN); 6321 6322 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) { 6323 RTW89_SET_DISCONNECT_DETECT_ENABLE(skb->data, enable); 6324 RTW89_SET_DISCONNECT_DETECT_DISCONNECT(skb->data, !enable); 6325 RTW89_SET_DISCONNECT_DETECT_MAC_ID(skb->data, macid); 6326 RTW89_SET_DISCONNECT_DETECT_CHECK_PERIOD(skb->data, 100); 6327 RTW89_SET_DISCONNECT_DETECT_TRY_PKT_COUNT(skb->data, 5); 6328 } 6329 6330 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6331 H2C_CAT_MAC, 6332 H2C_CL_MAC_WOW, 6333 H2C_FUNC_DISCONNECT_DETECT, 0, 1, 6334 H2C_DISCONNECT_DETECT_LEN); 6335 6336 ret = rtw89_h2c_tx(rtwdev, skb, false); 6337 if (ret) { 6338 rtw89_err(rtwdev, "failed to send h2c\n"); 6339 goto fail; 6340 } 6341 6342 return 0; 6343 6344 fail: 6345 dev_kfree_skb_any(skb); 6346 6347 return ret; 6348 } 6349 6350 #define H2C_WOW_GLOBAL_LEN 8 6351 int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, 6352 bool enable) 6353 { 6354 struct sk_buff *skb; 6355 u8 macid = rtwvif->mac_id; 6356 int ret; 6357 6358 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN); 6359 if (!skb) { 6360 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6361 return -ENOMEM; 6362 } 6363 6364 skb_put(skb, H2C_WOW_GLOBAL_LEN); 6365 6366 RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable); 6367 RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid); 6368 6369 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6370 H2C_CAT_MAC, 6371 H2C_CL_MAC_WOW, 6372 H2C_FUNC_WOW_GLOBAL, 0, 1, 6373 H2C_WOW_GLOBAL_LEN); 6374 6375 ret = rtw89_h2c_tx(rtwdev, skb, false); 6376 if (ret) { 6377 rtw89_err(rtwdev, "failed to send h2c\n"); 6378 goto fail; 6379 } 6380 6381 return 0; 6382 6383 fail: 6384 dev_kfree_skb_any(skb); 6385 6386 return ret; 6387 } 6388 6389 #define H2C_WAKEUP_CTRL_LEN 4 6390 int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev, 6391 struct rtw89_vif *rtwvif, 6392 bool enable) 6393 { 6394 struct rtw89_wow_param *rtw_wow = &rtwdev->wow; 6395 struct sk_buff *skb; 6396 u8 macid = rtwvif->mac_id; 6397 int ret; 6398 6399 skb = 
rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN); 6400 if (!skb) { 6401 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6402 return -ENOMEM; 6403 } 6404 6405 skb_put(skb, H2C_WAKEUP_CTRL_LEN); 6406 6407 if (rtw_wow->pattern_cnt) 6408 RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(skb->data, enable); 6409 if (test_bit(RTW89_WOW_FLAG_EN_MAGIC_PKT, rtw_wow->flags)) 6410 RTW89_SET_WOW_WAKEUP_CTRL_MAGIC_ENABLE(skb->data, enable); 6411 if (test_bit(RTW89_WOW_FLAG_EN_DISCONNECT, rtw_wow->flags)) 6412 RTW89_SET_WOW_WAKEUP_CTRL_DEAUTH_ENABLE(skb->data, enable); 6413 6414 RTW89_SET_WOW_WAKEUP_CTRL_MAC_ID(skb->data, macid); 6415 6416 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6417 H2C_CAT_MAC, 6418 H2C_CL_MAC_WOW, 6419 H2C_FUNC_WAKEUP_CTRL, 0, 1, 6420 H2C_WAKEUP_CTRL_LEN); 6421 6422 ret = rtw89_h2c_tx(rtwdev, skb, false); 6423 if (ret) { 6424 rtw89_err(rtwdev, "failed to send h2c\n"); 6425 goto fail; 6426 } 6427 6428 return 0; 6429 6430 fail: 6431 dev_kfree_skb_any(skb); 6432 6433 return ret; 6434 } 6435 6436 #define H2C_WOW_CAM_UPD_LEN 24 6437 int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev, 6438 struct rtw89_wow_cam_info *cam_info) 6439 { 6440 struct sk_buff *skb; 6441 int ret; 6442 6443 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_CAM_UPD_LEN); 6444 if (!skb) { 6445 rtw89_err(rtwdev, "failed to alloc skb for keep alive\n"); 6446 return -ENOMEM; 6447 } 6448 6449 skb_put(skb, H2C_WOW_CAM_UPD_LEN); 6450 6451 RTW89_SET_WOW_CAM_UPD_R_W(skb->data, cam_info->r_w); 6452 RTW89_SET_WOW_CAM_UPD_IDX(skb->data, cam_info->idx); 6453 if (cam_info->valid) { 6454 RTW89_SET_WOW_CAM_UPD_WKFM1(skb->data, cam_info->mask[0]); 6455 RTW89_SET_WOW_CAM_UPD_WKFM2(skb->data, cam_info->mask[1]); 6456 RTW89_SET_WOW_CAM_UPD_WKFM3(skb->data, cam_info->mask[2]); 6457 RTW89_SET_WOW_CAM_UPD_WKFM4(skb->data, cam_info->mask[3]); 6458 RTW89_SET_WOW_CAM_UPD_CRC(skb->data, cam_info->crc); 6459 RTW89_SET_WOW_CAM_UPD_NEGATIVE_PATTERN_MATCH(skb->data, 6460 cam_info->negative_pattern_match); 6461 RTW89_SET_WOW_CAM_UPD_SKIP_MAC_HDR(skb->data, 6462 cam_info->skip_mac_hdr); 6463 RTW89_SET_WOW_CAM_UPD_UC(skb->data, cam_info->uc); 6464 RTW89_SET_WOW_CAM_UPD_MC(skb->data, cam_info->mc); 6465 RTW89_SET_WOW_CAM_UPD_BC(skb->data, cam_info->bc); 6466 } 6467 RTW89_SET_WOW_CAM_UPD_VALID(skb->data, cam_info->valid); 6468 6469 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6470 H2C_CAT_MAC, 6471 H2C_CL_MAC_WOW, 6472 H2C_FUNC_WOW_CAM_UPD, 0, 1, 6473 H2C_WOW_CAM_UPD_LEN); 6474 6475 ret = rtw89_h2c_tx(rtwdev, skb, false); 6476 if (ret) { 6477 rtw89_err(rtwdev, "failed to send h2c\n"); 6478 goto fail; 6479 } 6480 6481 return 0; 6482 fail: 6483 dev_kfree_skb_any(skb); 6484 6485 return ret; 6486 } 6487 6488 /* Return < 0, if failures happen during waiting for the condition. 6489 * Return 0, when waiting for the condition succeeds. 6490 * Return > 0, if the wait is considered unreachable due to driver/FW design, 6491 * where 1 means during SER. 
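 * In every case the skb is consumed: it is freed here when the H2C TX
 * fails and is otherwise handed to the TX path.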
6492 */ 6493 static int rtw89_h2c_tx_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb, 6494 struct rtw89_wait_info *wait, unsigned int cond) 6495 { 6496 int ret; 6497 6498 ret = rtw89_h2c_tx(rtwdev, skb, false); 6499 if (ret) { 6500 rtw89_err(rtwdev, "failed to send h2c\n"); 6501 dev_kfree_skb_any(skb); 6502 return -EBUSY; 6503 } 6504 6505 if (test_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags)) 6506 return 1; 6507 6508 return rtw89_wait_for_cond(wait, cond); 6509 } 6510 6511 #define H2C_ADD_MCC_LEN 16 6512 int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev, 6513 const struct rtw89_fw_mcc_add_req *p) 6514 { 6515 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6516 struct sk_buff *skb; 6517 unsigned int cond; 6518 6519 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_ADD_MCC_LEN); 6520 if (!skb) { 6521 rtw89_err(rtwdev, 6522 "failed to alloc skb for add mcc\n"); 6523 return -ENOMEM; 6524 } 6525 6526 skb_put(skb, H2C_ADD_MCC_LEN); 6527 RTW89_SET_FWCMD_ADD_MCC_MACID(skb->data, p->macid); 6528 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG0(skb->data, p->central_ch_seg0); 6529 RTW89_SET_FWCMD_ADD_MCC_CENTRAL_CH_SEG1(skb->data, p->central_ch_seg1); 6530 RTW89_SET_FWCMD_ADD_MCC_PRIMARY_CH(skb->data, p->primary_ch); 6531 RTW89_SET_FWCMD_ADD_MCC_BANDWIDTH(skb->data, p->bandwidth); 6532 RTW89_SET_FWCMD_ADD_MCC_GROUP(skb->data, p->group); 6533 RTW89_SET_FWCMD_ADD_MCC_C2H_RPT(skb->data, p->c2h_rpt); 6534 RTW89_SET_FWCMD_ADD_MCC_DIS_TX_NULL(skb->data, p->dis_tx_null); 6535 RTW89_SET_FWCMD_ADD_MCC_DIS_SW_RETRY(skb->data, p->dis_sw_retry); 6536 RTW89_SET_FWCMD_ADD_MCC_IN_CURR_CH(skb->data, p->in_curr_ch); 6537 RTW89_SET_FWCMD_ADD_MCC_SW_RETRY_COUNT(skb->data, p->sw_retry_count); 6538 RTW89_SET_FWCMD_ADD_MCC_TX_NULL_EARLY(skb->data, p->tx_null_early); 6539 RTW89_SET_FWCMD_ADD_MCC_BTC_IN_2G(skb->data, p->btc_in_2g); 6540 RTW89_SET_FWCMD_ADD_MCC_PTA_EN(skb->data, p->pta_en); 6541 RTW89_SET_FWCMD_ADD_MCC_RFK_BY_PASS(skb->data, p->rfk_by_pass); 6542 RTW89_SET_FWCMD_ADD_MCC_CH_BAND_TYPE(skb->data, p->ch_band_type); 6543 RTW89_SET_FWCMD_ADD_MCC_DURATION(skb->data, p->duration); 6544 RTW89_SET_FWCMD_ADD_MCC_COURTESY_EN(skb->data, p->courtesy_en); 6545 RTW89_SET_FWCMD_ADD_MCC_COURTESY_NUM(skb->data, p->courtesy_num); 6546 RTW89_SET_FWCMD_ADD_MCC_COURTESY_TARGET(skb->data, p->courtesy_target); 6547 6548 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6549 H2C_CAT_MAC, 6550 H2C_CL_MCC, 6551 H2C_FUNC_ADD_MCC, 0, 0, 6552 H2C_ADD_MCC_LEN); 6553 6554 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_ADD_MCC); 6555 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6556 } 6557 6558 #define H2C_START_MCC_LEN 12 6559 int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev, 6560 const struct rtw89_fw_mcc_start_req *p) 6561 { 6562 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6563 struct sk_buff *skb; 6564 unsigned int cond; 6565 6566 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_START_MCC_LEN); 6567 if (!skb) { 6568 rtw89_err(rtwdev, 6569 "failed to alloc skb for start mcc\n"); 6570 return -ENOMEM; 6571 } 6572 6573 skb_put(skb, H2C_START_MCC_LEN); 6574 RTW89_SET_FWCMD_START_MCC_GROUP(skb->data, p->group); 6575 RTW89_SET_FWCMD_START_MCC_BTC_IN_GROUP(skb->data, p->btc_in_group); 6576 RTW89_SET_FWCMD_START_MCC_OLD_GROUP_ACTION(skb->data, p->old_group_action); 6577 RTW89_SET_FWCMD_START_MCC_OLD_GROUP(skb->data, p->old_group); 6578 RTW89_SET_FWCMD_START_MCC_NOTIFY_CNT(skb->data, p->notify_cnt); 6579 RTW89_SET_FWCMD_START_MCC_NOTIFY_RXDBG_EN(skb->data, p->notify_rxdbg_en); 6580 RTW89_SET_FWCMD_START_MCC_MACID(skb->data, 
p->macid); 6581 RTW89_SET_FWCMD_START_MCC_TSF_LOW(skb->data, p->tsf_low); 6582 RTW89_SET_FWCMD_START_MCC_TSF_HIGH(skb->data, p->tsf_high); 6583 6584 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6585 H2C_CAT_MAC, 6586 H2C_CL_MCC, 6587 H2C_FUNC_START_MCC, 0, 0, 6588 H2C_START_MCC_LEN); 6589 6590 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_START_MCC); 6591 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6592 } 6593 6594 #define H2C_STOP_MCC_LEN 4 6595 int rtw89_fw_h2c_stop_mcc(struct rtw89_dev *rtwdev, u8 group, u8 macid, 6596 bool prev_groups) 6597 { 6598 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6599 struct sk_buff *skb; 6600 unsigned int cond; 6601 6602 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_STOP_MCC_LEN); 6603 if (!skb) { 6604 rtw89_err(rtwdev, 6605 "failed to alloc skb for stop mcc\n"); 6606 return -ENOMEM; 6607 } 6608 6609 skb_put(skb, H2C_STOP_MCC_LEN); 6610 RTW89_SET_FWCMD_STOP_MCC_MACID(skb->data, macid); 6611 RTW89_SET_FWCMD_STOP_MCC_GROUP(skb->data, group); 6612 RTW89_SET_FWCMD_STOP_MCC_PREV_GROUPS(skb->data, prev_groups); 6613 6614 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6615 H2C_CAT_MAC, 6616 H2C_CL_MCC, 6617 H2C_FUNC_STOP_MCC, 0, 0, 6618 H2C_STOP_MCC_LEN); 6619 6620 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_STOP_MCC); 6621 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6622 } 6623 6624 #define H2C_DEL_MCC_GROUP_LEN 4 6625 int rtw89_fw_h2c_del_mcc_group(struct rtw89_dev *rtwdev, u8 group, 6626 bool prev_groups) 6627 { 6628 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6629 struct sk_buff *skb; 6630 unsigned int cond; 6631 6632 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DEL_MCC_GROUP_LEN); 6633 if (!skb) { 6634 rtw89_err(rtwdev, 6635 "failed to alloc skb for del mcc group\n"); 6636 return -ENOMEM; 6637 } 6638 6639 skb_put(skb, H2C_DEL_MCC_GROUP_LEN); 6640 RTW89_SET_FWCMD_DEL_MCC_GROUP_GROUP(skb->data, group); 6641 RTW89_SET_FWCMD_DEL_MCC_GROUP_PREV_GROUPS(skb->data, prev_groups); 6642 6643 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6644 H2C_CAT_MAC, 6645 H2C_CL_MCC, 6646 H2C_FUNC_DEL_MCC_GROUP, 0, 0, 6647 H2C_DEL_MCC_GROUP_LEN); 6648 6649 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_DEL_MCC_GROUP); 6650 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6651 } 6652 6653 #define H2C_RESET_MCC_GROUP_LEN 4 6654 int rtw89_fw_h2c_reset_mcc_group(struct rtw89_dev *rtwdev, u8 group) 6655 { 6656 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6657 struct sk_buff *skb; 6658 unsigned int cond; 6659 6660 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_RESET_MCC_GROUP_LEN); 6661 if (!skb) { 6662 rtw89_err(rtwdev, 6663 "failed to alloc skb for reset mcc group\n"); 6664 return -ENOMEM; 6665 } 6666 6667 skb_put(skb, H2C_RESET_MCC_GROUP_LEN); 6668 RTW89_SET_FWCMD_RESET_MCC_GROUP_GROUP(skb->data, group); 6669 6670 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6671 H2C_CAT_MAC, 6672 H2C_CL_MCC, 6673 H2C_FUNC_RESET_MCC_GROUP, 0, 0, 6674 H2C_RESET_MCC_GROUP_LEN); 6675 6676 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_RESET_MCC_GROUP); 6677 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6678 } 6679 6680 #define H2C_MCC_REQ_TSF_LEN 4 6681 int rtw89_fw_h2c_mcc_req_tsf(struct rtw89_dev *rtwdev, 6682 const struct rtw89_fw_mcc_tsf_req *req, 6683 struct rtw89_mac_mcc_tsf_rpt *rpt) 6684 { 6685 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6686 struct rtw89_mac_mcc_tsf_rpt *tmp; 6687 struct sk_buff *skb; 6688 unsigned int cond; 6689 int ret; 6690 6691 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, 
H2C_MCC_REQ_TSF_LEN); 6692 if (!skb) { 6693 rtw89_err(rtwdev, 6694 "failed to alloc skb for mcc req tsf\n"); 6695 return -ENOMEM; 6696 } 6697 6698 skb_put(skb, H2C_MCC_REQ_TSF_LEN); 6699 RTW89_SET_FWCMD_MCC_REQ_TSF_GROUP(skb->data, req->group); 6700 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_X(skb->data, req->macid_x); 6701 RTW89_SET_FWCMD_MCC_REQ_TSF_MACID_Y(skb->data, req->macid_y); 6702 6703 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6704 H2C_CAT_MAC, 6705 H2C_CL_MCC, 6706 H2C_FUNC_MCC_REQ_TSF, 0, 0, 6707 H2C_MCC_REQ_TSF_LEN); 6708 6709 cond = RTW89_MCC_WAIT_COND(req->group, H2C_FUNC_MCC_REQ_TSF); 6710 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6711 if (ret) 6712 return ret; 6713 6714 tmp = (struct rtw89_mac_mcc_tsf_rpt *)wait->data.buf; 6715 *rpt = *tmp; 6716 6717 return 0; 6718 } 6719 6720 #define H2C_MCC_MACID_BITMAP_DSC_LEN 4 6721 int rtw89_fw_h2c_mcc_macid_bitmap(struct rtw89_dev *rtwdev, u8 group, u8 macid, 6722 u8 *bitmap) 6723 { 6724 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6725 struct sk_buff *skb; 6726 unsigned int cond; 6727 u8 map_len; 6728 u8 h2c_len; 6729 6730 BUILD_BUG_ON(RTW89_MAX_MAC_ID_NUM % 8); 6731 map_len = RTW89_MAX_MAC_ID_NUM / 8; 6732 h2c_len = H2C_MCC_MACID_BITMAP_DSC_LEN + map_len; 6733 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, h2c_len); 6734 if (!skb) { 6735 rtw89_err(rtwdev, 6736 "failed to alloc skb for mcc macid bitmap\n"); 6737 return -ENOMEM; 6738 } 6739 6740 skb_put(skb, h2c_len); 6741 RTW89_SET_FWCMD_MCC_MACID_BITMAP_GROUP(skb->data, group); 6742 RTW89_SET_FWCMD_MCC_MACID_BITMAP_MACID(skb->data, macid); 6743 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP_LENGTH(skb->data, map_len); 6744 RTW89_SET_FWCMD_MCC_MACID_BITMAP_BITMAP(skb->data, bitmap, map_len); 6745 6746 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6747 H2C_CAT_MAC, 6748 H2C_CL_MCC, 6749 H2C_FUNC_MCC_MACID_BITMAP, 0, 0, 6750 h2c_len); 6751 6752 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_MACID_BITMAP); 6753 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6754 } 6755 6756 #define H2C_MCC_SYNC_LEN 4 6757 int rtw89_fw_h2c_mcc_sync(struct rtw89_dev *rtwdev, u8 group, u8 source, 6758 u8 target, u8 offset) 6759 { 6760 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6761 struct sk_buff *skb; 6762 unsigned int cond; 6763 6764 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SYNC_LEN); 6765 if (!skb) { 6766 rtw89_err(rtwdev, 6767 "failed to alloc skb for mcc sync\n"); 6768 return -ENOMEM; 6769 } 6770 6771 skb_put(skb, H2C_MCC_SYNC_LEN); 6772 RTW89_SET_FWCMD_MCC_SYNC_GROUP(skb->data, group); 6773 RTW89_SET_FWCMD_MCC_SYNC_MACID_SOURCE(skb->data, source); 6774 RTW89_SET_FWCMD_MCC_SYNC_MACID_TARGET(skb->data, target); 6775 RTW89_SET_FWCMD_MCC_SYNC_SYNC_OFFSET(skb->data, offset); 6776 6777 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6778 H2C_CAT_MAC, 6779 H2C_CL_MCC, 6780 H2C_FUNC_MCC_SYNC, 0, 0, 6781 H2C_MCC_SYNC_LEN); 6782 6783 cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_SYNC); 6784 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6785 } 6786 6787 #define H2C_MCC_SET_DURATION_LEN 20 6788 int rtw89_fw_h2c_mcc_set_duration(struct rtw89_dev *rtwdev, 6789 const struct rtw89_fw_mcc_duration *p) 6790 { 6791 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6792 struct sk_buff *skb; 6793 unsigned int cond; 6794 6795 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_MCC_SET_DURATION_LEN); 6796 if (!skb) { 6797 rtw89_err(rtwdev, 6798 "failed to alloc skb for mcc set duration\n"); 6799 return -ENOMEM; 6800 } 6801 6802 skb_put(skb, H2C_MCC_SET_DURATION_LEN); 
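	/* The request carries the group's start TSF (start_tsf_{high,low})
	 * and the per-role durations (duration_x/duration_y) for the roles
	 * identified by macid_x and macid_y.
	 */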
6803 RTW89_SET_FWCMD_MCC_SET_DURATION_GROUP(skb->data, p->group); 6804 RTW89_SET_FWCMD_MCC_SET_DURATION_BTC_IN_GROUP(skb->data, p->btc_in_group); 6805 RTW89_SET_FWCMD_MCC_SET_DURATION_START_MACID(skb->data, p->start_macid); 6806 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_X(skb->data, p->macid_x); 6807 RTW89_SET_FWCMD_MCC_SET_DURATION_MACID_Y(skb->data, p->macid_y); 6808 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_LOW(skb->data, 6809 p->start_tsf_low); 6810 RTW89_SET_FWCMD_MCC_SET_DURATION_START_TSF_HIGH(skb->data, 6811 p->start_tsf_high); 6812 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_X(skb->data, p->duration_x); 6813 RTW89_SET_FWCMD_MCC_SET_DURATION_DURATION_Y(skb->data, p->duration_y); 6814 6815 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6816 H2C_CAT_MAC, 6817 H2C_CL_MCC, 6818 H2C_FUNC_MCC_SET_DURATION, 0, 0, 6819 H2C_MCC_SET_DURATION_LEN); 6820 6821 cond = RTW89_MCC_WAIT_COND(p->group, H2C_FUNC_MCC_SET_DURATION); 6822 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6823 } 6824 6825 static 6826 u32 rtw89_fw_h2c_mrc_add_slot(struct rtw89_dev *rtwdev, 6827 const struct rtw89_fw_mrc_add_slot_arg *slot_arg, 6828 struct rtw89_h2c_mrc_add_slot *slot_h2c) 6829 { 6830 bool fill_h2c = !!slot_h2c; 6831 unsigned int i; 6832 6833 if (!fill_h2c) 6834 goto calc_len; 6835 6836 slot_h2c->w0 = le32_encode_bits(slot_arg->duration, 6837 RTW89_H2C_MRC_ADD_SLOT_W0_DURATION) | 6838 le32_encode_bits(slot_arg->courtesy_en, 6839 RTW89_H2C_MRC_ADD_SLOT_W0_COURTESY_EN) | 6840 le32_encode_bits(slot_arg->role_num, 6841 RTW89_H2C_MRC_ADD_SLOT_W0_ROLE_NUM); 6842 slot_h2c->w1 = le32_encode_bits(slot_arg->courtesy_period, 6843 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_PERIOD) | 6844 le32_encode_bits(slot_arg->courtesy_target, 6845 RTW89_H2C_MRC_ADD_SLOT_W1_COURTESY_TARGET); 6846 6847 for (i = 0; i < slot_arg->role_num; i++) { 6848 slot_h2c->roles[i].w0 = 6849 le32_encode_bits(slot_arg->roles[i].macid, 6850 RTW89_H2C_MRC_ADD_ROLE_W0_MACID) | 6851 le32_encode_bits(slot_arg->roles[i].role_type, 6852 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_TYPE) | 6853 le32_encode_bits(slot_arg->roles[i].is_master, 6854 RTW89_H2C_MRC_ADD_ROLE_W0_IS_MASTER) | 6855 le32_encode_bits(slot_arg->roles[i].en_tx_null, 6856 RTW89_H2C_MRC_ADD_ROLE_W0_TX_NULL_EN) | 6857 le32_encode_bits(false, 6858 RTW89_H2C_MRC_ADD_ROLE_W0_IS_ALT_ROLE) | 6859 le32_encode_bits(false, 6860 RTW89_H2C_MRC_ADD_ROLE_W0_ROLE_ALT_EN); 6861 slot_h2c->roles[i].w1 = 6862 le32_encode_bits(slot_arg->roles[i].central_ch, 6863 RTW89_H2C_MRC_ADD_ROLE_W1_CENTRAL_CH_SEG) | 6864 le32_encode_bits(slot_arg->roles[i].primary_ch, 6865 RTW89_H2C_MRC_ADD_ROLE_W1_PRI_CH) | 6866 le32_encode_bits(slot_arg->roles[i].bw, 6867 RTW89_H2C_MRC_ADD_ROLE_W1_BW) | 6868 le32_encode_bits(slot_arg->roles[i].band, 6869 RTW89_H2C_MRC_ADD_ROLE_W1_CH_BAND_TYPE) | 6870 le32_encode_bits(slot_arg->roles[i].null_early, 6871 RTW89_H2C_MRC_ADD_ROLE_W1_NULL_EARLY) | 6872 le32_encode_bits(false, 6873 RTW89_H2C_MRC_ADD_ROLE_W1_RFK_BY_PASS) | 6874 le32_encode_bits(true, 6875 RTW89_H2C_MRC_ADD_ROLE_W1_CAN_BTC); 6876 slot_h2c->roles[i].macid_main_bitmap = 6877 cpu_to_le32(slot_arg->roles[i].macid_main_bitmap); 6878 slot_h2c->roles[i].macid_paired_bitmap = 6879 cpu_to_le32(slot_arg->roles[i].macid_paired_bitmap); 6880 } 6881 6882 calc_len: 6883 return struct_size(slot_h2c, roles, slot_arg->role_num); 6884 } 6885 6886 int rtw89_fw_h2c_mrc_add(struct rtw89_dev *rtwdev, 6887 const struct rtw89_fw_mrc_add_arg *arg) 6888 { 6889 struct rtw89_h2c_mrc_add *h2c_head; 6890 struct sk_buff *skb; 6891 unsigned int i; 6892 void *tmp; 6893 u32 
len; 6894 int ret; 6895 6896 len = sizeof(*h2c_head); 6897 for (i = 0; i < arg->slot_num; i++) 6898 len += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], NULL); 6899 6900 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6901 if (!skb) { 6902 rtw89_err(rtwdev, "failed to alloc skb for mrc add\n"); 6903 return -ENOMEM; 6904 } 6905 6906 skb_put(skb, len); 6907 tmp = skb->data; 6908 6909 h2c_head = tmp; 6910 h2c_head->w0 = le32_encode_bits(arg->sch_idx, 6911 RTW89_H2C_MRC_ADD_W0_SCH_IDX) | 6912 le32_encode_bits(arg->sch_type, 6913 RTW89_H2C_MRC_ADD_W0_SCH_TYPE) | 6914 le32_encode_bits(arg->slot_num, 6915 RTW89_H2C_MRC_ADD_W0_SLOT_NUM) | 6916 le32_encode_bits(arg->btc_in_sch, 6917 RTW89_H2C_MRC_ADD_W0_BTC_IN_SCH); 6918 6919 tmp += sizeof(*h2c_head); 6920 for (i = 0; i < arg->slot_num; i++) 6921 tmp += rtw89_fw_h2c_mrc_add_slot(rtwdev, &arg->slots[i], tmp); 6922 6923 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6924 H2C_CAT_MAC, 6925 H2C_CL_MRC, 6926 H2C_FUNC_ADD_MRC, 0, 0, 6927 len); 6928 6929 ret = rtw89_h2c_tx(rtwdev, skb, false); 6930 if (ret) { 6931 rtw89_err(rtwdev, "failed to send h2c\n"); 6932 dev_kfree_skb_any(skb); 6933 return -EBUSY; 6934 } 6935 6936 return 0; 6937 } 6938 6939 int rtw89_fw_h2c_mrc_start(struct rtw89_dev *rtwdev, 6940 const struct rtw89_fw_mrc_start_arg *arg) 6941 { 6942 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6943 struct rtw89_h2c_mrc_start *h2c; 6944 u32 len = sizeof(*h2c); 6945 struct sk_buff *skb; 6946 unsigned int cond; 6947 6948 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6949 if (!skb) { 6950 rtw89_err(rtwdev, "failed to alloc skb for mrc start\n"); 6951 return -ENOMEM; 6952 } 6953 6954 skb_put(skb, len); 6955 h2c = (struct rtw89_h2c_mrc_start *)skb->data; 6956 6957 h2c->w0 = le32_encode_bits(arg->sch_idx, 6958 RTW89_H2C_MRC_START_W0_SCH_IDX) | 6959 le32_encode_bits(arg->old_sch_idx, 6960 RTW89_H2C_MRC_START_W0_OLD_SCH_IDX) | 6961 le32_encode_bits(arg->action, 6962 RTW89_H2C_MRC_START_W0_ACTION); 6963 6964 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 6965 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 6966 6967 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6968 H2C_CAT_MAC, 6969 H2C_CL_MRC, 6970 H2C_FUNC_START_MRC, 0, 0, 6971 len); 6972 6973 cond = RTW89_MRC_WAIT_COND(arg->sch_idx, H2C_FUNC_START_MRC); 6974 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 6975 } 6976 6977 int rtw89_fw_h2c_mrc_del(struct rtw89_dev *rtwdev, u8 sch_idx) 6978 { 6979 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 6980 struct rtw89_h2c_mrc_del *h2c; 6981 u32 len = sizeof(*h2c); 6982 struct sk_buff *skb; 6983 unsigned int cond; 6984 6985 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 6986 if (!skb) { 6987 rtw89_err(rtwdev, "failed to alloc skb for mrc del\n"); 6988 return -ENOMEM; 6989 } 6990 6991 skb_put(skb, len); 6992 h2c = (struct rtw89_h2c_mrc_del *)skb->data; 6993 6994 h2c->w0 = le32_encode_bits(sch_idx, RTW89_H2C_MRC_DEL_W0_SCH_IDX); 6995 6996 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 6997 H2C_CAT_MAC, 6998 H2C_CL_MRC, 6999 H2C_FUNC_DEL_MRC, 0, 0, 7000 len); 7001 7002 cond = RTW89_MRC_WAIT_COND(sch_idx, H2C_FUNC_DEL_MRC); 7003 return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond); 7004 } 7005 7006 int rtw89_fw_h2c_mrc_req_tsf(struct rtw89_dev *rtwdev, 7007 const struct rtw89_fw_mrc_req_tsf_arg *arg, 7008 struct rtw89_mac_mrc_tsf_rpt *rpt) 7009 { 7010 struct rtw89_wait_info *wait = &rtwdev->mcc.wait; 7011 struct rtw89_h2c_mrc_req_tsf *h2c; 7012 struct rtw89_mac_mrc_tsf_rpt *tmp; 7013 struct sk_buff *skb; 
7014 unsigned int i; 7015 u32 len; 7016 int ret; 7017 7018 len = struct_size(h2c, infos, arg->num); 7019 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7020 if (!skb) { 7021 rtw89_err(rtwdev, "failed to alloc skb for mrc req tsf\n"); 7022 return -ENOMEM; 7023 } 7024 7025 skb_put(skb, len); 7026 h2c = (struct rtw89_h2c_mrc_req_tsf *)skb->data; 7027 7028 h2c->req_tsf_num = arg->num; 7029 for (i = 0; i < arg->num; i++) 7030 h2c->infos[i] = 7031 u8_encode_bits(arg->infos[i].band, 7032 RTW89_H2C_MRC_REQ_TSF_INFO_BAND) | 7033 u8_encode_bits(arg->infos[i].port, 7034 RTW89_H2C_MRC_REQ_TSF_INFO_PORT); 7035 7036 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7037 H2C_CAT_MAC, 7038 H2C_CL_MRC, 7039 H2C_FUNC_MRC_REQ_TSF, 0, 0, 7040 len); 7041 7042 ret = rtw89_h2c_tx_and_wait(rtwdev, skb, wait, RTW89_MRC_WAIT_COND_REQ_TSF); 7043 if (ret) 7044 return ret; 7045 7046 tmp = (struct rtw89_mac_mrc_tsf_rpt *)wait->data.buf; 7047 *rpt = *tmp; 7048 7049 return 0; 7050 } 7051 7052 int rtw89_fw_h2c_mrc_upd_bitmap(struct rtw89_dev *rtwdev, 7053 const struct rtw89_fw_mrc_upd_bitmap_arg *arg) 7054 { 7055 struct rtw89_h2c_mrc_upd_bitmap *h2c; 7056 u32 len = sizeof(*h2c); 7057 struct sk_buff *skb; 7058 int ret; 7059 7060 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7061 if (!skb) { 7062 rtw89_err(rtwdev, "failed to alloc skb for mrc upd bitmap\n"); 7063 return -ENOMEM; 7064 } 7065 7066 skb_put(skb, len); 7067 h2c = (struct rtw89_h2c_mrc_upd_bitmap *)skb->data; 7068 7069 h2c->w0 = le32_encode_bits(arg->sch_idx, 7070 RTW89_H2C_MRC_UPD_BITMAP_W0_SCH_IDX) | 7071 le32_encode_bits(arg->action, 7072 RTW89_H2C_MRC_UPD_BITMAP_W0_ACTION) | 7073 le32_encode_bits(arg->macid, 7074 RTW89_H2C_MRC_UPD_BITMAP_W0_MACID); 7075 h2c->w1 = le32_encode_bits(arg->client_macid, 7076 RTW89_H2C_MRC_UPD_BITMAP_W1_CLIENT_MACID); 7077 7078 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7079 H2C_CAT_MAC, 7080 H2C_CL_MRC, 7081 H2C_FUNC_MRC_UPD_BITMAP, 0, 0, 7082 len); 7083 7084 ret = rtw89_h2c_tx(rtwdev, skb, false); 7085 if (ret) { 7086 rtw89_err(rtwdev, "failed to send h2c\n"); 7087 dev_kfree_skb_any(skb); 7088 return -EBUSY; 7089 } 7090 7091 return 0; 7092 } 7093 7094 int rtw89_fw_h2c_mrc_sync(struct rtw89_dev *rtwdev, 7095 const struct rtw89_fw_mrc_sync_arg *arg) 7096 { 7097 struct rtw89_h2c_mrc_sync *h2c; 7098 u32 len = sizeof(*h2c); 7099 struct sk_buff *skb; 7100 int ret; 7101 7102 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7103 if (!skb) { 7104 rtw89_err(rtwdev, "failed to alloc skb for mrc sync\n"); 7105 return -ENOMEM; 7106 } 7107 7108 skb_put(skb, len); 7109 h2c = (struct rtw89_h2c_mrc_sync *)skb->data; 7110 7111 h2c->w0 = le32_encode_bits(true, RTW89_H2C_MRC_SYNC_W0_SYNC_EN) | 7112 le32_encode_bits(arg->src.port, 7113 RTW89_H2C_MRC_SYNC_W0_SRC_PORT) | 7114 le32_encode_bits(arg->src.band, 7115 RTW89_H2C_MRC_SYNC_W0_SRC_BAND) | 7116 le32_encode_bits(arg->dest.port, 7117 RTW89_H2C_MRC_SYNC_W0_DEST_PORT) | 7118 le32_encode_bits(arg->dest.band, 7119 RTW89_H2C_MRC_SYNC_W0_DEST_BAND); 7120 h2c->w1 = le32_encode_bits(arg->offset, RTW89_H2C_MRC_SYNC_W1_OFFSET); 7121 7122 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7123 H2C_CAT_MAC, 7124 H2C_CL_MRC, 7125 H2C_FUNC_MRC_SYNC, 0, 0, 7126 len); 7127 7128 ret = rtw89_h2c_tx(rtwdev, skb, false); 7129 if (ret) { 7130 rtw89_err(rtwdev, "failed to send h2c\n"); 7131 dev_kfree_skb_any(skb); 7132 return -EBUSY; 7133 } 7134 7135 return 0; 7136 } 7137 7138 int rtw89_fw_h2c_mrc_upd_duration(struct rtw89_dev *rtwdev, 7139 const struct rtw89_fw_mrc_upd_duration_arg *arg) 7140 { 
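	/* Update durations of already-added MRC slots in place: the request
	 * carries the schedule index, a new start TSF and one
	 * {slot_idx, duration} pair per entry in arg->slots[].
	 */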
7141 struct rtw89_h2c_mrc_upd_duration *h2c; 7142 struct sk_buff *skb; 7143 unsigned int i; 7144 u32 len; 7145 int ret; 7146 7147 len = struct_size(h2c, slots, arg->slot_num); 7148 skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len); 7149 if (!skb) { 7150 rtw89_err(rtwdev, "failed to alloc skb for mrc upd duration\n"); 7151 return -ENOMEM; 7152 } 7153 7154 skb_put(skb, len); 7155 h2c = (struct rtw89_h2c_mrc_upd_duration *)skb->data; 7156 7157 h2c->w0 = le32_encode_bits(arg->sch_idx, 7158 RTW89_H2C_MRC_UPD_DURATION_W0_SCH_IDX) | 7159 le32_encode_bits(arg->slot_num, 7160 RTW89_H2C_MRC_UPD_DURATION_W0_SLOT_NUM) | 7161 le32_encode_bits(false, 7162 RTW89_H2C_MRC_UPD_DURATION_W0_BTC_IN_SCH); 7163 7164 h2c->start_tsf_high = cpu_to_le32(arg->start_tsf >> 32); 7165 h2c->start_tsf_low = cpu_to_le32(arg->start_tsf); 7166 7167 for (i = 0; i < arg->slot_num; i++) { 7168 h2c->slots[i] = 7169 le32_encode_bits(arg->slots[i].slot_idx, 7170 RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX) | 7171 le32_encode_bits(arg->slots[i].duration, 7172 RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION); 7173 } 7174 7175 rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C, 7176 H2C_CAT_MAC, 7177 H2C_CL_MRC, 7178 H2C_FUNC_MRC_UPD_DURATION, 0, 0, 7179 len); 7180 7181 ret = rtw89_h2c_tx(rtwdev, skb, false); 7182 if (ret) { 7183 rtw89_err(rtwdev, "failed to send h2c\n"); 7184 dev_kfree_skb_any(skb); 7185 return -EBUSY; 7186 } 7187 7188 return 0; 7189 } 7190 7191 static bool __fw_txpwr_entry_zero_ext(const void *ext_ptr, u8 ext_len) 7192 { 7193 static const u8 zeros[U8_MAX] = {}; 7194 7195 return memcmp(ext_ptr, zeros, ext_len) == 0; 7196 } 7197 7198 #define __fw_txpwr_entry_acceptable(e, cursor, ent_sz) \ 7199 ({ \ 7200 u8 __var_sz = sizeof(*(e)); \ 7201 bool __accept; \ 7202 if (__var_sz >= (ent_sz)) \ 7203 __accept = true; \ 7204 else \ 7205 __accept = __fw_txpwr_entry_zero_ext((cursor) + __var_sz,\ 7206 (ent_sz) - __var_sz);\ 7207 __accept; \ 7208 }) 7209 7210 static bool 7211 fw_txpwr_byrate_entry_valid(const struct rtw89_fw_txpwr_byrate_entry *e, 7212 const void *cursor, 7213 const struct rtw89_txpwr_conf *conf) 7214 { 7215 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7216 return false; 7217 7218 if (e->band >= RTW89_BAND_NUM || e->bw >= RTW89_BYR_BW_NUM) 7219 return false; 7220 7221 switch (e->rs) { 7222 case RTW89_RS_CCK: 7223 if (e->shf + e->len > RTW89_RATE_CCK_NUM) 7224 return false; 7225 break; 7226 case RTW89_RS_OFDM: 7227 if (e->shf + e->len > RTW89_RATE_OFDM_NUM) 7228 return false; 7229 break; 7230 case RTW89_RS_MCS: 7231 if (e->shf + e->len > __RTW89_RATE_MCS_NUM || 7232 e->nss >= RTW89_NSS_NUM || 7233 e->ofdma >= RTW89_OFDMA_NUM) 7234 return false; 7235 break; 7236 case RTW89_RS_HEDCM: 7237 if (e->shf + e->len > RTW89_RATE_HEDCM_NUM || 7238 e->nss >= RTW89_NSS_HEDCM_NUM || 7239 e->ofdma >= RTW89_OFDMA_NUM) 7240 return false; 7241 break; 7242 case RTW89_RS_OFFSET: 7243 if (e->shf + e->len > __RTW89_RATE_OFFSET_NUM) 7244 return false; 7245 break; 7246 default: 7247 return false; 7248 } 7249 7250 return true; 7251 } 7252 7253 static 7254 void rtw89_fw_load_txpwr_byrate(struct rtw89_dev *rtwdev, 7255 const struct rtw89_txpwr_table *tbl) 7256 { 7257 const struct rtw89_txpwr_conf *conf = tbl->data; 7258 struct rtw89_fw_txpwr_byrate_entry entry = {}; 7259 struct rtw89_txpwr_byrate *byr_head; 7260 struct rtw89_rate_desc desc = {}; 7261 const void *cursor; 7262 u32 data; 7263 s8 *byr; 7264 int i; 7265 7266 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7267 if (!fw_txpwr_byrate_entry_valid(&entry, cursor, conf)) 7268 
continue; 7269 7270 byr_head = &rtwdev->byr[entry.band][entry.bw]; 7271 data = le32_to_cpu(entry.data); 7272 desc.ofdma = entry.ofdma; 7273 desc.nss = entry.nss; 7274 desc.rs = entry.rs; 7275 7276 for (i = 0; i < entry.len; i++, data >>= 8) { 7277 desc.idx = entry.shf + i; 7278 byr = rtw89_phy_raw_byr_seek(rtwdev, byr_head, &desc); 7279 *byr = data & 0xff; 7280 } 7281 } 7282 } 7283 7284 static bool 7285 fw_txpwr_lmt_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_2ghz_entry *e, 7286 const void *cursor, 7287 const struct rtw89_txpwr_conf *conf) 7288 { 7289 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7290 return false; 7291 7292 if (e->bw >= RTW89_2G_BW_NUM) 7293 return false; 7294 if (e->nt >= RTW89_NTX_NUM) 7295 return false; 7296 if (e->rs >= RTW89_RS_LMT_NUM) 7297 return false; 7298 if (e->bf >= RTW89_BF_NUM) 7299 return false; 7300 if (e->regd >= RTW89_REGD_NUM) 7301 return false; 7302 if (e->ch_idx >= RTW89_2G_CH_NUM) 7303 return false; 7304 7305 return true; 7306 } 7307 7308 static 7309 void rtw89_fw_load_txpwr_lmt_2ghz(struct rtw89_txpwr_lmt_2ghz_data *data) 7310 { 7311 const struct rtw89_txpwr_conf *conf = &data->conf; 7312 struct rtw89_fw_txpwr_lmt_2ghz_entry entry = {}; 7313 const void *cursor; 7314 7315 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7316 if (!fw_txpwr_lmt_2ghz_entry_valid(&entry, cursor, conf)) 7317 continue; 7318 7319 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 7320 [entry.ch_idx] = entry.v; 7321 } 7322 } 7323 7324 static bool 7325 fw_txpwr_lmt_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_5ghz_entry *e, 7326 const void *cursor, 7327 const struct rtw89_txpwr_conf *conf) 7328 { 7329 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7330 return false; 7331 7332 if (e->bw >= RTW89_5G_BW_NUM) 7333 return false; 7334 if (e->nt >= RTW89_NTX_NUM) 7335 return false; 7336 if (e->rs >= RTW89_RS_LMT_NUM) 7337 return false; 7338 if (e->bf >= RTW89_BF_NUM) 7339 return false; 7340 if (e->regd >= RTW89_REGD_NUM) 7341 return false; 7342 if (e->ch_idx >= RTW89_5G_CH_NUM) 7343 return false; 7344 7345 return true; 7346 } 7347 7348 static 7349 void rtw89_fw_load_txpwr_lmt_5ghz(struct rtw89_txpwr_lmt_5ghz_data *data) 7350 { 7351 const struct rtw89_txpwr_conf *conf = &data->conf; 7352 struct rtw89_fw_txpwr_lmt_5ghz_entry entry = {}; 7353 const void *cursor; 7354 7355 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7356 if (!fw_txpwr_lmt_5ghz_entry_valid(&entry, cursor, conf)) 7357 continue; 7358 7359 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 7360 [entry.ch_idx] = entry.v; 7361 } 7362 } 7363 7364 static bool 7365 fw_txpwr_lmt_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_6ghz_entry *e, 7366 const void *cursor, 7367 const struct rtw89_txpwr_conf *conf) 7368 { 7369 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7370 return false; 7371 7372 if (e->bw >= RTW89_6G_BW_NUM) 7373 return false; 7374 if (e->nt >= RTW89_NTX_NUM) 7375 return false; 7376 if (e->rs >= RTW89_RS_LMT_NUM) 7377 return false; 7378 if (e->bf >= RTW89_BF_NUM) 7379 return false; 7380 if (e->regd >= RTW89_REGD_NUM) 7381 return false; 7382 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 7383 return false; 7384 if (e->ch_idx >= RTW89_6G_CH_NUM) 7385 return false; 7386 7387 return true; 7388 } 7389 7390 static 7391 void rtw89_fw_load_txpwr_lmt_6ghz(struct rtw89_txpwr_lmt_6ghz_data *data) 7392 { 7393 const struct rtw89_txpwr_conf *conf = &data->conf; 7394 struct rtw89_fw_txpwr_lmt_6ghz_entry entry = {}; 7395 const void *cursor; 7396 7397 
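	/* Each entry that passes validation writes one limit value into
	 * data->v[bw][nt][rs][bf][regd][reg_6ghz_power][ch_idx]; malformed
	 * entries are silently skipped.
	 */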
rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7398 if (!fw_txpwr_lmt_6ghz_entry_valid(&entry, cursor, conf)) 7399 continue; 7400 7401 data->v[entry.bw][entry.nt][entry.rs][entry.bf][entry.regd] 7402 [entry.reg_6ghz_power][entry.ch_idx] = entry.v; 7403 } 7404 } 7405 7406 static bool 7407 fw_txpwr_lmt_ru_2ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_2ghz_entry *e, 7408 const void *cursor, 7409 const struct rtw89_txpwr_conf *conf) 7410 { 7411 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7412 return false; 7413 7414 if (e->ru >= RTW89_RU_NUM) 7415 return false; 7416 if (e->nt >= RTW89_NTX_NUM) 7417 return false; 7418 if (e->regd >= RTW89_REGD_NUM) 7419 return false; 7420 if (e->ch_idx >= RTW89_2G_CH_NUM) 7421 return false; 7422 7423 return true; 7424 } 7425 7426 static 7427 void rtw89_fw_load_txpwr_lmt_ru_2ghz(struct rtw89_txpwr_lmt_ru_2ghz_data *data) 7428 { 7429 const struct rtw89_txpwr_conf *conf = &data->conf; 7430 struct rtw89_fw_txpwr_lmt_ru_2ghz_entry entry = {}; 7431 const void *cursor; 7432 7433 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7434 if (!fw_txpwr_lmt_ru_2ghz_entry_valid(&entry, cursor, conf)) 7435 continue; 7436 7437 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 7438 } 7439 } 7440 7441 static bool 7442 fw_txpwr_lmt_ru_5ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_5ghz_entry *e, 7443 const void *cursor, 7444 const struct rtw89_txpwr_conf *conf) 7445 { 7446 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7447 return false; 7448 7449 if (e->ru >= RTW89_RU_NUM) 7450 return false; 7451 if (e->nt >= RTW89_NTX_NUM) 7452 return false; 7453 if (e->regd >= RTW89_REGD_NUM) 7454 return false; 7455 if (e->ch_idx >= RTW89_5G_CH_NUM) 7456 return false; 7457 7458 return true; 7459 } 7460 7461 static 7462 void rtw89_fw_load_txpwr_lmt_ru_5ghz(struct rtw89_txpwr_lmt_ru_5ghz_data *data) 7463 { 7464 const struct rtw89_txpwr_conf *conf = &data->conf; 7465 struct rtw89_fw_txpwr_lmt_ru_5ghz_entry entry = {}; 7466 const void *cursor; 7467 7468 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7469 if (!fw_txpwr_lmt_ru_5ghz_entry_valid(&entry, cursor, conf)) 7470 continue; 7471 7472 data->v[entry.ru][entry.nt][entry.regd][entry.ch_idx] = entry.v; 7473 } 7474 } 7475 7476 static bool 7477 fw_txpwr_lmt_ru_6ghz_entry_valid(const struct rtw89_fw_txpwr_lmt_ru_6ghz_entry *e, 7478 const void *cursor, 7479 const struct rtw89_txpwr_conf *conf) 7480 { 7481 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7482 return false; 7483 7484 if (e->ru >= RTW89_RU_NUM) 7485 return false; 7486 if (e->nt >= RTW89_NTX_NUM) 7487 return false; 7488 if (e->regd >= RTW89_REGD_NUM) 7489 return false; 7490 if (e->reg_6ghz_power >= NUM_OF_RTW89_REG_6GHZ_POWER) 7491 return false; 7492 if (e->ch_idx >= RTW89_6G_CH_NUM) 7493 return false; 7494 7495 return true; 7496 } 7497 7498 static 7499 void rtw89_fw_load_txpwr_lmt_ru_6ghz(struct rtw89_txpwr_lmt_ru_6ghz_data *data) 7500 { 7501 const struct rtw89_txpwr_conf *conf = &data->conf; 7502 struct rtw89_fw_txpwr_lmt_ru_6ghz_entry entry = {}; 7503 const void *cursor; 7504 7505 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7506 if (!fw_txpwr_lmt_ru_6ghz_entry_valid(&entry, cursor, conf)) 7507 continue; 7508 7509 data->v[entry.ru][entry.nt][entry.regd][entry.reg_6ghz_power] 7510 [entry.ch_idx] = entry.v; 7511 } 7512 } 7513 7514 static bool 7515 fw_tx_shape_lmt_entry_valid(const struct rtw89_fw_tx_shape_lmt_entry *e, 7516 const void *cursor, 7517 const struct rtw89_txpwr_conf *conf) 7518 { 7519 if 
(!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7520 return false; 7521 7522 if (e->band >= RTW89_BAND_NUM) 7523 return false; 7524 if (e->tx_shape_rs >= RTW89_RS_TX_SHAPE_NUM) 7525 return false; 7526 if (e->regd >= RTW89_REGD_NUM) 7527 return false; 7528 7529 return true; 7530 } 7531 7532 static 7533 void rtw89_fw_load_tx_shape_lmt(struct rtw89_tx_shape_lmt_data *data) 7534 { 7535 const struct rtw89_txpwr_conf *conf = &data->conf; 7536 struct rtw89_fw_tx_shape_lmt_entry entry = {}; 7537 const void *cursor; 7538 7539 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7540 if (!fw_tx_shape_lmt_entry_valid(&entry, cursor, conf)) 7541 continue; 7542 7543 data->v[entry.band][entry.tx_shape_rs][entry.regd] = entry.v; 7544 } 7545 } 7546 7547 static bool 7548 fw_tx_shape_lmt_ru_entry_valid(const struct rtw89_fw_tx_shape_lmt_ru_entry *e, 7549 const void *cursor, 7550 const struct rtw89_txpwr_conf *conf) 7551 { 7552 if (!__fw_txpwr_entry_acceptable(e, cursor, conf->ent_sz)) 7553 return false; 7554 7555 if (e->band >= RTW89_BAND_NUM) 7556 return false; 7557 if (e->regd >= RTW89_REGD_NUM) 7558 return false; 7559 7560 return true; 7561 } 7562 7563 static 7564 void rtw89_fw_load_tx_shape_lmt_ru(struct rtw89_tx_shape_lmt_ru_data *data) 7565 { 7566 const struct rtw89_txpwr_conf *conf = &data->conf; 7567 struct rtw89_fw_tx_shape_lmt_ru_entry entry = {}; 7568 const void *cursor; 7569 7570 rtw89_for_each_in_txpwr_conf(entry, cursor, conf) { 7571 if (!fw_tx_shape_lmt_ru_entry_valid(&entry, cursor, conf)) 7572 continue; 7573 7574 data->v[entry.band][entry.regd] = entry.v; 7575 } 7576 } 7577 7578 const struct rtw89_rfe_parms * 7579 rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, 7580 const struct rtw89_rfe_parms *init) 7581 { 7582 struct rtw89_rfe_data *rfe_data = rtwdev->rfe_data; 7583 struct rtw89_rfe_parms *parms; 7584 7585 if (!rfe_data) 7586 return init; 7587 7588 parms = &rfe_data->rfe_parms; 7589 if (init) 7590 *parms = *init; 7591 7592 if (rtw89_txpwr_conf_valid(&rfe_data->byrate.conf)) { 7593 rfe_data->byrate.tbl.data = &rfe_data->byrate.conf; 7594 rfe_data->byrate.tbl.size = 0; /* don't care here */ 7595 rfe_data->byrate.tbl.load = rtw89_fw_load_txpwr_byrate; 7596 parms->byr_tbl = &rfe_data->byrate.tbl; 7597 } 7598 7599 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_2ghz.conf)) { 7600 rtw89_fw_load_txpwr_lmt_2ghz(&rfe_data->lmt_2ghz); 7601 parms->rule_2ghz.lmt = &rfe_data->lmt_2ghz.v; 7602 } 7603 7604 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_5ghz.conf)) { 7605 rtw89_fw_load_txpwr_lmt_5ghz(&rfe_data->lmt_5ghz); 7606 parms->rule_5ghz.lmt = &rfe_data->lmt_5ghz.v; 7607 } 7608 7609 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_6ghz.conf)) { 7610 rtw89_fw_load_txpwr_lmt_6ghz(&rfe_data->lmt_6ghz); 7611 parms->rule_6ghz.lmt = &rfe_data->lmt_6ghz.v; 7612 } 7613 7614 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_2ghz.conf)) { 7615 rtw89_fw_load_txpwr_lmt_ru_2ghz(&rfe_data->lmt_ru_2ghz); 7616 parms->rule_2ghz.lmt_ru = &rfe_data->lmt_ru_2ghz.v; 7617 } 7618 7619 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_5ghz.conf)) { 7620 rtw89_fw_load_txpwr_lmt_ru_5ghz(&rfe_data->lmt_ru_5ghz); 7621 parms->rule_5ghz.lmt_ru = &rfe_data->lmt_ru_5ghz.v; 7622 } 7623 7624 if (rtw89_txpwr_conf_valid(&rfe_data->lmt_ru_6ghz.conf)) { 7625 rtw89_fw_load_txpwr_lmt_ru_6ghz(&rfe_data->lmt_ru_6ghz); 7626 parms->rule_6ghz.lmt_ru = &rfe_data->lmt_ru_6ghz.v; 7627 } 7628 7629 if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt.conf)) { 7630 rtw89_fw_load_tx_shape_lmt(&rfe_data->tx_shape_lmt); 7631 parms->tx_shape.lmt = 
			&rfe_data->tx_shape_lmt.v;
	}

	if (rtw89_txpwr_conf_valid(&rfe_data->tx_shape_lmt_ru.conf)) {
		rtw89_fw_load_tx_shape_lmt_ru(&rfe_data->tx_shape_lmt_ru);
		parms->tx_shape.lmt_ru = &rfe_data->tx_shape_lmt_ru.v;
	}

	return parms;
}
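/* Usage sketch (illustrative only, not part of this file): once the
 * firmware elements have been parsed into rtwdev->rfe_data, a caller can
 * adopt the firmware-provided TX power tables while keeping the built-in
 * ones as fallback, e.g.:
 *
 *	const struct rtw89_rfe_parms *parms;
 *
 *	parms = rtw89_load_rfe_data_from_fw(rtwdev, chip_default_parms);
 *	rtwdev->rfe_parms = parms;
 *
 * Here "chip_default_parms" stands in for whatever built-in parameter set
 * the chip provides; rtw89_load_rfe_data_from_fw() simply returns it when
 * no firmware RFE data was loaded.
 */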