// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2023, 2025 Intel Corporation
 */
#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "regulatory.h"
#include "fw/runtime.h"
#include "fw/uefi.h"

/*
 * Each getter tries the BIOS table via UEFI first (depending on the UEFI
 * WiFi GUID lock status) and falls back to ACPI when UEFI does not
 * provide it.
 */
#define GET_BIOS_TABLE(__name, ...)					\
do {									\
	int ret = -ENOENT;						\
	if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)	\
		ret = iwl_uefi_get_ ## __name(__VA_ARGS__);		\
	if (ret < 0)							\
		ret = iwl_acpi_get_ ## __name(__VA_ARGS__);		\
	return ret;							\
} while (0)

#define IWL_BIOS_TABLE_LOADER(__name)					\
int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt)		\
{GET_BIOS_TABLE(__name, fwrt); }					\
IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)

#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type)			\
int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt,		\
			    data_type * data)				\
{GET_BIOS_TABLE(__name, fwrt, data); }					\
IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)

IWL_BIOS_TABLE_LOADER(wrds_table);
IWL_BIOS_TABLE_LOADER(ewrd_table);
IWL_BIOS_TABLE_LOADER(wgds_table);
IWL_BIOS_TABLE_LOADER(ppag_table);
IWL_BIOS_TABLE_LOADER(phy_filters);
IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32);

static const struct dmi_system_id dmi_ppag_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "MSFT",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
		},
	},
	{ .ident = "ASUSTEK",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{ .ident = "ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
		},
	},
	{ .ident = "GOOGLE-HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
		},
	},
	{ .ident = "GOOGLE-ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
		},
	},
	{ .ident = "GOOGLE-SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	{ .ident = "RAZER",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
		},
	},
	{ .ident = "Honor",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
		},
	},
	{ .ident = "WIKO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "WIKO"),
		},
	},
	{}
};

static const struct dmi_system_id dmi_tas_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "LENOVO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "MSFT",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
		},
	},
	{ .ident = "Acer",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		},
	},
	{ .ident = "ASUSTEK",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{ .ident = "ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUS"),
		},
	},
	{ .ident = "GOOGLE-HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
		},
	},
	{ .ident = "MSI",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
		},
	},
	{ .ident = "Honor",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
		},
	},
	/* keep last */
	{}
};

bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
{
	/*
	 * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
	 * earlier firmware versions. Unfortunately, we don't have a
	 * TLV API flag to rely on, so rely on the major version which
	 * is in the first byte of ucode_ver. This was implemented
	 * initially on version 38 and then backported to 17. It was
	 * also backported to 29, but only for 7265D devices. The
	 * intention was to have it in 36 as well, but not all 8000
	 * family got this feature enabled. The 8000 family is the
	 * only one using version 36, so skip this version entirely.
	 */
	return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
	       (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
		fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) ||
	       (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
		((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) ==
		 CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);

int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
			   struct iwl_per_chain_offset *table,
			   u32 n_bands, u32 n_profiles)
{
	int i, j;

	if (!fwrt->geo_enabled)
		return -ENODATA;

	if (!iwl_sar_geo_support(fwrt))
		return -EOPNOTSUPP;

	for (i = 0; i < n_profiles; i++) {
		for (j = 0; j < n_bands; j++) {
			struct iwl_per_chain_offset *chain =
				&table[i * n_bands + j];

			chain->max_tx_power =
				cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
			chain->chain_a =
				fwrt->geo_profiles[i].bands[j].chains[0];
			chain->chain_b =
				fwrt->geo_profiles[i].bands[j].chains[1];
			IWL_DEBUG_RADIO(fwrt,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j,
					fwrt->geo_profiles[i].bands[j].chains[0],
					fwrt->geo_profiles[i].bands[j].chains[1],
					fwrt->geo_profiles[i].bands[j].max);
		}
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);

static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
			      __le16 *per_chain, u32 n_subbands,
			      int prof_a, int prof_b)
{
	int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
	int i, j;

	for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
		struct iwl_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
		if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &fwrt->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
					profs[i]);
			/*
			 * if one of the profiles is disabled, we
			 * ignore all of them and return 1 to
			 * differentiate disabled from other failures.
			 */
			return 1;
		}

		IWL_DEBUG_INFO(fwrt,
			       "SAR EWRD: chain %d profile index %d\n",
			       i, profs[i]);
		IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
		for (j = 0; j < n_subbands; j++) {
			per_chain[i * n_subbands + j] =
				cpu_to_le16(prof->chains[i].subbands[j]);
			IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
					j, prof->chains[i].subbands[j]);
		}
	}

	return 0;
}

int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
			 __le16 *per_chain, u32 n_tables, u32 n_subbands,
			 int prof_a, int prof_b)
{
	int i, ret = 0;

	for (i = 0; i < n_tables; i++) {
		ret = iwl_sar_fill_table(fwrt,
					 &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
					 n_subbands, prof_a, prof_b);
		if (ret)
			break;
	}

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);

static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
				 int subband)
{
	s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];

	if ((subband == 0 &&
	     (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
	    (subband != 0 &&
	     (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
		IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
		return false;
	}
	return true;
}

/* Utility function for iwlmvm and iwlxvt */
int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
			union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
	u8 cmd_ver;
	int i, j, num_sub_bands;
	s8 *gain;
	bool send_ppag_always;

	/* many firmware images for JF lie about this */
	if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
	    CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
		return -EOPNOTSUPP;

	if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
		IWL_DEBUG_RADIO(fwrt,
				"PPAG capability not supported by FW, command not sent.\n");
		return -EINVAL;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
					WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD), 1);
	/*
	 * Starting from ver 4, the driver needs to send the PPAG CMD
	 * regardless of whether PPAG is enabled/disabled or valid/invalid.
	 */
	send_ppag_always = cmd_ver > 3;

	/* Don't send PPAG if it is disabled */
	if (!send_ppag_always && !fwrt->ppag_flags) {
		IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
		return -EINVAL;
	}

	IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
	if (cmd_ver == 1) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V1;
		gain = cmd->v1.gain[0];
		*cmd_size = sizeof(cmd->v1);
		cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK);
		if (fwrt->ppag_bios_rev >= 1) {
			/* in this case FW supports revision 0 */
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table rev is %d, send truncated table\n",
					fwrt->ppag_bios_rev);
		}
	} else if (cmd_ver == 5) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V2;
		gain = cmd->v5.gain[0];
		*cmd_size = sizeof(cmd->v5);
		cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK);
		if (fwrt->ppag_bios_rev == 0) {
			/* in this case FW supports revisions 1, 2 or 3 */
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table rev is 0, send padded table\n");
		}
	} else if (cmd_ver == 7) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V2;
		gain = cmd->v7.gain[0];
		*cmd_size = sizeof(cmd->v7);
		cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source;
		cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
		cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
	} else {
		IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
		return -EINVAL;
	}

	/* ppag mode */
	IWL_DEBUG_RADIO(fwrt,
			"PPAG MODE bits were read from bios: %d\n",
			fwrt->ppag_flags);

	if (cmd_ver == 1 &&
	    !fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) {
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
		IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
	} else {
		IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
	}

	/* The 'flags' field is the same in v1 and v5 so we can just
	 * use v1 to access it.
	 */
	IWL_DEBUG_RADIO(fwrt,
			"PPAG MODE bits going to be sent: %d\n",
			(cmd_ver < 7) ?
			le32_to_cpu(cmd->v1.flags) :
			le32_to_cpu(cmd->v7.ppag_config_info.value));

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		for (j = 0; j < num_sub_bands; j++) {
			if (!send_ppag_always &&
			    !iwl_ppag_value_valid(fwrt, i, j))
				return -EINVAL;

			gain[i * num_sub_bands + j] =
				fwrt->ppag_chains[i].subbands[j];
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table: chain[%d] band[%d]: gain = %d\n",
					i, j, gain[i * num_sub_bands + j]);
		}
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);

bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
	if (!dmi_check_system(dmi_ppag_approved_list)) {
		IWL_DEBUG_RADIO(fwrt,
				"System vendor '%s' is not in the approved list, disabling PPAG.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
		fwrt->ppag_flags = 0;
		return false;
	}

	return true;
}
IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);

bool iwl_is_tas_approved(void)
{
	return dmi_check_system(dmi_tas_approved_list);
}
IWL_EXPORT_SYMBOL(iwl_is_tas_approved);

struct iwl_tas_selection_data
iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
{
	struct iwl_tas_selection_data tas_selection_out = {};
	u8 override_iec = u32_get_bits(tas_selection_in,
				       IWL_WTAS_OVERRIDE_IEC_MSK);
	u8 canada_tas_uhb = u32_get_bits(tas_selection_in,
					 IWL_WTAS_CANADA_UHB_MSK);
	u8 enabled_iec = u32_get_bits(tas_selection_in,
				      IWL_WTAS_ENABLE_IEC_MSK);
	u8 usa_tas_uhb = u32_get_bits(tas_selection_in,
				      IWL_WTAS_USA_UHB_MSK);

	if (tbl_rev > 0) {
		tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb;
		tas_selection_out.override_tas_iec = override_iec;
		tas_selection_out.enable_tas_iec = enabled_iec;
	}

	if (tbl_rev > 1)
		tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb;

	return tas_selection_out;
}
IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);

bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
{
	for (int i = 0; i < *size; i++) {
		if (list[i] == mcc)
			return true;
	}

	/* Verify that there is room for another country
	 * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full.
	 */
	if (*size >= IWL_WTAS_BLACK_LIST_MAX)
		return false;

	list[(*size)++] = mcc;
	return true;
}
IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);

__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
	int ret;
	u32 val;
	__le32 config_bitmap = 0;

	switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
	case IWL_CFG_RF_TYPE_HR1:
	case IWL_CFG_RF_TYPE_HR2:
	case IWL_CFG_RF_TYPE_JF1:
	case IWL_CFG_RF_TYPE_JF2:
		ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
				       &val);

		if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
		break;
	default:
		break;
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
	if (!ret) {
		if (val == DSM_VALUE_SRD_PASSIVE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
		else if (val == DSM_VALUE_SRD_DISABLE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
	}

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
		ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
				       &val);
		/*
		 * Enable China 2022 regulatory support if the BIOS object
		 * does not exist or if it is enabled in the BIOS.
		 */
		if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
	}

	return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);

static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
	size_t cmd_size;

	switch (cmd_ver) {
	case 12:
	case 11:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd);
		break;
	case 10:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
		break;
	case 9:
	case 8:
	case 7:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
		break;
	case 6:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
		break;
	case 5:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
		break;
	case 4:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
		break;
	case 3:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
		break;
	case 2:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
		break;
	default:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
		break;
	}
	return cmd_size;
}

int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
			 struct iwl_lari_config_change_cmd *cmd,
			 size_t *cmd_size)
{
	int ret;
	u32 value;
	bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE);
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE), 1);

	if (WARN_ONCE(cmd_ver > 12,
		      "Don't add newer versions to this function\n"))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);

	cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_11AX_ALLOW_BITMAP;
		cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_UNII4_ALLOW_BITMAP;

		/* Since version 9, bits 4 and 5 are supported
		 * regardless of this capability. Bypass this masking
		 * if the firmware can accept a raw DSM table.
		 */
		if (!has_raw_dsm_capa && cmd_ver < 9 &&
		    !fw_has_capa(&fwrt->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
			value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
				   DSM_VALUE_UNII4_CANADA_EN_MSK);

		cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;

		if (!has_raw_dsm_capa && cmd_ver < 8)
			value &= ~ACTIVATE_5G2_IN_WW_MASK;

		/* Since version 12, bits 5 and 6 are supported
		 * regardless of this capability. Bypass this masking
		 * if the firmware can accept a raw DSM table.
		 */
		if (!has_raw_dsm_capa && cmd_ver < 12 &&
		    !fw_has_capa(&fwrt->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
			value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;

		cmd->chan_state_active_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
	if (!ret)
		cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_FORCE_DISABLE_CHANNELS_ALLOWED_BITMAP;
		cmd->force_disable_channels_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
			       &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_EDT_ALLOWED_BITMAP;
		cmd->edt_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_wbem(fwrt, &value);
	if (!ret)
		cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
	if (!ret)
		cmd->oem_11be_allow_bitmap = cpu_to_le32(value);

	if (cmd->config_bitmap ||
	    cmd->oem_uhb_allow_bitmap ||
	    cmd->oem_11ax_allow_bitmap ||
	    cmd->oem_unii4_allow_bitmap ||
	    cmd->chan_state_active_bitmap ||
	    cmd->force_disable_channels_bitmap ||
	    cmd->edt_bitmap ||
	    cmd->oem_320mhz_allow_bitmap ||
	    cmd->oem_11be_allow_bitmap) {
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->config_bitmap),
				le32_to_cpu(cmd->oem_11ax_allow_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
				le32_to_cpu(cmd->oem_unii4_allow_bitmap),
				le32_to_cpu(cmd->chan_state_active_bitmap),
				cmd_ver);
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
				le32_to_cpu(cmd->oem_uhb_allow_bitmap),
				le32_to_cpu(cmd->force_disable_channels_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->edt_bitmap),
				le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->oem_11be_allow_bitmap));
	} else {
		return 1;
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_lari_config);

int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
		     u32 *value)
{
	GET_BIOS_TABLE(dsm, fwrt, func, value);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);
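
/*
 * Note on the iwl_bios_get_*() helpers (see GET_BIOS_TABLE above): callers
 * in this file treat a negative return value as "not provided by the BIOS"
 * (neither UEFI nor ACPI supplied it) and 0 as success. A minimal,
 * illustrative caller sketch only; the local names are arbitrary:
 *
 *	u32 val;
 *
 *	if (iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val) < 0)
 *		return;		// value not available from BIOS
 *	// otherwise 'val' holds the DSM value and can be masked/used
 */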

bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
{
	/* Some kind of regulatory mess means we need to currently disallow
	 * puncturing in the US and Canada unless enabled in BIOS.
	 */
	switch (mcc) {
	case IWL_MCC_US:
		return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK;
	case IWL_MCC_CANADA:
		return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;
	default:
		return true;
	}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);

bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt)
{
	/* default behaviour is disabled */
	u32 value = 0;
	int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value);

	if (ret < 0) {
		IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret);
		return false;
	}

	value &= DSM_VALUE_RFI_DISABLE;
	/* The RFI BIOS config value can only be 0 or 3:
	 * 0 means DDR and DLVR are enabled, 3 means both are disabled.
	 * 1 and 2 are invalid BIOS configurations, so it is not possible
	 * to disable DDR/DLVR separately.
	 */
	if (!value) {
		IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n");
		return true;
	} else if (value == DSM_VALUE_RFI_DISABLE) {
		IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n");
	} else {
		IWL_DEBUG_RADIO(fwrt,
				"DSM RFI got invalid value, value=%d\n", value);
	}

	return false;
}
IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios);
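
/*
 * Illustrative sketch only (not code from this file): an op-mode caller
 * would typically consult the helpers above before acting, e.g.
 *
 *	if (iwl_rfi_is_enabled_in_bios(fwrt))
 *		// RFI (DDR/DLVR) mitigation may be configured in firmware
 *
 *	if (!iwl_puncturing_is_allowed_in_bios(punct_bitmap, mcc))
 *		// do not enable puncturing for this regulatory domain
 *
 * Here 'punct_bitmap' and 'mcc' stand for a BIOS-provided puncturing
 * bitmap and the current mobile country code; the actual call sites are
 * in the op-mode code, not in this file.
 */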