1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2023, 2025 Intel Corporation 4 */ 5 #include <linux/dmi.h> 6 #include "iwl-drv.h" 7 #include "iwl-debug.h" 8 #include "regulatory.h" 9 #include "fw/runtime.h" 10 #include "fw/uefi.h" 11 12 #define GET_BIOS_TABLE(__name, ...) \ 13 do { \ 14 int ret = -ENOENT; \ 15 if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED) \ 16 ret = iwl_uefi_get_ ## __name(__VA_ARGS__); \ 17 if (ret < 0) \ 18 ret = iwl_acpi_get_ ## __name(__VA_ARGS__); \ 19 return ret; \ 20 } while (0) 21 22 #define IWL_BIOS_TABLE_LOADER(__name) \ 23 int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt) \ 24 {GET_BIOS_TABLE(__name, fwrt); } \ 25 IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name) 26 27 #define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type) \ 28 int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt, \ 29 data_type * data) \ 30 {GET_BIOS_TABLE(__name, fwrt, data); } \ 31 IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name) 32 33 IWL_BIOS_TABLE_LOADER(wrds_table); 34 IWL_BIOS_TABLE_LOADER(ewrd_table); 35 IWL_BIOS_TABLE_LOADER(wgds_table); 36 IWL_BIOS_TABLE_LOADER(ppag_table); 37 IWL_BIOS_TABLE_LOADER(phy_filters); 38 IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data); 39 IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64); 40 IWL_BIOS_TABLE_LOADER_DATA(mcc, char); 41 IWL_BIOS_TABLE_LOADER_DATA(eckv, u32); 42 IWL_BIOS_TABLE_LOADER_DATA(wbem, u32); 43 IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32); 44 45 46 static const struct dmi_system_id dmi_ppag_approved_list[] = { 47 { .ident = "HP", 48 .matches = { 49 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 50 }, 51 }, 52 { .ident = "SAMSUNG", 53 .matches = { 54 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), 55 }, 56 }, 57 { .ident = "MSFT", 58 .matches = { 59 DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), 60 }, 61 }, 62 { .ident = "ASUS", 63 .matches = { 64 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 65 }, 66 }, 67 { .ident = "GOOGLE-HP", 68 .matches = { 69 DMI_MATCH(DMI_SYS_VENDOR, "Google"), 70 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 71 }, 72 }, 73 { .ident = "GOOGLE-ASUS", 74 .matches = { 75 DMI_MATCH(DMI_SYS_VENDOR, "Google"), 76 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."), 77 }, 78 }, 79 { .ident = "GOOGLE-SAMSUNG", 80 .matches = { 81 DMI_MATCH(DMI_SYS_VENDOR, "Google"), 82 DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), 83 }, 84 }, 85 { .ident = "DELL", 86 .matches = { 87 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 88 }, 89 }, 90 { .ident = "DELL", 91 .matches = { 92 DMI_MATCH(DMI_SYS_VENDOR, "Alienware"), 93 }, 94 }, 95 { .ident = "RAZER", 96 .matches = { 97 DMI_MATCH(DMI_SYS_VENDOR, "Razer"), 98 }, 99 }, 100 { .ident = "Honor", 101 .matches = { 102 DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), 103 }, 104 }, 105 { .ident = "WIKO", 106 .matches = { 107 DMI_MATCH(DMI_SYS_VENDOR, "WIKO"), 108 }, 109 }, 110 {} 111 }; 112 113 static const struct dmi_system_id dmi_tas_approved_list[] = { 114 { .ident = "HP", 115 .matches = { 116 DMI_MATCH(DMI_SYS_VENDOR, "HP"), 117 }, 118 }, 119 { .ident = "SAMSUNG", 120 .matches = { 121 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"), 122 }, 123 }, 124 { .ident = "LENOVO", 125 .matches = { 126 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), 127 }, 128 }, 129 { .ident = "DELL", 130 .matches = { 131 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 132 }, 133 }, 134 { .ident = "MSFT", 135 .matches = { 136 DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), 137 }, 138 }, 139 { .ident = "Acer", 140 .matches = { 141 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 142 }, 143 }, 144 { .ident = "ASUS", 145 .matches = { 146 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 147 }, 148 }, 149 { .ident = "GOOGLE-HP", 150 .matches = { 151 DMI_MATCH(DMI_SYS_VENDOR, "Google"), 152 DMI_MATCH(DMI_BOARD_VENDOR, "HP"), 153 }, 154 }, 155 { .ident = "MSI", 156 .matches = { 157 DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."), 158 }, 159 }, 160 { .ident = "Honor", 161 .matches = { 162 DMI_MATCH(DMI_SYS_VENDOR, "HONOR"), 163 }, 164 }, 165 /* keep last */ 166 {} 167 }; 168 169 bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt) 170 { 171 /* 172 * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on 173 * earlier firmware versions. Unfortunately, we don't have a 174 * TLV API flag to rely on, so rely on the major version which 175 * is in the first byte of ucode_ver. This was implemented 176 * initially on version 38 and then backported to 17. It was 177 * also backported to 29, but only for 7265D devices. The 178 * intention was to have it in 36 as well, but not all 8000 179 * family got this feature enabled. The 8000 family is the 180 * only one using version 36, so skip this version entirely. 181 */ 182 return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 || 183 (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 && 184 fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) || 185 (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 && 186 ((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) == 187 CSR_HW_REV_TYPE_7265D)); 188 } 189 IWL_EXPORT_SYMBOL(iwl_sar_geo_support); 190 191 int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt, 192 struct iwl_per_chain_offset *table, 193 u32 n_bands, u32 n_profiles) 194 { 195 int i, j; 196 197 if (!fwrt->geo_enabled) 198 return -ENODATA; 199 200 if (!iwl_sar_geo_support(fwrt)) 201 return -EOPNOTSUPP; 202 203 for (i = 0; i < n_profiles; i++) { 204 for (j = 0; j < n_bands; j++) { 205 struct iwl_per_chain_offset *chain = 206 &table[i * n_bands + j]; 207 208 chain->max_tx_power = 209 cpu_to_le16(fwrt->geo_profiles[i].bands[j].max); 210 chain->chain_a = 211 fwrt->geo_profiles[i].bands[j].chains[0]; 212 chain->chain_b = 213 fwrt->geo_profiles[i].bands[j].chains[1]; 214 IWL_DEBUG_RADIO(fwrt, 215 "SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n", 216 i, j, 217 fwrt->geo_profiles[i].bands[j].chains[0], 218 fwrt->geo_profiles[i].bands[j].chains[1], 219 fwrt->geo_profiles[i].bands[j].max); 220 } 221 } 222 223 return 0; 224 } 225 IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table); 226 227 static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt, 228 __le16 *per_chain, u32 n_subbands, 229 int prof_a, int prof_b) 230 { 231 int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b }; 232 int i, j; 233 234 for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) { 235 struct iwl_sar_profile *prof; 236 237 /* don't allow SAR to be disabled (profile 0 means disable) */ 238 if (profs[i] == 0) 239 return -EPERM; 240 241 /* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */ 242 if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM) 243 return -EINVAL; 244 245 /* profiles go from 1 to 4, so decrement to access the array */ 246 prof = &fwrt->sar_profiles[profs[i] - 1]; 247 248 /* if the profile is disabled, do nothing */ 249 if (!prof->enabled) { 250 IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n", 251 profs[i]); 252 /* 253 * if one of the profiles is disabled, we 254 * ignore all of them and return 1 to 255 * differentiate disabled from other failures. 256 */ 257 return 1; 258 } 259 260 IWL_DEBUG_INFO(fwrt, 261 "SAR EWRD: chain %d profile index %d\n", 262 i, profs[i]); 263 IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i); 264 for (j = 0; j < n_subbands; j++) { 265 per_chain[i * n_subbands + j] = 266 cpu_to_le16(prof->chains[i].subbands[j]); 267 IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n", 268 j, prof->chains[i].subbands[j]); 269 } 270 } 271 272 return 0; 273 } 274 275 int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt, 276 __le16 *per_chain, u32 n_tables, u32 n_subbands, 277 int prof_a, int prof_b) 278 { 279 int i, ret = 0; 280 281 for (i = 0; i < n_tables; i++) { 282 ret = iwl_sar_fill_table(fwrt, 283 &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS], 284 n_subbands, prof_a, prof_b); 285 if (ret) 286 break; 287 } 288 289 return ret; 290 } 291 IWL_EXPORT_SYMBOL(iwl_sar_fill_profile); 292 293 static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain, 294 int subband) 295 { 296 s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband]; 297 298 if ((subband == 0 && 299 (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) || 300 (subband != 0 && 301 (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) { 302 IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val); 303 return false; 304 } 305 return true; 306 } 307 308 /* Utility function for iwlmvm and iwlxvt */ 309 int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt, 310 union iwl_ppag_table_cmd *cmd, int *cmd_size) 311 { 312 u8 cmd_ver; 313 int i, j, num_sub_bands; 314 s8 *gain; 315 bool send_ppag_always; 316 317 /* many firmware images for JF lie about this */ 318 if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) == 319 CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF)) 320 return -EOPNOTSUPP; 321 322 if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) { 323 IWL_DEBUG_RADIO(fwrt, 324 "PPAG capability not supported by FW, command not sent.\n"); 325 return -EINVAL; 326 } 327 328 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, 329 WIDE_ID(PHY_OPS_GROUP, 330 PER_PLATFORM_ANT_GAIN_CMD), 1); 331 /* 332 * Starting from ver 4, driver needs to send the PPAG CMD regardless 333 * if PPAG is enabled/disabled or valid/invalid. 334 */ 335 send_ppag_always = cmd_ver > 3; 336 337 /* Don't send PPAG if it is disabled */ 338 if (!send_ppag_always && !fwrt->ppag_flags) { 339 IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n"); 340 return -EINVAL; 341 } 342 343 IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver); 344 if (cmd_ver == 1) { 345 num_sub_bands = IWL_NUM_SUB_BANDS_V1; 346 gain = cmd->v1.gain[0]; 347 *cmd_size = sizeof(cmd->v1); 348 cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V1_MASK); 349 if (fwrt->ppag_bios_rev >= 1) { 350 /* in this case FW supports revision 0 */ 351 IWL_DEBUG_RADIO(fwrt, 352 "PPAG table rev is %d, send truncated table\n", 353 fwrt->ppag_bios_rev); 354 } 355 } else if (cmd_ver == 5) { 356 num_sub_bands = IWL_NUM_SUB_BANDS_V2; 357 gain = cmd->v5.gain[0]; 358 *cmd_size = sizeof(cmd->v5); 359 cmd->v5.flags = cpu_to_le32(fwrt->ppag_flags & IWL_PPAG_CMD_V5_MASK); 360 if (fwrt->ppag_bios_rev == 0) { 361 /* in this case FW supports revisions 1,2 or 3 */ 362 IWL_DEBUG_RADIO(fwrt, 363 "PPAG table rev is 0, send padded table\n"); 364 } 365 } else if (cmd_ver == 7) { 366 num_sub_bands = IWL_NUM_SUB_BANDS_V2; 367 gain = cmd->v7.gain[0]; 368 *cmd_size = sizeof(cmd->v7); 369 cmd->v7.ppag_config_info.table_source = fwrt->ppag_bios_source; 370 cmd->v7.ppag_config_info.table_revision = fwrt->ppag_bios_rev; 371 cmd->v7.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags); 372 } else { 373 IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n"); 374 return -EINVAL; 375 } 376 377 /* ppag mode */ 378 IWL_DEBUG_RADIO(fwrt, 379 "PPAG MODE bits were read from bios: %d\n", 380 fwrt->ppag_flags); 381 382 if (cmd_ver == 1 && 383 !fw_has_capa(&fwrt->fw->ucode_capa, 384 IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) { 385 cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK); 386 IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n"); 387 } else { 388 IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n"); 389 } 390 391 /* The 'flags' field is the same in v1 and v5 so we can just 392 * use v1 to access it. 393 */ 394 IWL_DEBUG_RADIO(fwrt, 395 "PPAG MODE bits going to be sent: %d\n", 396 (cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) : 397 le32_to_cpu(cmd->v7.ppag_config_info.value)); 398 399 for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) { 400 for (j = 0; j < num_sub_bands; j++) { 401 if (!send_ppag_always && 402 !iwl_ppag_value_valid(fwrt, i, j)) 403 return -EINVAL; 404 405 gain[i * num_sub_bands + j] = 406 fwrt->ppag_chains[i].subbands[j]; 407 IWL_DEBUG_RADIO(fwrt, 408 "PPAG table: chain[%d] band[%d]: gain = %d\n", 409 i, j, gain[i * num_sub_bands + j]); 410 } 411 } 412 413 return 0; 414 } 415 IWL_EXPORT_SYMBOL(iwl_fill_ppag_table); 416 417 bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt) 418 { 419 if (!dmi_check_system(dmi_ppag_approved_list)) { 420 IWL_DEBUG_RADIO(fwrt, 421 "System vendor '%s' is not in the approved list, disabling PPAG.\n", 422 dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>"); 423 fwrt->ppag_flags = 0; 424 return false; 425 } 426 427 return true; 428 } 429 IWL_EXPORT_SYMBOL(iwl_is_ppag_approved); 430 431 bool iwl_is_tas_approved(void) 432 { 433 return dmi_check_system(dmi_tas_approved_list); 434 } 435 IWL_EXPORT_SYMBOL(iwl_is_tas_approved); 436 437 struct iwl_tas_selection_data 438 iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev) 439 { 440 struct iwl_tas_selection_data tas_selection_out = {}; 441 u8 override_iec = u32_get_bits(tas_selection_in, 442 IWL_WTAS_OVERRIDE_IEC_MSK); 443 u8 canada_tas_uhb = u32_get_bits(tas_selection_in, 444 IWL_WTAS_CANADA_UHB_MSK); 445 u8 enabled_iec = u32_get_bits(tas_selection_in, 446 IWL_WTAS_ENABLE_IEC_MSK); 447 u8 usa_tas_uhb = u32_get_bits(tas_selection_in, 448 IWL_WTAS_USA_UHB_MSK); 449 450 if (tbl_rev > 0) { 451 tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb; 452 tas_selection_out.override_tas_iec = override_iec; 453 tas_selection_out.enable_tas_iec = enabled_iec; 454 } 455 456 if (tbl_rev > 1) 457 tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb; 458 459 return tas_selection_out; 460 } 461 IWL_EXPORT_SYMBOL(iwl_parse_tas_selection); 462 463 bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc) 464 { 465 for (int i = 0; i < *size; i++) { 466 if (list[i] == mcc) 467 return true; 468 } 469 470 /* Verify that there is room for another country 471 * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full. 472 */ 473 if (*size >= IWL_WTAS_BLACK_LIST_MAX) 474 return false; 475 476 list[*size++] = mcc; 477 return true; 478 } 479 IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list); 480 481 __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt) 482 { 483 int ret; 484 u32 val; 485 __le32 config_bitmap = 0; 486 487 switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) { 488 case IWL_CFG_RF_TYPE_HR1: 489 case IWL_CFG_RF_TYPE_HR2: 490 case IWL_CFG_RF_TYPE_JF1: 491 case IWL_CFG_RF_TYPE_JF2: 492 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2, 493 &val); 494 495 if (!ret && val == DSM_VALUE_INDONESIA_ENABLE) 496 config_bitmap |= 497 cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK); 498 break; 499 default: 500 break; 501 } 502 503 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val); 504 if (!ret) { 505 if (val == DSM_VALUE_SRD_PASSIVE) 506 config_bitmap |= 507 cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK); 508 else if (val == DSM_VALUE_SRD_DISABLE) 509 config_bitmap |= 510 cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK); 511 } 512 513 if (fw_has_capa(&fwrt->fw->ucode_capa, 514 IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) { 515 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG, 516 &val); 517 /* 518 * China 2022 enable if the BIOS object does not exist or 519 * if it is enabled in BIOS. 520 */ 521 if (ret < 0 || val & DSM_MASK_CHINA_22_REG) 522 config_bitmap |= 523 cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK); 524 } 525 526 return config_bitmap; 527 } 528 IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap); 529 530 static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) 531 { 532 size_t cmd_size; 533 534 switch (cmd_ver) { 535 case 12: 536 case 11: 537 cmd_size = sizeof(struct iwl_lari_config_change_cmd); 538 break; 539 case 10: 540 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10); 541 break; 542 case 9: 543 case 8: 544 case 7: 545 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7); 546 break; 547 case 6: 548 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6); 549 break; 550 case 5: 551 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5); 552 break; 553 case 4: 554 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4); 555 break; 556 case 3: 557 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3); 558 break; 559 case 2: 560 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2); 561 break; 562 default: 563 cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1); 564 break; 565 } 566 return cmd_size; 567 } 568 569 int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, 570 struct iwl_lari_config_change_cmd *cmd, 571 size_t *cmd_size) 572 { 573 int ret; 574 u32 value; 575 bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa, 576 IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE); 577 u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw, 578 WIDE_ID(REGULATORY_AND_NVM_GROUP, 579 LARI_CONFIG_CHANGE), 1); 580 581 if (WARN_ONCE(cmd_ver > 12, 582 "Don't add newer versions to this function\n")) 583 return -EINVAL; 584 585 memset(cmd, 0, sizeof(*cmd)); 586 *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver); 587 588 cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt); 589 590 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value); 591 if (!ret) { 592 if (!has_raw_dsm_capa) 593 value &= DSM_11AX_ALLOW_BITMAP; 594 cmd->oem_11ax_allow_bitmap = cpu_to_le32(value); 595 } 596 597 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value); 598 if (!ret) { 599 if (!has_raw_dsm_capa) 600 value &= DSM_UNII4_ALLOW_BITMAP; 601 602 /* Since version 9, bits 4 and 5 are supported 603 * regardless of this capability, By pass this masking 604 * if firmware has capability of accepting raw DSM table. 605 */ 606 if (!has_raw_dsm_capa && cmd_ver < 9 && 607 !fw_has_capa(&fwrt->fw->ucode_capa, 608 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA)) 609 value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | 610 DSM_VALUE_UNII4_CANADA_EN_MSK); 611 612 cmd->oem_unii4_allow_bitmap = cpu_to_le32(value); 613 } 614 615 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value); 616 if (!ret) { 617 if (!has_raw_dsm_capa) 618 value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12; 619 620 if (!has_raw_dsm_capa && cmd_ver < 8) 621 value &= ~ACTIVATE_5G2_IN_WW_MASK; 622 623 /* Since version 12, bits 5 and 6 are supported 624 * regardless of this capability, By pass this masking 625 * if firmware has capability of accepting raw DSM table. 626 */ 627 if (!has_raw_dsm_capa && cmd_ver < 12 && 628 !fw_has_capa(&fwrt->fw->ucode_capa, 629 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA)) 630 value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11; 631 632 cmd->chan_state_active_bitmap = cpu_to_le32(value); 633 } 634 635 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value); 636 if (!ret) 637 cmd->oem_uhb_allow_bitmap = cpu_to_le32(value); 638 639 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value); 640 if (!ret) { 641 if (!has_raw_dsm_capa) 642 value &= DSM_FORCE_DISABLE_CHANNELS_ALLOWED_BITMAP; 643 cmd->force_disable_channels_bitmap = cpu_to_le32(value); 644 } 645 646 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD, 647 &value); 648 if (!ret) { 649 if (!has_raw_dsm_capa) 650 value &= DSM_EDT_ALLOWED_BITMAP; 651 cmd->edt_bitmap = cpu_to_le32(value); 652 } 653 654 ret = iwl_bios_get_wbem(fwrt, &value); 655 if (!ret) 656 cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value); 657 658 ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value); 659 if (!ret) 660 cmd->oem_11be_allow_bitmap = cpu_to_le32(value); 661 662 if (cmd->config_bitmap || 663 cmd->oem_uhb_allow_bitmap || 664 cmd->oem_11ax_allow_bitmap || 665 cmd->oem_unii4_allow_bitmap || 666 cmd->chan_state_active_bitmap || 667 cmd->force_disable_channels_bitmap || 668 cmd->edt_bitmap || 669 cmd->oem_320mhz_allow_bitmap || 670 cmd->oem_11be_allow_bitmap) { 671 IWL_DEBUG_RADIO(fwrt, 672 "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", 673 le32_to_cpu(cmd->config_bitmap), 674 le32_to_cpu(cmd->oem_11ax_allow_bitmap)); 675 IWL_DEBUG_RADIO(fwrt, 676 "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n", 677 le32_to_cpu(cmd->oem_unii4_allow_bitmap), 678 le32_to_cpu(cmd->chan_state_active_bitmap), 679 cmd_ver); 680 IWL_DEBUG_RADIO(fwrt, 681 "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n", 682 le32_to_cpu(cmd->oem_uhb_allow_bitmap), 683 le32_to_cpu(cmd->force_disable_channels_bitmap)); 684 IWL_DEBUG_RADIO(fwrt, 685 "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n", 686 le32_to_cpu(cmd->edt_bitmap), 687 le32_to_cpu(cmd->oem_320mhz_allow_bitmap)); 688 IWL_DEBUG_RADIO(fwrt, 689 "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n", 690 le32_to_cpu(cmd->oem_11be_allow_bitmap)); 691 } else { 692 return 1; 693 } 694 695 return 0; 696 } 697 IWL_EXPORT_SYMBOL(iwl_fill_lari_config); 698 699 int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func, 700 u32 *value) 701 { 702 GET_BIOS_TABLE(dsm, fwrt, func, value); 703 } 704 IWL_EXPORT_SYMBOL(iwl_bios_get_dsm); 705 706 bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc) 707 { 708 /* Some kind of regulatory mess means we need to currently disallow 709 * puncturing in the US and Canada unless enabled in BIOS. 710 */ 711 switch (mcc) { 712 case IWL_MCC_US: 713 return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK; 714 case IWL_MCC_CANADA: 715 return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK; 716 default: 717 return true; 718 } 719 } 720 IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios); 721 722 bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt) 723 { 724 /* default behaviour is disabled */ 725 u32 value = 0; 726 int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value); 727 728 if (ret < 0) { 729 IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret); 730 return false; 731 } 732 733 value &= DSM_VALUE_RFI_DISABLE; 734 /* RFI BIOS CONFIG value can be 0 or 3 only. 735 * i.e 0 means DDR and DLVR enabled. 3 means DDR and DLVR disabled. 736 * 1 and 2 are invalid BIOS configurations, So, it's not possible to 737 * disable ddr/dlvr separately. 738 */ 739 if (!value) { 740 IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n"); 741 return true; 742 } else if (value == DSM_VALUE_RFI_DISABLE) { 743 IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n"); 744 } else { 745 IWL_DEBUG_RADIO(fwrt, 746 "DSM RFI got invalid value, value=%d\n", value); 747 } 748 749 return false; 750 } 751 IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios); 752