// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2023, 2025 Intel Corporation
 */
#if defined(__FreeBSD__)
#include <linux/bitfield.h>
#endif
#include <linux/dmi.h>
#include "iwl-drv.h"
#include "iwl-debug.h"
#include "regulatory.h"
#include "fw/runtime.h"
#include "fw/uefi.h"

#define GET_BIOS_TABLE(__name, ...)					\
	do {								\
		int ret = -ENOENT;					\
		if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)	\
			ret = iwl_uefi_get_ ## __name(__VA_ARGS__);	\
		if (ret < 0)						\
			ret = iwl_acpi_get_ ## __name(__VA_ARGS__);	\
		return ret;						\
	} while (0)

#define IWL_BIOS_TABLE_LOADER(__name)					\
	int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt)	\
	{GET_BIOS_TABLE(__name, fwrt); }				\
	IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)

#define IWL_BIOS_TABLE_LOADER_DATA(__name, data_type)			\
	int iwl_bios_get_ ## __name(struct iwl_fw_runtime *fwrt,	\
				    data_type * data)			\
	{GET_BIOS_TABLE(__name, fwrt, data); }				\
	IWL_EXPORT_SYMBOL(iwl_bios_get_ ## __name)

IWL_BIOS_TABLE_LOADER(wrds_table);
IWL_BIOS_TABLE_LOADER(ewrd_table);
IWL_BIOS_TABLE_LOADER(wgds_table);
IWL_BIOS_TABLE_LOADER(ppag_table);
IWL_BIOS_TABLE_LOADER(phy_filters);
IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
IWL_BIOS_TABLE_LOADER_DATA(dsbr, u32);
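
/*
 * For reference, a rough hand-expanded sketch of what one of these loaders
 * becomes (not generated output, simplified for readability):
 *
 *	int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *data)
 *	{
 *		int ret = -ENOENT;
 *
 *		if (fwrt->uefi_tables_lock_status > UEFI_WIFI_GUID_UNLOCKED)
 *			ret = iwl_uefi_get_eckv(fwrt, data);
 *		if (ret < 0)
 *			ret = iwl_acpi_get_eckv(fwrt, data);
 *		return ret;
 *	}
 *
 * i.e. the UEFI getter is tried first when uefi_tables_lock_status is above
 * UEFI_WIFI_GUID_UNLOCKED, and the ACPI getter is the fallback.
 */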

static const struct dmi_system_id dmi_ppag_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "MSFT",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
		},
	},
	{ .ident = "ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{ .ident = "GOOGLE-HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
		},
	},
	{ .ident = "GOOGLE-ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek COMPUTER INC."),
		},
	},
	{ .ident = "GOOGLE-SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
		},
	},
	{ .ident = "RAZER",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Razer"),
		},
	},
	{ .ident = "Honor",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
		},
	},
	{ .ident = "WIKO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "WIKO"),
		},
	},
	{}
};

static const struct dmi_system_id dmi_tas_approved_list[] = {
	{ .ident = "HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		},
	},
	{ .ident = "SAMSUNG",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD"),
		},
	},
	{ .ident = "LENOVO",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
		},
	},
	{ .ident = "DELL",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
		},
	},
	{ .ident = "MSFT",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
		},
	},
	{ .ident = "Acer",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
		},
	},
	{ .ident = "ASUS",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{ .ident = "GOOGLE-HP",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
			DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
		},
	},
	{ .ident = "MSI",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
		},
	},
	{ .ident = "Honor",
	  .matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HONOR"),
		},
	},
	/* keep last */
	{}
};

bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
{
	/*
	 * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
	 * earlier firmware versions. Unfortunately, we don't have a
	 * TLV API flag to rely on, so rely on the major version which
	 * is in the first byte of ucode_ver. This was implemented
	 * initially on version 38 and then backported to 17. It was
	 * also backported to 29, but only for 7265D devices. The
	 * intention was to have it in 36 as well, but not all 8000
	 * family got this feature enabled. The 8000 family is the
	 * only one using version 36, so skip this version entirely.
	 */
	return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
	       (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
		fwrt->trans->info.hw_rev != CSR_HW_REV_TYPE_3160) ||
	       (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
		((fwrt->trans->info.hw_rev & CSR_HW_REV_TYPE_MSK) ==
		 CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);

int iwl_sar_geo_fill_table(struct iwl_fw_runtime *fwrt,
			   struct iwl_per_chain_offset *table,
			   u32 n_bands, u32 n_profiles)
{
	int i, j;

	if (!fwrt->geo_enabled)
		return -ENODATA;

	if (!iwl_sar_geo_support(fwrt))
		return -EOPNOTSUPP;

	for (i = 0; i < n_profiles; i++) {
		for (j = 0; j < n_bands; j++) {
			struct iwl_per_chain_offset *chain =
				&table[i * n_bands + j];

			chain->max_tx_power =
				cpu_to_le16(fwrt->geo_profiles[i].bands[j].max);
			chain->chain_a =
				fwrt->geo_profiles[i].bands[j].chains[0];
			chain->chain_b =
				fwrt->geo_profiles[i].bands[j].chains[1];
			IWL_DEBUG_RADIO(fwrt,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j,
					fwrt->geo_profiles[i].bands[j].chains[0],
					fwrt->geo_profiles[i].bands[j].chains[1],
					fwrt->geo_profiles[i].bands[j].max);
		}
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_fill_table);

static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
			      __le16 *per_chain, u32 n_subbands,
			      int prof_a, int prof_b)
{
	int profs[BIOS_SAR_NUM_CHAINS] = { prof_a, prof_b };
	int i, j;

	for (i = 0; i < BIOS_SAR_NUM_CHAINS; i++) {
		struct iwl_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to BIOS_SAR_MAX_PROFILE_NUM */
		if (profs[i] > BIOS_SAR_MAX_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &fwrt->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(fwrt, "SAR profile %d is disabled.\n",
					profs[i]);
			/*
			 * if one of the profiles is disabled, we
			 * ignore all of them and return 1 to
			 * differentiate disabled from other failures.
			 */
			return 1;
		}

		IWL_DEBUG_INFO(fwrt,
			       "SAR EWRD: chain %d profile index %d\n",
			       i, profs[i]);
		IWL_DEBUG_RADIO(fwrt, "  Chain[%d]:\n", i);
		for (j = 0; j < n_subbands; j++) {
			per_chain[i * n_subbands + j] =
				cpu_to_le16(prof->chains[i].subbands[j]);
			IWL_DEBUG_RADIO(fwrt, "    Band[%d] = %d * .125dBm\n",
					j, prof->chains[i].subbands[j]);
		}
	}

	return 0;
}

int iwl_sar_fill_profile(struct iwl_fw_runtime *fwrt,
			 __le16 *per_chain, u32 n_tables, u32 n_subbands,
			 int prof_a, int prof_b)
{
	int i, ret = 0;

	for (i = 0; i < n_tables; i++) {
		ret = iwl_sar_fill_table(fwrt,
					 &per_chain[i * n_subbands * BIOS_SAR_NUM_CHAINS],
					 n_subbands, prof_a, prof_b);
		if (ret)
			break;
	}

	return ret;
}
IWL_EXPORT_SYMBOL(iwl_sar_fill_profile);
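
/*
 * Layout sketch of the per_chain buffer filled above, derived from the
 * indexing in iwl_sar_fill_table() and iwl_sar_fill_profile():
 *
 *	per_chain[(table * BIOS_SAR_NUM_CHAINS + chain) * n_subbands + subband]
 *
 * i.e. a flat array of n_tables tables, each holding BIOS_SAR_NUM_CHAINS
 * chains of n_subbands little-endian power limits in units of 0.125 dBm.
 */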

static bool iwl_ppag_value_valid(struct iwl_fw_runtime *fwrt, int chain,
				 int subband)
{
	s8 ppag_val = fwrt->ppag_chains[chain].subbands[subband];

	if ((subband == 0 &&
	     (ppag_val > IWL_PPAG_MAX_LB || ppag_val < IWL_PPAG_MIN_LB)) ||
	    (subband != 0 &&
	     (ppag_val > IWL_PPAG_MAX_HB || ppag_val < IWL_PPAG_MIN_HB))) {
		IWL_DEBUG_RADIO(fwrt, "Invalid PPAG value: %d\n", ppag_val);
		return false;
	}
	return true;
}

int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
			union iwl_ppag_table_cmd *cmd, int *cmd_size)
{
	u8 cmd_ver;
	int i, j, num_sub_bands;
	s8 *gain;
	bool send_ppag_always;

	/* many firmware images for JF lie about this */
	if (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id) ==
	    CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
		return -EOPNOTSUPP;

	if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
		IWL_DEBUG_RADIO(fwrt,
				"PPAG capability not supported by FW, command not sent.\n");
		return -EINVAL;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
					WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD), 1);
	/*
	 * Starting from version 4, the driver needs to send the PPAG CMD
	 * regardless of whether PPAG is enabled/disabled or valid/invalid.
	 */
	send_ppag_always = cmd_ver > 3;

	/* Don't send PPAG if it is disabled */
	if (!send_ppag_always && !fwrt->ppag_flags) {
		IWL_DEBUG_RADIO(fwrt, "PPAG not enabled, command not sent.\n");
		return -EINVAL;
	}

	IWL_DEBUG_RADIO(fwrt, "PPAG cmd ver is %d\n", cmd_ver);
	if (cmd_ver == 1) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V1;
		gain = cmd->v1.gain[0];
		*cmd_size = sizeof(cmd->v1);
		cmd->v1.flags = cpu_to_le32(fwrt->ppag_flags);
		if (fwrt->ppag_bios_rev >= 1) {
			/* in this case FW supports revision 0 */
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table rev is %d, send truncated table\n",
					fwrt->ppag_bios_rev);
		}
	} else if (cmd_ver >= 2 && cmd_ver <= 6) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V2;
		gain = cmd->v2.gain[0];
		*cmd_size = sizeof(cmd->v2);
		cmd->v2.flags = cpu_to_le32(fwrt->ppag_flags);
		if (fwrt->ppag_bios_rev == 0) {
			/* in this case FW supports revisions 1, 2 or 3 */
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table rev is 0, send padded table\n");
		}
	} else if (cmd_ver == 7) {
		num_sub_bands = IWL_NUM_SUB_BANDS_V2;
		gain = cmd->v3.gain[0];
		*cmd_size = sizeof(cmd->v3);
		cmd->v3.ppag_config_info.table_source = fwrt->ppag_bios_source;
		cmd->v3.ppag_config_info.table_revision = fwrt->ppag_bios_rev;
		cmd->v3.ppag_config_info.value = cpu_to_le32(fwrt->ppag_flags);
	} else {
		IWL_DEBUG_RADIO(fwrt, "Unsupported PPAG command version\n");
		return -EINVAL;
	}

	/* ppag mode */
	IWL_DEBUG_RADIO(fwrt,
			"PPAG MODE bits were read from bios: %d\n",
			fwrt->ppag_flags);

	if (cmd_ver == 6)
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V6_MASK);
	else if (cmd_ver == 5)
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V5_MASK);
	else if (cmd_ver < 5)
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_CMD_V4_MASK);

	if ((cmd_ver == 1 &&
	     !fw_has_capa(&fwrt->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_PPAG_CHINA_BIOS_SUPPORT)) ||
	    (cmd_ver == 2 && fwrt->ppag_bios_rev >= 2)) {
		cmd->v1.flags &= cpu_to_le32(IWL_PPAG_ETSI_MASK);
		IWL_DEBUG_RADIO(fwrt, "masking ppag China bit\n");
	} else {
		IWL_DEBUG_RADIO(fwrt, "isn't masking ppag China bit\n");
	}

	/* The 'flags' field is the same in v1 and v2 so we can just
	 * use v1 to access it.
	 */
	IWL_DEBUG_RADIO(fwrt,
			"PPAG MODE bits going to be sent: %d\n",
			(cmd_ver < 7) ? le32_to_cpu(cmd->v1.flags) :
					le32_to_cpu(cmd->v3.ppag_config_info.value));

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		for (j = 0; j < num_sub_bands; j++) {
			if (!send_ppag_always &&
			    !iwl_ppag_value_valid(fwrt, i, j))
				return -EINVAL;

			gain[i * num_sub_bands + j] =
				fwrt->ppag_chains[i].subbands[j];
			IWL_DEBUG_RADIO(fwrt,
					"PPAG table: chain[%d] band[%d]: gain = %d\n",
					i, j, gain[i * num_sub_bands + j]);
		}
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_ppag_table);

bool iwl_is_ppag_approved(struct iwl_fw_runtime *fwrt)
{
	if (!dmi_check_system(dmi_ppag_approved_list)) {
		IWL_DEBUG_RADIO(fwrt,
				"System vendor '%s' is not in the approved list, disabling PPAG.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
		fwrt->ppag_flags = 0;
		return false;
	}

	return true;
}
IWL_EXPORT_SYMBOL(iwl_is_ppag_approved);

bool iwl_is_tas_approved(void)
{
	return dmi_check_system(dmi_tas_approved_list);
}
IWL_EXPORT_SYMBOL(iwl_is_tas_approved);

struct iwl_tas_selection_data
iwl_parse_tas_selection(const u32 tas_selection_in, const u8 tbl_rev)
{
	struct iwl_tas_selection_data tas_selection_out = {};
	u8 override_iec = u32_get_bits(tas_selection_in,
				       IWL_WTAS_OVERRIDE_IEC_MSK);
	u8 canada_tas_uhb = u32_get_bits(tas_selection_in,
					 IWL_WTAS_CANADA_UHB_MSK);
	u8 enabled_iec = u32_get_bits(tas_selection_in,
				      IWL_WTAS_ENABLE_IEC_MSK);
	u8 usa_tas_uhb = u32_get_bits(tas_selection_in,
				      IWL_WTAS_USA_UHB_MSK);

	if (tbl_rev > 0) {
		tas_selection_out.usa_tas_uhb_allowed = usa_tas_uhb;
		tas_selection_out.override_tas_iec = override_iec;
		tas_selection_out.enable_tas_iec = enabled_iec;
	}

	if (tbl_rev > 1)
		tas_selection_out.canada_tas_uhb_allowed = canada_tas_uhb;

	return tas_selection_out;
}
IWL_EXPORT_SYMBOL(iwl_parse_tas_selection);

bool iwl_add_mcc_to_tas_block_list(u16 *list, u8 *size, u16 mcc)
{
	for (int i = 0; i < *size; i++) {
		if (list[i] == mcc)
			return true;
	}

	/* Verify that there is room for another country
	 * If *size == IWL_WTAS_BLACK_LIST_MAX, then the table is full.
	 */
	if (*size >= IWL_WTAS_BLACK_LIST_MAX)
		return false;

	list[(*size)++] = mcc;
	return true;
}
IWL_EXPORT_SYMBOL(iwl_add_mcc_to_tas_block_list);
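
/*
 * Usage sketch (hypothetical values; callers populate a TAS block list read
 * from BIOS, e.g. the one carried in struct iwl_tas_data):
 *
 *	u16 block_list[IWL_WTAS_BLACK_LIST_MAX];
 *	u8 size = 0;
 *
 *	if (!iwl_add_mcc_to_tas_block_list(block_list, &size, mcc))
 *		... the list is already full, the MCC was not added ...
 *
 * The function returns true both when the MCC was added and when it was
 * already present, and false only when the list is full.
 */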

__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
	int ret;
	u32 val;
	__le32 config_bitmap = 0;

	switch (CSR_HW_RFID_TYPE(fwrt->trans->info.hw_rf_id)) {
	case IWL_CFG_RF_TYPE_HR1:
	case IWL_CFG_RF_TYPE_HR2:
	case IWL_CFG_RF_TYPE_JF1:
	case IWL_CFG_RF_TYPE_JF2:
		ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_INDONESIA_5G2,
				       &val);

		if (!ret && val == DSM_VALUE_INDONESIA_ENABLE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
		break;
	default:
		break;
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_DISABLE_SRD, &val);
	if (!ret) {
		if (val == DSM_VALUE_SRD_PASSIVE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
		else if (val == DSM_VALUE_SRD_DISABLE)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
	}

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT)) {
		ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_REGULATORY_CONFIG,
				       &val);
		/*
		 * Enable China 2022 regulatory support if the BIOS object
		 * does not exist or if it is enabled in BIOS.
		 */
		if (ret < 0 || val & DSM_MASK_CHINA_22_REG)
			config_bitmap |=
				cpu_to_le32(LARI_CONFIG_ENABLE_CHINA_22_REG_SUPPORT_MSK);
	}

	return config_bitmap;
}
IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);

static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
{
	size_t cmd_size;

	switch (cmd_ver) {
	case 12:
	case 11:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd);
		break;
	case 10:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
		break;
	case 9:
	case 8:
	case 7:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
		break;
	case 6:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
		break;
	case 5:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
		break;
	case 4:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
		break;
	case 3:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
		break;
	case 2:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
		break;
	default:
		cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
		break;
	}
	return cmd_size;
}

int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
			 struct iwl_lari_config_change_cmd *cmd,
			 size_t *cmd_size)
{
	int ret;
	u32 value;
	bool has_raw_dsm_capa = fw_has_capa(&fwrt->fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_FW_ACCEPTS_RAW_DSM_TABLE);
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE), 1);

	if (WARN_ONCE(cmd_ver > 12,
		      "Don't add newer versions to this function\n"))
		return -EINVAL;

	memset(cmd, 0, sizeof(*cmd));
	*cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);

	cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_11AX_ALLOW_BITMAP;
		cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_UNII4_ALLOW_BITMAP;

		/* Since version 9, bits 4 and 5 are supported regardless
		 * of this capability; bypass this masking if the firmware
		 * is capable of accepting the raw DSM table.
		 */
		if (!has_raw_dsm_capa && cmd_ver < 9 &&
		    !fw_has_capa(&fwrt->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_5G9_FOR_CA))
			value &= ~(DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK |
				   DSM_VALUE_UNII4_CANADA_EN_MSK);

		cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V12;

		if (!has_raw_dsm_capa && cmd_ver < 8)
			value &= ~ACTIVATE_5G2_IN_WW_MASK;

		/* Since version 12, bits 5 and 6 are supported regardless
		 * of this capability; bypass this masking if the firmware
		 * is capable of accepting the raw DSM table.
		 */
		if (!has_raw_dsm_capa && cmd_ver < 12 &&
		    !fw_has_capa(&fwrt->fw->ucode_capa,
				 IWL_UCODE_TLV_CAPA_BIOS_OVERRIDE_UNII4_US_CA))
			value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;

		cmd->chan_state_active_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
	if (!ret)
		cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_FORCE_DISABLE_CHANNELS_ALLOWED_BITMAP;
		cmd->force_disable_channels_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
			       &value);
	if (!ret) {
		if (!has_raw_dsm_capa)
			value &= DSM_EDT_ALLOWED_BITMAP;
		cmd->edt_bitmap = cpu_to_le32(value);
	}

	ret = iwl_bios_get_wbem(fwrt, &value);
	if (!ret)
		cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);

	ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
	if (!ret)
		cmd->oem_11be_allow_bitmap = cpu_to_le32(value);

	if (cmd->config_bitmap ||
	    cmd->oem_uhb_allow_bitmap ||
	    cmd->oem_11ax_allow_bitmap ||
	    cmd->oem_unii4_allow_bitmap ||
	    cmd->chan_state_active_bitmap ||
	    cmd->force_disable_channels_bitmap ||
	    cmd->edt_bitmap ||
	    cmd->oem_320mhz_allow_bitmap ||
	    cmd->oem_11be_allow_bitmap) {
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->config_bitmap),
				le32_to_cpu(cmd->oem_11ax_allow_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
				le32_to_cpu(cmd->oem_unii4_allow_bitmap),
				le32_to_cpu(cmd->chan_state_active_bitmap),
				cmd_ver);
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
				le32_to_cpu(cmd->oem_uhb_allow_bitmap),
				le32_to_cpu(cmd->force_disable_channels_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->edt_bitmap),
				le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
		IWL_DEBUG_RADIO(fwrt,
				"sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
				le32_to_cpu(cmd->oem_11be_allow_bitmap));
	} else {
		return 1;
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
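
/*
 * Usage sketch (hypothetical caller; the real opmodes wrap the host command
 * send in their own helpers):
 *
 *	struct iwl_lari_config_change_cmd cmd;
 *	size_t cmd_size;
 *	int ret = iwl_fill_lari_config(fwrt, &cmd, &cmd_size);
 *
 *	if (!ret)
 *		... send WIDE_ID(REGULATORY_AND_NVM_GROUP, LARI_CONFIG_CHANGE)
 *		    with &cmd and cmd_size ...
 *
 * A return value of 1 means none of the bitmaps is set and there is nothing
 * to send; a negative value is an error.
 */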

int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
		     u32 *value)
{
	GET_BIOS_TABLE(dsm, fwrt, func, value);
}
IWL_EXPORT_SYMBOL(iwl_bios_get_dsm);

bool iwl_puncturing_is_allowed_in_bios(u32 puncturing, u16 mcc)
{
	/* Some kind of regulatory mess means we need to currently disallow
	 * puncturing in the US and Canada unless enabled in BIOS.
	 */
	switch (mcc) {
	case IWL_MCC_US:
		return puncturing & IWL_UEFI_CNV_PUNCTURING_USA_EN_MSK;
	case IWL_MCC_CANADA:
		return puncturing & IWL_UEFI_CNV_PUNCTURING_CANADA_EN_MSK;
	default:
		return true;
	}
}
IWL_EXPORT_SYMBOL(iwl_puncturing_is_allowed_in_bios);
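
/*
 * Note: the IWL_MCC_US / IWL_MCC_CANADA values compared above pack the two
 * country-code characters into a u16 (e.g. 'U' << 8 | 'S'); the mcc argument
 * is assumed to use the same encoding as elsewhere in the driver.
 */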

bool iwl_rfi_is_enabled_in_bios(struct iwl_fw_runtime *fwrt)
{
	/* default behaviour is disabled */
	u32 value = 0;
	int ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_RFI_CONFIG, &value);

	if (ret < 0) {
		IWL_DEBUG_RADIO(fwrt, "Failed to get DSM RFI, ret=%d\n", ret);
		return false;
	}

	value &= DSM_VALUE_RFI_DISABLE;
	/* The RFI BIOS CONFIG value can only be 0 or 3:
	 * 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
	 * 1 and 2 are invalid BIOS configurations, so it is not possible
	 * to disable DDR/DLVR separately.
	 */
	if (!value) {
		IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to enable\n");
		return true;
	} else if (value == DSM_VALUE_RFI_DISABLE) {
		IWL_DEBUG_RADIO(fwrt, "DSM RFI is evaluated to disable\n");
	} else {
		IWL_DEBUG_RADIO(fwrt,
				"DSM RFI got invalid value, value=%d\n", value);
	}

	return false;
}
IWL_EXPORT_SYMBOL(iwl_rfi_is_enabled_in_bios);