/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *****************************************************************************/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/firmware.h>

#include "iwl-drv.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-csr.h"
#include "fw/acpi.h"
#include "fw/api/nvm-reg.h"
#include "fw/api/commands.h"
#include "fw/api/cmdhdr.h"
#include "fw/img.h"

/* NVM offsets (in words) definitions */
enum nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	SUBSYSTEM_ID = 0x0A,
	HW_ADDR = 0x15,

	/* NVM SW-Section offset (in words) definitions */
	NVM_SW_SECTION = 0x1C0,
	NVM_VERSION = 0,
	RADIO_CFG = 1,
	SKU = 2,
	N_HW_ADDRS = 3,
	NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	NVM_CALIB_SECTION = 0x2B8,
	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_SDP = 0,
};

enum ext_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,

	/* NVM SW-Section offset (in words) definitions */
	NVM_VERSION_EXT_NVM = 0,
	RADIO_CFG_FAMILY_EXT_NVM = 0,
	SKU_FAMILY_8000 = 2,
	N_HW_ADDRS_FAMILY_8000 = 3,

	/* NVM REGULATORY -Section offset (in words) definitions */
	NVM_CHANNELS_EXTENDED = 0,
	NVM_LAR_OFFSET_OLD = 0x4C7,
	NVM_LAR_OFFSET = 0x507,
	NVM_LAR_ENABLED = 0x7,
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	NVM_SKU_CAP_BAND_24GHZ		= BIT(0),
	NVM_SKU_CAP_BAND_52GHZ		= BIT(1),
	NVM_SKU_CAP_11N_ENABLE		= BIT(2),
	NVM_SKU_CAP_11AC_ENABLE		= BIT(3),
	NVM_SKU_CAP_MIMO_DISABLE	= BIT(5),
};

/*
 * These are the channel numbers in the order that they are stored in the NVM
 */
static const u16 iwl_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const u16 iwl_ext_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

static const u16 iwl_uhb_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181,
	/* 6-7 GHz */
	189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
	245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
	301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
	357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
	413, 417, 421
};
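
/*
 * For illustration only (added note, not from the original source): in all
 * three tables above the first 14 entries are the 2.4 GHz channels, which is
 * why NUM_2GHZ_CHANNELS below is 14 and why a channel index >= 14 is treated
 * as 5 GHz (or above) elsewhere in this file.  For example, index 14 is
 * channel 36, which ieee80211_channel_to_frequency() maps to 5180 MHz.
 */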
#define IWL_NVM_NUM_CHANNELS		ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT	ARRAY_SIZE(iwl_ext_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_UHB	ARRAY_SIZE(iwl_uhb_nvm_channels)
#define NUM_2GHZ_CHANNELS		14
#define FIRST_2GHZ_HT_MINUS		5
#define LAST_2GHZ_HT_PLUS		9
#define N_HW_ADDR_MASK			0xF

/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
};
#define RATES_24_OFFS	0
#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
#define RATES_52_OFFS	4
#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)

/**
 * enum iwl_nvm_channel_flags - channel flags in NVM
 * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @NVM_CHANNEL_ACTIVE: active scanning allowed
 * @NVM_CHANNEL_RADAR: radar detection required
 * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
 * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
 *	on same channel on 2.4 or same UNII band on 5.2
 * @NVM_CHANNEL_UNIFORM: uniform spreading required
 * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
 * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
 */
enum iwl_nvm_channel_flags {
	NVM_CHANNEL_VALID		= BIT(0),
	NVM_CHANNEL_IBSS		= BIT(1),
	NVM_CHANNEL_ACTIVE		= BIT(3),
	NVM_CHANNEL_RADAR		= BIT(4),
	NVM_CHANNEL_INDOOR_ONLY		= BIT(5),
	NVM_CHANNEL_GO_CONCURRENT	= BIT(6),
	NVM_CHANNEL_UNIFORM		= BIT(7),
	NVM_CHANNEL_20MHZ		= BIT(8),
	NVM_CHANNEL_40MHZ		= BIT(9),
	NVM_CHANNEL_80MHZ		= BIT(10),
	NVM_CHANNEL_160MHZ		= BIT(11),
	NVM_CHANNEL_DC_HIGH		= BIT(12),
};

static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
					       int chan, u32 flags)
{
#define CHECK_AND_PRINT_I(x)	\
	((flags & NVM_CHANNEL_##x) ? " " #x : "")

	if (!(flags & NVM_CHANNEL_VALID)) {
		IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
			      chan, flags);
		return;
	}

	/* Note: already can print up to 101 characters, 110 is the limit! */
	IWL_DEBUG_DEV(dev, level,
		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
		      chan, flags,
		      CHECK_AND_PRINT_I(VALID),
		      CHECK_AND_PRINT_I(IBSS),
		      CHECK_AND_PRINT_I(ACTIVE),
		      CHECK_AND_PRINT_I(RADAR),
		      CHECK_AND_PRINT_I(INDOOR_ONLY),
		      CHECK_AND_PRINT_I(GO_CONCURRENT),
		      CHECK_AND_PRINT_I(UNIFORM),
		      CHECK_AND_PRINT_I(20MHZ),
		      CHECK_AND_PRINT_I(40MHZ),
		      CHECK_AND_PRINT_I(80MHZ),
		      CHECK_AND_PRINT_I(160MHZ),
		      CHECK_AND_PRINT_I(DC_HIGH));
#undef CHECK_AND_PRINT_I
}

static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
				 u32 nvm_flags, const struct iwl_cfg *cfg)
{
	u32 flags = IEEE80211_CHAN_NO_HT40;

	if (band == NL80211_BAND_2GHZ && (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (ch_num <= LAST_2GHZ_HT_PLUS)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		if (ch_num >= FIRST_2GHZ_HT_MINUS)
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	} else if (nvm_flags & NVM_CHANNEL_40MHZ) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
		else
			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
	}
	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= IEEE80211_CHAN_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= IEEE80211_CHAN_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_IBSS))
		flags |= IEEE80211_CHAN_NO_IR;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= IEEE80211_CHAN_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= IEEE80211_CHAN_RADAR;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= IEEE80211_CHAN_INDOOR_ONLY;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & IEEE80211_CHAN_NO_IR))
		flags |= IEEE80211_CHAN_IR_CONCURRENT;

	return flags;
}

static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
{
	if (ch_idx >= NUM_2GHZ_CHANNELS)
		return NL80211_BAND_5GHZ;
	return NL80211_BAND_2GHZ;
}
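
/*
 * For illustration only (added note, not from the original source): with the
 * bounds used by iwl_get_channel_flags() above, a 2.4 GHz channel that has
 * NVM_CHANNEL_40MHZ set keeps HT40+ only while its number is <=
 * LAST_2GHZ_HT_PLUS (9) and HT40- only while its number is >=
 * FIRST_2GHZ_HT_MINUS (5).  So channel 1 allows HT40+ only, channel 6 allows
 * both, and channel 13 allows HT40- only.  In 5 GHz the secondary channel
 * alternates instead: an even (ch_idx - NUM_2GHZ_CHANNELS) means HT40+,
 * an odd one means HT40-.
 */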

static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
				struct iwl_nvm_data *data,
				const void * const nvm_ch_flags,
				u32 sbands_flags, bool v4)
{
	int ch_idx;
	int n_channels = 0;
	struct ieee80211_channel *channel;
	u32 ch_flags;
	int num_of_ch;
	const u16 *nvm_chan;

	if (cfg->uhb_supported) {
		num_of_ch = IWL_NVM_NUM_CHANNELS_UHB;
		nvm_chan = iwl_uhb_nvm_channels;
	} else if (cfg->nvm_type == IWL_NVM_EXT) {
		num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
		nvm_chan = iwl_ext_nvm_channels;
	} else {
		num_of_ch = IWL_NVM_NUM_CHANNELS;
		nvm_chan = iwl_nvm_channels;
	}

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		enum nl80211_band band =
			iwl_nl80211_band_from_channel_idx(ch_idx);

		if (v4)
			ch_flags =
				__le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
		else
			ch_flags =
				__le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);

		if (band == NL80211_BAND_5GHZ &&
		    !data->sku_cap_band_52ghz_enable)
			continue;

		/* workaround to disable wide channels in 5GHz */
		if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
		    band == NL80211_BAND_5GHZ) {
			ch_flags &= ~(NVM_CHANNEL_40MHZ |
				      NVM_CHANNEL_80MHZ |
				      NVM_CHANNEL_160MHZ);
		}

		if (ch_flags & NVM_CHANNEL_160MHZ)
			data->vht160_supported = true;

		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
		    !(ch_flags & NVM_CHANNEL_VALID)) {
			/*
			 * Channels might become valid later if lar is
			 * supported, hence we still want to add them to
			 * the list of supported channels to cfg80211.
			 */
			iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		channel = &data->channels[n_channels];
		n_channels++;

		channel->hw_value = nvm_chan[ch_idx];
		channel->band = band;
		channel->center_freq =
			ieee80211_channel_to_frequency(
					channel->hw_value, channel->band);

		/* Initialize regulatory-based run-time data */

		/*
		 * Default value - highest tx power value.  max_power
		 * is not used in mvm, and is used for backwards compatibility
		 */
		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;

		/* don't put limitations in case we're using LAR */
		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
			channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
							       ch_idx, band,
							       ch_flags, cfg);
		else
			channel->flags = 0;

		iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
					    channel->hw_value, ch_flags);
		IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
				 channel->hw_value, channel->max_power);
	}

	return n_channels;
}
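
/*
 * For illustration only (added note, not from the original source): the VHT
 * and HE MCS maps built below pack one 2-bit value per spatial stream into a
 * 16-bit little-endian word.  0xfffa therefore means "value 2 for streams
 * 1-2, value 3 (not supported) for streams 3-8", i.e. two spatial streams
 * with MCS 0-9 for VHT and MCS 0-11 for HE, which matches the "up to 2
 * spatial streams and all MCS" comments further down.
 */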
static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
				  struct iwl_nvm_data *data,
				  struct ieee80211_sta_vht_cap *vht_cap,
				  u8 tx_chains, u8 rx_chains)
{
	const struct iwl_cfg *cfg = trans->cfg;
	int num_rx_ants = num_of_ant(rx_chains);
	int num_tx_ants = num_of_ant(tx_chains);
	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
					   IEEE80211_VHT_MAX_AMPDU_1024K);

	vht_cap->vht_supported = true;

	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
		       IEEE80211_VHT_CAP_RXSTBC_1 |
		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
		       max_ampdu_exponent <<
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	if (data->vht160_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				IEEE80211_VHT_CAP_SHORT_GI_160;

	if (cfg->vht_mu_mimo_supported)
		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	if (cfg->ht_params->ldpc)
		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;

	if (data->sku_cap_mimo_disabled) {
		num_rx_ants = 1;
		num_tx_ants = 1;
	}

	if (num_tx_ants > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;

	switch (iwlwifi_mod_params.amsdu_size) {
	case IWL_AMSDU_DEF:
		if (trans->trans_cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_2K:
		if (trans->trans_cfg->mq_rx_supported)
			vht_cap->cap |=
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		else
			WARN(1, "RB size of 2K is not supported by this device\n");
		break;
	case IWL_AMSDU_4K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		break;
	case IWL_AMSDU_8K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
		break;
	case IWL_AMSDU_12K:
		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
		break;
	default:
		break;
	}

	vht_cap->vht_mcs.rx_mcs_map =
		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);

	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
		/* this works because NOT_SUPPORTED == 3 */
		vht_cap->vht_mcs.rx_mcs_map |=
			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
	}

	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;

	vht_cap->vht_mcs.tx_highest |=
		cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}

static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
	{
		.types_mask = BIT(NL80211_IFTYPE_STATION),
		.he_cap = {
			.has_he = true,
			.he_cap_elem = {
				.mac_cap_info[0] =
					IEEE80211_HE_MAC_CAP0_HTC_HE |
					IEEE80211_HE_MAC_CAP0_TWT_REQ,
				.mac_cap_info[1] =
					IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
					IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
				.mac_cap_info[2] =
					IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
					IEEE80211_HE_MAC_CAP2_ACK_EN,
				.mac_cap_info[3] =
					IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
				.mac_cap_info[4] =
					IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU |
					IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39,
				.mac_cap_info[5] =
					IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 |
					IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 |
					IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
					IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS |
					IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX,
				.phy_cap_info[0] =
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
				.phy_cap_info[1] =
					IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
					IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
					IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
				.phy_cap_info[2] =
					IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
				.phy_cap_info[3] =
					IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
				.phy_cap_info[4] =
					IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
					IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
					IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
				.phy_cap_info[5] =
					IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
					IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
				.phy_cap_info[6] =
					IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
				.phy_cap_info[7] =
					IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
					IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
					IEEE80211_HE_PHY_CAP7_MAX_NC_1,
				.phy_cap_info[8] =
					IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
					IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
					IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
				.phy_cap_info[9] =
					IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
			},
			/*
			 * Set default Tx/Rx HE MCS NSS Support field.
			 * Indicate support for up to 2 spatial streams and all
			 * MCS, without any special cases
			 */
			.he_mcs_nss_supp = {
				.rx_mcs_80 = cpu_to_le16(0xfffa),
				.tx_mcs_80 = cpu_to_le16(0xfffa),
				.rx_mcs_160 = cpu_to_le16(0xfffa),
				.tx_mcs_160 = cpu_to_le16(0xfffa),
				.rx_mcs_80p80 = cpu_to_le16(0xffff),
				.tx_mcs_80p80 = cpu_to_le16(0xffff),
			},
			/*
			 * Set default PPE thresholds, with PPET16 set to 0,
			 * PPET8 set to 7
			 */
			.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
		},
	},
	{
		.types_mask = BIT(NL80211_IFTYPE_AP),
		.he_cap = {
			.has_he = true,
			.he_cap_elem = {
				.mac_cap_info[0] =
					IEEE80211_HE_MAC_CAP0_HTC_HE,
				.mac_cap_info[1] =
					IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
					IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
				.mac_cap_info[2] =
					IEEE80211_HE_MAC_CAP2_BSR |
					IEEE80211_HE_MAC_CAP2_ACK_EN,
				.mac_cap_info[3] =
					IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2,
				.mac_cap_info[4] =
					IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
				.mac_cap_info[5] =
					IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU,
				.phy_cap_info[0] =
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
					IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
				.phy_cap_info[1] =
					IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD,
				.phy_cap_info[2] =
					IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US,
				.phy_cap_info[3] =
					IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM |
					IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
				.phy_cap_info[4] =
					IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
					IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
					IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
				.phy_cap_info[5] =
					IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
					IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
				.phy_cap_info[6] =
					IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
				.phy_cap_info[7] =
					IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
					IEEE80211_HE_PHY_CAP7_MAX_NC_1,
				.phy_cap_info[8] =
					IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
					IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
					IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
					IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996,
				.phy_cap_info[9] =
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
					IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED,
			},
			/*
			 * Set default Tx/Rx HE MCS NSS Support field.
			 * Indicate support for up to 2 spatial streams and all
			 * MCS, without any special cases
			 */
			.he_mcs_nss_supp = {
				.rx_mcs_80 = cpu_to_le16(0xfffa),
				.tx_mcs_80 = cpu_to_le16(0xfffa),
				.rx_mcs_160 = cpu_to_le16(0xfffa),
				.tx_mcs_160 = cpu_to_le16(0xfffa),
				.rx_mcs_80p80 = cpu_to_le16(0xffff),
				.tx_mcs_80p80 = cpu_to_le16(0xffff),
			},
			/*
			 * Set default PPE thresholds, with PPET16 set to 0,
			 * PPET8 set to 7
			 */
			.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
		},
	},
};

static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
				 u8 tx_chains, u8 rx_chains)
{
	sband->iftype_data = iwl_he_capa;
	sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);

	/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
	if ((tx_chains & rx_chains) != ANT_AB) {
		int i;

		for (i = 0; i < sband->n_iftype_data; i++) {
			iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &=
				~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS;
			iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &=
				~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS;
			iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &=
				~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
		}
	}
}

static void iwl_init_sbands(struct iwl_trans *trans,
			    struct iwl_nvm_data *data,
			    const void *nvm_ch_flags, u8 tx_chains,
			    u8 rx_chains, u32 sbands_flags, bool v4)
{
	struct device *dev = trans->dev;
	const struct iwl_cfg *cfg = trans->cfg;
	int n_channels;
	int n_used = 0;
	struct ieee80211_supported_band *sband;

	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
					  sbands_flags, v4);
	sband = &data->bands[NL80211_BAND_2GHZ];
	sband->band = NL80211_BAND_2GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
	sband->n_bitrates = N_RATES_24;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_2GHZ);
	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_2GHZ,
			     tx_chains, rx_chains);

	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);

	sband = &data->bands[NL80211_BAND_5GHZ];
	sband->band = NL80211_BAND_5GHZ;
	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
	sband->n_bitrates = N_RATES_52;
	n_used += iwl_init_sband_channels(data, sband, n_channels,
					  NL80211_BAND_5GHZ);
	iwl_init_ht_hw_capab(trans, data, &sband->ht_cap, NL80211_BAND_5GHZ,
			     tx_chains, rx_chains);
	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
		iwl_init_vht_hw_capab(trans, data, &sband->vht_cap,
				      tx_chains, rx_chains);

	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);

	if (n_channels != n_used)
		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
			    n_used, n_channels);
}

static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
		       const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + SKU);

	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
}

static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + NVM_VERSION);
	else
		return le32_to_cpup((__le32 *)(nvm_sw +
					       NVM_VERSION_EXT_NVM));
}
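
/*
 * For illustration only (example value, not from any real NVM): a SKU word of
 * 0x2F as returned by iwl_get_sku() above decodes, per the nvm_sku_bits enum,
 * to 2.4 GHz and 5.2 GHz enabled, 11n and 11ac enabled, and MIMO disabled
 * (BIT(5) set), so the device would be treated as 1x1.
 */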

static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
			     const __le16 *phy_sku)
{
	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + RADIO_CFG);

	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));

}

static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
	int n_hw_addr;

	if (cfg->nvm_type != IWL_NVM_EXT)
		return le16_to_cpup(nvm_sw + N_HW_ADDRS);

	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));

	return n_hw_addr & N_HW_ADDR_MASK;
}

static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data,
			      u32 radio_cfg)
{
	if (cfg->nvm_type != IWL_NVM_EXT) {
		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
		data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
		return;
	}

	/* set the radio configuration for family 8000 */
	data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
	data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
}

static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
{
	const u8 *hw_addr;

	hw_addr = (const u8 *)&mac_addr0;
	dest[0] = hw_addr[3];
	dest[1] = hw_addr[2];
	dest[2] = hw_addr[1];
	dest[3] = hw_addr[0];

	hw_addr = (const u8 *)&mac_addr1;
	dest[4] = hw_addr[1];
	dest[5] = hw_addr[0];
}
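
/*
 * For illustration only (register values are made up): if the MAC address
 * registers read as mac_addr0 == 0xAABBCCDD and mac_addr1 == 0x0000EEFF,
 * iwl_flip_hw_address() above produces the address AA:BB:CC:DD:EE:FF,
 * i.e. it undoes the little-endian byte order of the two 32-bit words.
 */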

static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
					struct iwl_nvm_data *data)
{
	__le32 mac_addr0 =
		cpu_to_le32(iwl_read32(trans,
				       trans->trans_cfg->csr->mac_addr0_strap));
	__le32 mac_addr1 =
		cpu_to_le32(iwl_read32(trans,
				       trans->trans_cfg->csr->mac_addr1_strap));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
	/*
	 * If the OEM fused a valid address, use it instead of the one in the
	 * OTP
	 */
	if (is_valid_ether_addr(data->hw_addr))
		return;

	mac_addr0 = cpu_to_le32(iwl_read32(trans,
					   trans->trans_cfg->csr->mac_addr0_otp));
	mac_addr1 = cpu_to_le32(iwl_read32(trans,
					   trans->trans_cfg->csr->mac_addr1_otp));

	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
}

static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
					   const struct iwl_cfg *cfg,
					   struct iwl_nvm_data *data,
					   const __le16 *mac_override,
					   const __be16 *nvm_hw)
{
	const u8 *hw_addr;

	if (mac_override) {
		static const u8 reserved_mac[] = {
			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
		};

		hw_addr = (const u8 *)(mac_override +
				       MAC_ADDRESS_OVERRIDE_EXT_NVM);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETH_ALEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (is_valid_ether_addr(data->hw_addr) &&
		    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
			return;

		IWL_ERR(trans,
			"mac address from nvm override section is not valid\n");
	}

	if (nvm_hw) {
		/* read the mac address from WFMP registers */
		__le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
							WFMP_MAC_ADDR_0));
		__le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
							WFMP_MAC_ADDR_1));

		iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);

		return;
	}

	IWL_ERR(trans, "mac address is not found\n");
}

static int iwl_set_hw_address(struct iwl_trans *trans,
			      const struct iwl_cfg *cfg,
			      struct iwl_nvm_data *data, const __be16 *nvm_hw,
			      const __le16 *mac_override)
{
	if (cfg->mac_addr_from_csr) {
		iwl_set_hw_address_from_csr(trans, data);
	} else if (cfg->nvm_type != IWL_NVM_EXT) {
		const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);

		/* The byte order is little endian 16 bit, meaning 214365 */
		data->hw_addr[0] = hw_addr[1];
		data->hw_addr[1] = hw_addr[0];
		data->hw_addr[2] = hw_addr[3];
		data->hw_addr[3] = hw_addr[2];
		data->hw_addr[4] = hw_addr[5];
		data->hw_addr[5] = hw_addr[4];
	} else {
		iwl_set_hw_address_family_8000(trans, cfg, data,
					       mac_override, nvm_hw);
	}

	if (!is_valid_ether_addr(data->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		return -EINVAL;
	}

	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);

	return 0;
}
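
/*
 * For illustration only (example bytes, not a real address): with the
 * "214365" byte order handled in iwl_set_hw_address() above, NVM HW-section
 * bytes 11 00 33 22 55 44 yield the MAC address 00:11:22:33:44:55, because
 * each 16-bit word of the address is stored little-endian.
 */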

static bool
iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
			const __be16 *nvm_hw)
{
	/*
	 * Workaround a bug in Indonesia SKUs where the regulatory in
	 * some 7000-family OTPs erroneously allow wide channels in
	 * 5GHz.  To check for Indonesia, we take the SKU value from
	 * bits 1-4 in the subsystem ID and check if it is either 5 or
	 * 9.  In those cases, we need to force-disable wide channels
	 * in 5GHz otherwise the FW will throw a sysassert when we try
	 * to use them.
	 */
	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		/*
		 * Unlike the other sections in the NVM, the hw
		 * section uses big-endian.
		 */
		u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
		u8 sku = (subsystem_id & 0x1e) >> 1;

		if (sku == 5 || sku == 9) {
			IWL_DEBUG_EEPROM(trans->dev,
					 "disabling wide channels in 5GHz (0x%0x %d)\n",
					 subsystem_id, sku);
			return true;
		}
	}

	return false;
}

struct iwl_nvm_data *
iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
		   const __be16 *nvm_hw, const __le16 *nvm_sw,
		   const __le16 *nvm_calib, const __le16 *regulatory,
		   const __le16 *mac_override, const __le16 *phy_sku,
		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
{
	struct iwl_nvm_data *data;
	bool lar_enabled;
	u32 sku, radio_cfg;
	u32 sbands_flags = 0;
	u16 lar_config;
	const __le16 *ch_section;

	if (cfg->uhb_supported)
		data = kzalloc(struct_size(data, channels,
					   IWL_NVM_NUM_CHANNELS_UHB),
			       GFP_KERNEL);
	else if (cfg->nvm_type != IWL_NVM_EXT)
		data = kzalloc(struct_size(data, channels,
					   IWL_NVM_NUM_CHANNELS),
			       GFP_KERNEL);
	else
		data = kzalloc(struct_size(data, channels,
					   IWL_NVM_NUM_CHANNELS_EXT),
			       GFP_KERNEL);
	if (!data)
		return NULL;

	data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);

	radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
	iwl_set_radio_cfg(cfg, data, radio_cfg);
	if (data->valid_tx_ant)
		tx_chains &= data->valid_tx_ant;
	if (data->valid_rx_ant)
		rx_chains &= data->valid_rx_ant;

	sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
	data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
	data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
		data->sku_cap_11n_enable = false;
	data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
				    (sku & NVM_SKU_CAP_11AC_ENABLE);
	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;

	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);

	if (cfg->nvm_type != IWL_NVM_EXT) {
		/* Checking for required sections */
		if (!nvm_calib) {
			IWL_ERR(trans,
				"Can't parse empty Calib NVM sections\n");
			kfree(data);
			return NULL;
		}

		ch_section = cfg->nvm_type == IWL_NVM_SDP ?
			     &regulatory[NVM_CHANNELS_SDP] :
			     &nvm_sw[NVM_CHANNELS];

		/* in family 8000 Xtal calibration values moved to OTP */
		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
		lar_enabled = true;
	} else {
		u16 lar_offset = data->nvm_version < 0xE39 ?
				 NVM_LAR_OFFSET_OLD :
				 NVM_LAR_OFFSET;

		lar_config = le16_to_cpup(regulatory + lar_offset);
		data->lar_enabled = !!(lar_config &
				       NVM_LAR_ENABLED);
		lar_enabled = data->lar_enabled;
		ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
	}

	/* If no valid mac address was found - bail out */
	if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
		kfree(data);
		return NULL;
	}

	if (lar_fw_supported && lar_enabled)
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;

	if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;

	iwl_init_sbands(trans, data, ch_section, tx_chains, rx_chains,
			sbands_flags, false);
	data->calib_version = 255;

	return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);

static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
				       int ch_idx, u16 nvm_flags,
				       const struct iwl_cfg *cfg)
{
	u32 flags = NL80211_RRF_NO_HT40;

	if (ch_idx < NUM_2GHZ_CHANNELS &&
	    (nvm_flags & NVM_CHANNEL_40MHZ)) {
		if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	} else if (nvm_flags & NVM_CHANNEL_40MHZ) {
		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
			flags &= ~NL80211_RRF_NO_HT40PLUS;
		else
			flags &= ~NL80211_RRF_NO_HT40MINUS;
	}

	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
		flags |= NL80211_RRF_NO_80MHZ;
	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
		flags |= NL80211_RRF_NO_160MHZ;

	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
		flags |= NL80211_RRF_NO_IR;

	if (nvm_flags & NVM_CHANNEL_RADAR)
		flags |= NL80211_RRF_DFS;

	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
		flags |= NL80211_RRF_NO_OUTDOOR;

	/* Set the GO concurrent flag only in case that NO_IR is set.
	 * Otherwise it is meaningless
	 */
	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
	    (flags & NL80211_RRF_NO_IR))
		flags |= NL80211_RRF_GO_CONCURRENT;

	return flags;
}
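
/*
 * For illustration only (example channels, not from a real MCC update): in
 * iwl_parse_nvm_mcc_info() below, consecutive channels whose regulatory flags
 * are identical and whose centre frequencies are no more than 20 MHz apart
 * are folded into a single rule.  E.g. channels 36/40/44/48 with the same
 * flags become one rule covering 5170-5250 MHz, and NL80211_RRF_AUTO_BW lets
 * cfg80211 work out the usable bandwidths within it.
 */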
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
		       int num_of_ch, __le32 *channels, u16 fw_mcc,
		       u16 geo_info)
{
	int ch_idx;
	u16 ch_flags;
	u32 reg_rule_flags, prev_reg_rule_flags = 0;
	const u16 *nvm_chan;
	struct ieee80211_regdomain *regd, *copy_rd;
	struct ieee80211_reg_rule *rule;
	enum nl80211_band band;
	int center_freq, prev_center_freq = 0;
	int valid_rules = 0;
	bool new_rule;
	int max_num_ch;

	if (cfg->uhb_supported) {
		max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
		nvm_chan = iwl_uhb_nvm_channels;
	} else if (cfg->nvm_type == IWL_NVM_EXT) {
		max_num_ch = IWL_NVM_NUM_CHANNELS_EXT;
		nvm_chan = iwl_ext_nvm_channels;
	} else {
		max_num_ch = IWL_NVM_NUM_CHANNELS;
		nvm_chan = iwl_nvm_channels;
	}

	if (WARN_ON(num_of_ch > max_num_ch))
		num_of_ch = max_num_ch;

	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
		return ERR_PTR(-EINVAL);

	IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
		      num_of_ch);

	/* build a regdomain rule for every valid channel */
	regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL);
	if (!regd)
		return ERR_PTR(-ENOMEM);

	/* set alpha2 from FW */
	regd->alpha2[0] = fw_mcc >> 8;
	regd->alpha2[1] = fw_mcc & 0xff;

	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
							     band);
		new_rule = false;

		if (!(ch_flags & NVM_CHANNEL_VALID)) {
			iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
						    nvm_chan[ch_idx], ch_flags);
			continue;
		}

		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
							     ch_flags, cfg);

		/* we can't continue the same rule */
		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
		    center_freq - prev_center_freq > 20) {
			valid_rules++;
			new_rule = true;
		}

		rule = &regd->reg_rules[valid_rules - 1];

		if (new_rule)
			rule->freq_range.start_freq_khz =
						MHZ_TO_KHZ(center_freq - 10);

		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);

		/* this doesn't matter - not used by FW */
		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
		rule->power_rule.max_eirp =
			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);

		rule->flags = reg_rule_flags;

		/* rely on auto-calculation to merge BW of contiguous chans */
		rule->flags |= NL80211_RRF_AUTO_BW;
		rule->freq_range.max_bandwidth_khz = 0;

		prev_center_freq = center_freq;
		prev_reg_rule_flags = reg_rule_flags;

		iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
					    nvm_chan[ch_idx], ch_flags);

		if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
		    band == NL80211_BAND_2GHZ)
			continue;

		reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
	}

	regd->n_reg_rules = valid_rules;

	/*
	 * Narrow down the regdomain to the used regulatory rules only, to
	 * prevent holes between the reg rules and the wmm rules.
	 */
	copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules),
			  GFP_KERNEL);
	if (!copy_rd)
		copy_rd = ERR_PTR(-ENOMEM);

	kfree(regd);
	return copy_rd;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);

#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc
#define MAX_NVM_FILE_LEN	16384

void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
		    unsigned int len)
{
#define IWL_4165_DEVICE_ID	0x5501
#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)

	if (section == NVM_SECTION_TYPE_PHY_SKU &&
	    hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
	    (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
		/* OTP 0x52 bug work around: it's a 1x1 device */
		data[3] = ANT_B | (ANT_B << 4);
}
IWL_EXPORT_SYMBOL(iwl_nvm_fixups);

/*
 * Reads external NVM from a file into mvm->nvm_sections
 *
 * HOW TO CREATE THE NVM FILE FORMAT:
 * ------------------------------
 * 1. create hex file, format:
 *      3800 -> header
 *      0000 -> header
 *      5a40 -> data
 *
 *   rev - 6 bit (word1)
 *   len - 10 bit (word1)
 *   id - 4 bit (word2)
 *   rsv - 12 bit (word2)
 *
 * 2. flip 8bits with 8 bits per line to get the right NVM file format
 *
 * 3. create binary file from the hex file
 *
 * 4. save as "iNVM_xxx.bin" under /lib/firmware
 */
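
/*
 * For illustration only (made-up header words): with the macros used in
 * iwl_read_external_nvm() below, a non-extended NVM section header with
 * word1 == 0x0005 and word2 == 0x1000 decodes to section_size ==
 * 2 * NVM_WORD1_LEN(0x0005) == 80 bytes and section_id ==
 * NVM_WORD2_ID(0x1000) == 1; a header of all zeros marks the end of file.
 */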
int iwl_read_external_nvm(struct iwl_trans *trans,
			  const char *nvm_file_name,
			  struct iwl_nvm_section *nvm_sections)
{
	int ret, section_size;
	u16 section_id;
	const struct firmware *fw_entry;
	const struct {
		__le16 word1;
		__le16 word2;
		u8 data[];
	} *file_sec;
	const u8 *eof;
	u8 *temp;
	int max_section_size;
	const __le32 *dword_buff;

#define NVM_WORD1_LEN(x) (8 * (x & 0x03FF))
#define NVM_WORD2_ID(x) (x >> 12)
#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
#define NVM_HEADER_0	(0x2A504C54)
#define NVM_HEADER_1	(0x4E564D2A)
#define NVM_HEADER_SIZE	(4 * sizeof(u32))

	IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");

	/* Maximal size depends on NVM version */
	if (trans->cfg->nvm_type != IWL_NVM_EXT)
		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
	else
		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;

	/*
	 * Obtain NVM image via request_firmware.  Since we already used
	 * request_firmware_nowait() for the firmware binary load and only
	 * get here after that we assume the NVM request can be satisfied
	 * synchronously.
	 */
	ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
	if (ret) {
		IWL_ERR(trans, "ERROR: %s isn't available %d\n",
			nvm_file_name, ret);
		return ret;
	}

	IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
		 nvm_file_name, fw_entry->size);

	if (fw_entry->size > MAX_NVM_FILE_LEN) {
		IWL_ERR(trans, "NVM file too large\n");
		ret = -EINVAL;
		goto out;
	}

	eof = fw_entry->data + fw_entry->size;
	dword_buff = (__le32 *)fw_entry->data;

	/* Some NVM files contain a header.
	 * The header is identified by two dwords, as follows:
	 * dword[0] = 0x2A504C54
	 * dword[1] = 0x4E564D2A
	 *
	 * This header must be skipped when providing the NVM data to the FW.
	 */
	if (fw_entry->size > NVM_HEADER_SIZE &&
	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
		IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
		IWL_INFO(trans, "NVM Manufacturing date %08X\n",
			 le32_to_cpu(dword_buff[3]));

		/* nvm file validation, dword_buff[2] holds the file version */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
		    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
			ret = -EFAULT;
			goto out;
		}
	} else {
		file_sec = (void *)fw_entry->data;
	}

	while (true) {
		if (file_sec->data > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section header\n");
			ret = -EINVAL;
			break;
		}

		/* check for EOF marker */
		if (!file_sec->word1 && !file_sec->word2) {
			ret = 0;
			break;
		}

		if (trans->cfg->nvm_type != IWL_NVM_EXT) {
			section_size =
				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
		} else {
			section_size = 2 * EXT_NVM_WORD2_LEN(
						le16_to_cpu(file_sec->word2));
			section_id = EXT_NVM_WORD1_ID(
						le16_to_cpu(file_sec->word1));
		}

		if (section_size > max_section_size) {
			IWL_ERR(trans, "ERROR - section too large (%d)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (!section_size) {
			IWL_ERR(trans, "ERROR - section empty\n");
			ret = -EINVAL;
			break;
		}

		if (file_sec->data + section_size > eof) {
			IWL_ERR(trans,
				"ERROR - NVM file too short for section (%d bytes)\n",
				section_size);
			ret = -EINVAL;
			break;
		}

		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
			 "Invalid NVM section ID %d\n", section_id)) {
			ret = -EINVAL;
			break;
		}

		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}

		iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);

		kfree(nvm_sections[section_id].data);
		nvm_sections[section_id].data = temp;
		nvm_sections[section_id].length = section_size;

		/* advance to the next section */
		file_sec = (void *)(file_sec->data + section_size);
	}
out:
	release_firmware(fw_entry);
	return ret;
}
IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
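
/*
 * Unlike iwl_parse_nvm_data() above, which parses the raw OTP/NVM sections on
 * the host, iwl_get_nvm() below asks the firmware for its parsed NVM via the
 * NVM_GET_INFO command and builds struct iwl_nvm_data from the response.
 */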
struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
				 const struct iwl_fw *fw)
{
	struct iwl_nvm_get_info cmd = {};
	struct iwl_nvm_data *nvm;
	struct iwl_host_cmd hcmd = {
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &cmd, },
		.len = { sizeof(cmd) },
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
	};
	int ret;
	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
				fw_has_capa(&fw->ucode_capa,
					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
	bool empty_otp;
	u32 mac_flags;
	u32 sbands_flags = 0;
	/*
	 * All the values in iwl_nvm_get_info_rsp v4 are the same as
	 * in v3, except for the channel profile part of the
	 * regulatory.  So we can just access the new struct, with the
	 * exception of the latter.
	 */
	struct iwl_nvm_get_info_rsp *rsp;
	struct iwl_nvm_get_info_rsp_v3 *rsp_v3;
	bool v4 = fw_has_api(&fw->ucode_capa,
			     IWL_UCODE_TLV_API_REGULATORY_NVM_INFO);
	size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
	void *channel_profile;

	ret = iwl_trans_send_cmd(trans, &hcmd);
	if (ret)
		return ERR_PTR(ret);

	if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size,
		 "Invalid payload len in NVM response from FW %d",
		 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
		ret = -EINVAL;
		goto out;
	}

	rsp = (void *)hcmd.resp_pkt->data;
	empty_otp = !!(le32_to_cpu(rsp->general.flags) &
		       NVM_GENERAL_FLAGS_EMPTY_OTP);
	if (empty_otp)
		IWL_INFO(trans, "OTP is empty\n");

	nvm = kzalloc(struct_size(nvm, channels, IWL_NUM_CHANNELS), GFP_KERNEL);
	if (!nvm) {
		ret = -ENOMEM;
		goto out;
	}

	iwl_set_hw_address_from_csr(trans, nvm);
	/* TODO: if platform NVM has MAC address - override it here */

	if (!is_valid_ether_addr(nvm->hw_addr)) {
		IWL_ERR(trans, "no valid mac address was found\n");
		ret = -EINVAL;
		goto err_free;
	}

	IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);

	/* Initialize general data */
	nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
	nvm->n_hw_addrs = rsp->general.n_hw_addrs;
	if (nvm->n_hw_addrs == 0)
		IWL_WARN(trans,
			 "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
			 empty_otp);

	/* Initialize MAC sku data */
	mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
	nvm->sku_cap_11ac_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
	nvm->sku_cap_11n_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
	nvm->sku_cap_11ax_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
	nvm->sku_cap_band_24ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
	nvm->sku_cap_band_52ghz_enable =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
	nvm->sku_cap_mimo_disabled =
		!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);

	/* Initialize PHY sku data */
	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);

	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
		nvm->lar_enabled = true;
		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
	}

	rsp_v3 = (void *)rsp;
	channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
			  (void *)rsp_v3->regulatory.channel_profile;

	iwl_init_sbands(trans, nvm,
			channel_profile,
			nvm->valid_tx_ant & fw->valid_tx_ant,
			nvm->valid_rx_ant & fw->valid_rx_ant,
			sbands_flags, v4);

	iwl_free_resp(&hcmd);
	return nvm;

err_free:
	kfree(nvm);
out:
	iwl_free_resp(&hcmd);
	return ERR_PTR(ret);
}
IWL_EXPORT_SYMBOL(iwl_get_nvm);