// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/firmware.h>
#if defined(__linux__)
#include <linux/rtnetlink.h>
#endif
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
#include "iwl-nvm-utils.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "fw/acpi.h"

/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	READ_NVM_CHUNK_SUCCEED = 0,
	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};

/*
 * prepare the NVM host command w/ the pointers to the nvm buffer
 * and send it to fw
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
			       u16 offset, u16 length, const u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_WRITE_OPCODE,
	};
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.len = { sizeof(struct iwl_nvm_access_cmd), length },
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, data },
		/* data may come from vmalloc, so use _DUP */
		.dataflags = { 0, IWL_HCMD_DFL_DUP },
	};
	struct iwl_rx_packet *pkt;
	struct iwl_nvm_access_resp *nvm_resp;
	int ret;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	/* Extract & check NVM write response */
	nvm_resp = (void *)pkt->data;
	if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
		IWL_ERR(mvm,
			"NVM access write command failed for section %u (status = 0x%x)\n",
			section, le16_to_cpu(nvm_resp->status));
		ret = -EIO;
	}

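	/* CMD_WANT_SKB hands ownership of the response packet to us; release it on every path */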
	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
			      u16 offset, u16 length, u8 *data)
{
	struct iwl_nvm_access_cmd nvm_access_cmd = {
		.offset = cpu_to_le16(offset),
		.length = cpu_to_le16(length),
		.type = cpu_to_le16(section),
		.op_code = NVM_READ_OPCODE,
	};
	struct iwl_nvm_access_resp *nvm_resp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = NVM_ACCESS_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	u8 *resp_data;

	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;
	ret = le16_to_cpu(nvm_resp->status);
	bytes_read = le16_to_cpu(nvm_resp->length);
	offset_read = le16_to_cpu(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		if ((offset != 0) &&
		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
			/*
			 * Meaning of NOT_VALID_ADDRESS: the driver tried to
			 * read a chunk from an address that is a multiple of
			 * 2K and got an error because that address is empty.
			 * Meaning of (offset != 0): the driver already read
			 * valid data from another chunk, so this case is not
			 * an error.
			 */
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
					 offset);
			ret = 0;
		} else {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "NVM access command failed with status %d (device: %s)\n",
					 ret, mvm->trans->name);
			ret = -ENODATA;
		}
		goto exit;
	}

	if (offset_read != offset) {
		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
			offset_read);
		ret = -EINVAL;
		goto exit;
	}

	/* Copy the data that was read into the caller's buffer */
	memcpy(data + offset, resp_data, bytes_read);
	ret = bytes_read;

exit:
	iwl_free_resp(&cmd);
	return ret;
}

static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
				 const u8 *data, u16 length)
{
	int offset = 0;

	/* copy data in chunks of 2k (and remainder if any) */

	while (offset < length) {
		int chunk_size, ret;

		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
				 length - offset);

		ret = iwl_nvm_write_chunk(mvm, section, offset,
					  chunk_size, data + offset);
		if (ret < 0)
			return ret;

		offset += chunk_size;
	}

	return 0;
}

/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM. Because the EEPROM read is not bounded
 * by the uCode, we need to manually check in this case that we don't
 * overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits, without
 * overflowing, so no check is needed.
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
				u8 *data, u32 size_read)
{
	u16 length, offset = 0;
	int ret;

	/* Set nvm section read length */
	length = IWL_NVM_DEFAULT_CHUNK_SIZE;

	ret = length;

	/* Read the NVM until exhausted (reading less than requested) */
	while (ret == length) {
		/* Check no memory assumptions fail and cause an overflow */
		if ((size_read + offset + length) >
		    mvm->trans->trans_cfg->base_params->eeprom_size) {
			IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
			return -ENOBUFS;
		}

		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
		if (ret < 0) {
			IWL_DEBUG_EEPROM(mvm->trans->dev,
					 "Cannot read NVM from section %d offset %d, length %d\n",
					 section, offset, length);
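			/* -ENODATA (section empty or unreadable) is tolerated by the caller; other errors abort the NVM read */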
			return ret;
		}
		offset += ret;
	}

	iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);

	IWL_DEBUG_EEPROM(mvm->trans->dev,
			 "NVM section %d read completed\n", section);
	return offset;
}

static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
	struct iwl_nvm_section *sections = mvm->nvm_sections;
	const __be16 *hw;
	const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
	u8 tx_ant = mvm->fw->valid_tx_ant;
	u8 rx_ant = mvm->fw->valid_rx_ant;
	int regulatory_type;

	/* Checking for required sections */
	if (mvm->trans->cfg->nvm_type == IWL_NVM) {
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
			return NULL;
		}
	} else {
		if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
			regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
		else
			regulatory_type = NVM_SECTION_TYPE_REGULATORY;

		/* SW and REGULATORY sections are mandatory */
		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
		    !mvm->nvm_sections[regulatory_type].data) {
			IWL_ERR(mvm,
				"Can't parse empty family 8000 OTP/NVM sections\n");
			return NULL;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			IWL_ERR(mvm,
				"Can't parse mac_address, empty sections\n");
			return NULL;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
		    !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
			IWL_ERR(mvm,
				"Can't parse phy_sku in B0, empty sections\n");
			return NULL;
		}
	}

	hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
	mac_override =
		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;

	regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;

	if (mvm->set_tx_ant)
		tx_ant &= mvm->set_tx_ant;

	if (mvm->set_rx_ant)
		rx_ant &= mvm->set_rx_ant;

	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
				  regulatory, mac_override, phy_sku,
				  tx_ant, rx_ant);
}

/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
	int i, ret = 0;
	struct iwl_nvm_section *sections = mvm->nvm_sections;

	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");

	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
			continue;
		ret = iwl_nvm_write_section(mvm, i, sections[i].data,
					    sections[i].length);
		if (ret < 0) {
			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
			break;
		}
	}
	return ret;
}

int iwl_nvm_init(struct iwl_mvm *mvm)
{
	int ret, section;
	u32 size_read = 0;
	u8 *nvm_buffer, *temp;
	const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;

	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
		return -EINVAL;

	/* load NVM values from nic */
	/* Read From FW NVM */
	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

	nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size,
			     GFP_KERNEL);
	if (!nvm_buffer)
		return -ENOMEM;
	for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
		/* we override the constness for initial read */
		ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
					   size_read);
		if (ret == -ENODATA) {
			ret = 0;
			continue;
		}
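		/* any other error aborts reading the remaining sections */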
		if (ret < 0)
			break;
		size_read += ret;
		temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
		if (!temp) {
			ret = -ENOMEM;
			break;
		}

		iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);

		mvm->nvm_sections[section].data = temp;
		mvm->nvm_sections[section].length = ret;

#ifdef CONFIG_IWLWIFI_DEBUGFS
		switch (section) {
		case NVM_SECTION_TYPE_SW:
			mvm->nvm_sw_blob.data = temp;
			mvm->nvm_sw_blob.size = ret;
			break;
		case NVM_SECTION_TYPE_CALIBRATION:
			mvm->nvm_calib_blob.data = temp;
			mvm->nvm_calib_blob.size = ret;
			break;
		case NVM_SECTION_TYPE_PRODUCTION:
			mvm->nvm_prod_blob.data = temp;
			mvm->nvm_prod_blob.size = ret;
			break;
		case NVM_SECTION_TYPE_PHY_SKU:
			mvm->nvm_phy_sku_blob.data = temp;
			mvm->nvm_phy_sku_blob.size = ret;
			break;
		case NVM_SECTION_TYPE_REGULATORY_SDP:
		case NVM_SECTION_TYPE_REGULATORY:
			mvm->nvm_reg_blob.data = temp;
			mvm->nvm_reg_blob.size = ret;
			break;
		default:
			if (section == mvm->cfg->nvm_hw_section_num) {
				mvm->nvm_hw_blob.data = temp;
				mvm->nvm_hw_blob.size = ret;
				break;
			}
		}
#endif
	}
	if (!size_read)
		IWL_ERR(mvm, "OTP is blank\n");
	kfree(nvm_buffer);

	/* Only if PNVM selected in the mod param - load external NVM */
	if (mvm->nvm_file_name) {
		/* read External NVM file from the mod param */
		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
					    mvm->nvm_sections);
		if (ret) {
			mvm->nvm_file_name = nvm_file_C;

			if ((ret == -EFAULT || ret == -ENOENT) &&
			    mvm->nvm_file_name) {
				/* in case reading the nvm file failed, try again */
				ret = iwl_read_external_nvm(mvm->trans,
							    mvm->nvm_file_name,
							    mvm->nvm_sections);
				if (ret)
					return ret;
			} else {
				return ret;
			}
		}
	}

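	/*
	 * At this point mvm->nvm_sections holds either the image read from
	 * the device or, if a file was requested above, the external NVM data.
	 */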
	/* parse the relevant nvm sections */
	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
	if (!mvm->nvm_data)
		return -ENODATA;
	IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
			 mvm->nvm_data->nvm_version);

	return ret < 0 ? ret : 0;
}

struct iwl_mcc_update_resp_v8 *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
		   enum iwl_mcc_source src_id)
{
	struct iwl_mcc_update_cmd mcc_update_cmd = {
		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
		.source_id = (u8)src_id,
	};
	struct iwl_mcc_update_resp_v8 *resp_cp;
	struct iwl_rx_packet *pkt;
	struct iwl_host_cmd cmd = {
		.id = MCC_UPDATE_CMD,
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &mcc_update_cmd },
	};

	int ret, resp_ver;
	u32 status;
	int resp_len, n_channels;
	u16 mcc;

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return ERR_PTR(-EOPNOTSUPP);

	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);

	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
		      alpha2[0], alpha2[1], src_id);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret)
		return ERR_PTR(ret);

	pkt = cmd.resp_pkt;

	resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
					   MCC_UPDATE_CMD, 0);

	/* Extract MCC response */
	if (resp_ver >= 8) {
		struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
		if (iwl_rx_packet_payload_len(pkt) !=
		    struct_size(mcc_resp_v8, channels, n_channels)) {
			resp_cp = ERR_PTR(-EINVAL);
			goto exit;
		}
		resp_len = struct_size(resp_cp, channels, n_channels);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);
		if (!resp_cp) {
			resp_cp = ERR_PTR(-ENOMEM);
			goto exit;
		}
		resp_cp->status = mcc_resp_v8->status;
		resp_cp->mcc = mcc_resp_v8->mcc;
		resp_cp->cap = mcc_resp_v8->cap;
		resp_cp->source_id = mcc_resp_v8->source_id;
		resp_cp->time = mcc_resp_v8->time;
		resp_cp->geo_info = mcc_resp_v8->geo_info;
		resp_cp->n_channels = mcc_resp_v8->n_channels;
		memcpy(resp_cp->channels, mcc_resp_v8->channels,
		       n_channels * sizeof(__le32));
	} else if (fw_has_capa(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
		struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
		if (iwl_rx_packet_payload_len(pkt) !=
		    struct_size(mcc_resp_v4, channels, n_channels)) {
			resp_cp = ERR_PTR(-EINVAL);
			goto exit;
		}
		resp_len = struct_size(resp_cp, channels, n_channels);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);
		if (!resp_cp) {
			resp_cp = ERR_PTR(-ENOMEM);
			goto exit;
		}

		resp_cp->status = mcc_resp_v4->status;
		resp_cp->mcc = mcc_resp_v4->mcc;
		resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
		resp_cp->source_id = mcc_resp_v4->source_id;
		resp_cp->time = mcc_resp_v4->time;
		resp_cp->geo_info = mcc_resp_v4->geo_info;
		resp_cp->n_channels = mcc_resp_v4->n_channels;
		memcpy(resp_cp->channels, mcc_resp_v4->channels,
		       n_channels * sizeof(__le32));
	} else {
		struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;

		n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
		if (iwl_rx_packet_payload_len(pkt) !=
		    struct_size(mcc_resp_v3, channels, n_channels)) {
			resp_cp = ERR_PTR(-EINVAL);
			goto exit;
		}
		resp_len = struct_size(resp_cp, channels, n_channels);
		resp_cp = kzalloc(resp_len, GFP_KERNEL);
		if (!resp_cp) {
			resp_cp = ERR_PTR(-ENOMEM);
			goto exit;
		}

		resp_cp->status = mcc_resp_v3->status;
		resp_cp->mcc = mcc_resp_v3->mcc;
		resp_cp->cap = cpu_to_le32(mcc_resp_v3->cap);
		resp_cp->source_id = mcc_resp_v3->source_id;
		resp_cp->time = mcc_resp_v3->time;
		resp_cp->geo_info = mcc_resp_v3->geo_info;
		resp_cp->n_channels = mcc_resp_v3->n_channels;
		memcpy(resp_cp->channels, mcc_resp_v3->channels,
		       n_channels * sizeof(__le32));
	}

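	/* from here on the response has been copied into the v8 layout regardless of the firmware version */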
	status = le32_to_cpu(resp_cp->status);

	mcc = le16_to_cpu(resp_cp->mcc);

	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
	if (mcc == 0) {
		mcc = 0x3030;  /* "00" - world */
		resp_cp->mcc = cpu_to_le16(mcc);
	}

	IWL_DEBUG_LAR(mvm,
		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
		      status, mcc, mcc >> 8, mcc & 0xff, n_channels);

exit:
	iwl_free_resp(&cmd);
	return resp_cp;
}

int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
	bool tlv_lar;
	bool nvm_lar;
	int retval;
	struct ieee80211_regdomain *regd;
	char mcc[3];

	if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
		nvm_lar = mvm->nvm_data->lar_enabled;
		if (tlv_lar != nvm_lar)
			IWL_INFO(mvm,
				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM = %s)\n",
				 tlv_lar ? "enabled" : "disabled",
				 nvm_lar ? "enabled" : "disabled");
	}

	if (!iwl_mvm_is_lar_supported(mvm))
		return 0;

	/*
	 * try to replay the last set MCC to FW. If it doesn't exist,
	 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
	 */
	retval = iwl_mvm_init_fw_regd(mvm, true);
	if (retval != -ENOENT)
		return retval;

	/*
	 * Driver regulatory hint for initial update, this also informs the
	 * firmware we support wifi location updates.
	 * Disallow scans that might crash the FW while the LAR regdomain
	 * is not set.
	 */
	mvm->lar_regdom_set = false;

	regd = iwl_mvm_get_current_regdomain(mvm, NULL);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
	    !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
		kfree(regd);
		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
					     MCC_SOURCE_BIOS, NULL);
		if (IS_ERR_OR_NULL(regd))
			return -EIO;
	}

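	/* hand the chosen regulatory domain to cfg80211 and release our local copy */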
	retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
	kfree(regd);
	return retval;
}

void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
				struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
	enum iwl_mcc_source src;
	char mcc[3];
	struct ieee80211_regdomain *regd;
	int wgds_tbl_idx;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
		IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
		return;
	}

	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
		return;

	mcc[0] = le16_to_cpu(notif->mcc) >> 8;
	mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
	mcc[2] = '\0';
	src = notif->source_id;

	IWL_DEBUG_LAR(mvm,
		      "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
		      mcc, src);
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
	if (IS_ERR_OR_NULL(regd))
		return;

	wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
	if (wgds_tbl_idx < 1)
		IWL_DEBUG_INFO(mvm,
			       "SAR WGDS is disabled or error received (%d)\n",
			       wgds_tbl_idx);
	else
		IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n",
			       wgds_tbl_idx);

	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
	kfree(regd);
}