/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/acpi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)

struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};

static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}

static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_UDP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_UDP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}

static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}

void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];

		if (!paging->fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}
		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
			       paging->fw_paging_size, DMA_BIDIRECTIONAL);

		__free_pages(paging->fw_paging_block,
			     get_order(paging->fw_paging_size));
		paging->fw_paging_block = NULL;
	}
	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
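/*
 * Copy the firmware paging image (the CSS block followed by the paging
 * blocks) from the loaded ucode sections into the DRAM blocks that were
 * allocated by iwl_alloc_fw_paging_mem().
 */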
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * find where the paging image starts:
	 * if CPU2 exists and it's in paging format, the image looks like:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
	 * non paged to CPU2 paging sec
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= image->num_sec - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);
	dma_sync_single_for_device(mvm->trans->dev,
				   mvm->fw_paging_db[0].fw_paging_phys,
				   mvm->fw_paging_db[0].fw_paging_size,
				   DMA_BIDIRECTIONAL);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * copy the paging blocks to the dram
	 * the loop index starts from 1 since the CSS block (index 0) was
	 * already copied to dram.
	 * the loop stops at num_of_paging_blk since the last block is copied
	 * separately below (it may not be full).
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       block->fw_paging_size);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		struct iwl_fw_paging *block = &mvm->fw_paging_db[idx];

		memcpy(page_address(block->fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
		dma_sync_single_for_device(mvm->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}

void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
	__le32 *dump_data = mfu_dump_notif->data;
	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
	int i;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));

	for (i = 0; i < n_words; i++)
		IWL_DEBUG_INFO(mvm,
			       "MFUART assert dump, dword %u: 0x%08x\n",
			       le16_to_cpu(mfu_dump_notif->index_num) *
			       n_words + i,
			       le32_to_cpu(dump_data[i]));
}
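/*
 * Allocate DRAM for the CSS block and for each paging block, and DMA-map
 * the blocks when the device is DMA capable.  For non-DMA devices only a
 * pseudo physical address (PAGING_ADDR_SIG | block index) is recorded.
 */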
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx, order, num_of_pages, size, dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk =
		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/*
	 * Allocate CSS and paging blocks in dram.
	 */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
		order = get_order(size);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = size;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		if (!blk_idx)
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
				     order);
		else
			IWL_DEBUG_FW(mvm,
				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
				     order);
	}

	return 0;
}

static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				     PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};
	int blk_idx, size = sizeof(paging_cmd);

	/* A bit hard coded - but this is the old API and will be deprecated */
	if (!iwl_mvm_has_new_tx_api(mvm))
		size -= NUM_OF_FW_PAGING_BLOCKS * 4;

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;

		addr = addr >> PAGE_2_EXP_SIZE;

		if (iwl_mvm_has_new_tx_api(mvm)) {
			__le64 phy_addr = cpu_to_le64(addr);

			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
		} else {
			__le32 phy_addr = cpu_to_le32(addr);

			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
		}
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, size, &paging_cmd);
}

/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}

static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_v3 *palive3;
	struct mvm_alive_resp *palive;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else {
		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	}

	mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
	if (lmac2)
		mvm->error_event_table[1] =
			le32_to_cpu(lmac2->error_event_table_ptr);
	mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
	mvm->sf_space.addr = le32_to_cpu(lmac1->st_fwrd_addr);
	mvm->sf_space.size = le32_to_cpu(lmac1->st_fwrd_size);

	mvm->umac_error_event_table = le32_to_cpu(umac->error_info_addr);

	alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;
	if (mvm->umac_error_event_table)
		mvm->support_umac_log = true;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	return true;
}
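/*
 * Notification-wait callback used in the unified image flow: the
 * INIT_COMPLETE_NOTIF carries no payload we need to parse, so just
 * sanity-check the command id and stop waiting.
 */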
static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}

static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}

static int iwl_mvm_init_paging(struct iwl_mvm *mvm)
{
	const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode];
	int ret;

	/*
	 * Configure and operate fw paging mechanism.
	 * The driver configures the paging flow only once.
	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
	 */
	if (!fw->paging_mem_size)
		return 0;

	/*
	 * When dma is not enabled, the driver needs to copy / write
	 * the downloaded / uploaded page to / from the smem.
	 * This gets the location of the place where the pages are
	 * stored.
	 */
	if (!is_device_dma_capable(mvm->trans->dev)) {
		ret = iwl_trans_get_paging_item(mvm);
		if (ret) {
			IWL_ERR(mvm, "failed to get FW paging item\n");
			return ret;
		}
	}

	ret = iwl_save_fw_paging(mvm, fw);
	if (ret) {
		IWL_ERR(mvm, "failed to save the FW paging image\n");
		return ret;
	}

	ret = iwl_send_paging_cmd(mvm, fw);
	if (ret) {
		IWL_ERR(mvm, "failed to send the paging cmd\n");
		iwl_free_fw_paging(mvm);
		return ret;
	}

	return 0;
}
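/*
 * Load the requested ucode image into the device, start the firmware
 * and wait for the ALIVE notification before marking the ucode as
 * loaded.
 */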
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		if (trans->cfg->gen2)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */

	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	if (iwl_mvm_is_dqa_supported(mvm))
		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
	else
		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
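/*
 * Unified (single image) firmware flow: load the regular ucode, send the
 * extended init config, optionally read the NVM and complete NVM access,
 * then wait for the INIT complete notification.
 */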
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD), 0,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	if (WARN_ON(ret))
		goto error;

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE), 0,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	return iwl_wait_notification(&mvm->notif_wait, &init_wait,
				     MVM_UCODE_ALIVE_TIMEOUT);

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}

static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
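/*
 * Run the INIT ucode: load it, read the NVM, send the PHY configuration
 * and wait for the calibration results (the flow is aborted early when
 * RF-kill is asserted).
 */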
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_run_unified_mvm_ucode(mvm, true);

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		/* Read nvm */
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * abort after reading the nvm in case RF Kill is on, we will complete
	 * the init sequence later, when RF kill is switched off
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
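/*
 * Parse the shared memory configuration notification for a000-family
 * devices, which report one set of TX FIFO sizes per LMAC.
 */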
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
					  struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
	int i, lmac;
	int lmac_num = le32_to_cpu(mem_cfg->lmac_num);

	if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
		return;

	mvm->smem_cfg.num_lmacs = lmac_num;
	mvm->smem_cfg.num_txfifo_entries =
		ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
	mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);

	for (lmac = 0; lmac < lmac_num; lmac++) {
		struct iwl_shared_mem_lmac_cfg *lmac_cfg =
			&mem_cfg->lmac_smem[lmac];

		for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
			mvm->smem_cfg.lmac[lmac].txfifo_size[i] =
				le32_to_cpu(lmac_cfg->txfifo_size[i]);
		mvm->smem_cfg.lmac[lmac].rxfifo1_size =
			le32_to_cpu(lmac_cfg->rxfifo1_size);
	}
}

static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
				     struct iwl_rx_packet *pkt)
{
	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
	int i;

	mvm->smem_cfg.num_lmacs = 1;

	mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
		mvm->smem_cfg.lmac[0].txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);

	mvm->smem_cfg.lmac[0].rxfifo1_size =
		le32_to_cpu(mem_cfg->rxfifo_size[0]);
	mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);

	/* new API has more data, from rxfifo_addr field and on */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) !=
			     sizeof(mem_cfg->internal_txfifo_size));

		for (i = 0;
		     i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size);
		     i++)
			mvm->smem_cfg.internal_txfifo_size[i] =
				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
	}
}

static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_rx_packet *pkt;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
	else
		cmd.id = SHARED_MEM_CFG;

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
	else
		iwl_mvm_parse_shared_mem(mvm, pkt);

	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}

static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}

#ifdef CONFIG_ACPI
#define ACPI_WRDS_METHOD		"WRDS"
#define ACPI_EWRD_METHOD		"EWRD"
#define ACPI_WGDS_METHOD		"WGDS"
#define ACPI_WIFI_DOMAIN		(0x07)
#define ACPI_WRDS_WIFI_DATA_SIZE	(IWL_MVM_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE	((IWL_MVM_SAR_PROFILE_NUM - 1) * \
					 IWL_MVM_SAR_TABLE_SIZE + 3)
#define ACPI_WGDS_WIFI_DATA_SIZE	18
#define ACPI_WGDS_NUM_BANDS		2
#define ACPI_WGDS_TABLE_SIZE		3

static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
				   union acpi_object *table,
				   struct iwl_mvm_sar_profile *profile,
				   bool enabled)
{
	int i;

	profile->enabled = enabled;

	for (i = 0; i < IWL_MVM_SAR_TABLE_SIZE; i++) {
		if ((table[i].type != ACPI_TYPE_INTEGER) ||
		    (table[i].integer.value > U8_MAX))
			return -EINVAL;

		profile->table[i] = table[i].integer.value;
	}

	return 0;
}
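/*
 * Walk the packages returned by an ACPI SAR method and return the one
 * whose domain is ACPI_WIFI_DOMAIN, or an ERR_PTR if none matches.
 */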
static union acpi_object *iwl_mvm_sar_find_wifi_pkg(struct iwl_mvm *mvm,
						    union acpi_object *data,
						    int data_size)
{
	int i;
	union acpi_object *wifi_pkg;

	/*
	 * We need at least two packages, one for the revision and one
	 * for the data itself.  Also check that the revision is valid
	 * (i.e. it is an integer set to 0).
	 */
	if (data->type != ACPI_TYPE_PACKAGE ||
	    data->package.count < 2 ||
	    data->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    data->package.elements[0].integer.value != 0) {
		IWL_DEBUG_RADIO(mvm, "Unsupported packages structure\n");
		return ERR_PTR(-EINVAL);
	}

	/* loop through all the packages to find the one for WiFi */
	for (i = 1; i < data->package.count; i++) {
		union acpi_object *domain;

		wifi_pkg = &data->package.elements[i];

		/* Skip anything that is not a package with the right
		 * number of elements (i.e. domain_type,
		 * enabled/disabled plus the actual data size).
		 */
		if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
		    wifi_pkg->package.count != data_size)
			continue;

		domain = &wifi_pkg->package.elements[0];
		if (domain->type == ACPI_TYPE_INTEGER &&
		    domain->integer.value == ACPI_WIFI_DOMAIN)
			break;

		wifi_pkg = NULL;
	}

	if (!wifi_pkg)
		return ERR_PTR(-ENOENT);

	return wifi_pkg;
}
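/*
 * Read the WRDS ACPI table, which holds SAR profile 1, and store it in
 * sar_profiles[0].
 */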
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg, *table;
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	bool enabled;
	int ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
		return -ENOENT;
	}

	/* Call WRDS with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wrds.pointer,
					     ACPI_WRDS_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);

	/* position of the actual table */
	table = &wifi_pkg->package.elements[2];

	/* The profile from WRDS is officially profile 1, but goes
	 * into sar_profiles[0] (because we don't have a profile 0).
	 */
	ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
				      enabled);

out_free:
	kfree(wrds.pointer);
	return ret;
}
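/*
 * Read the EWRD ACPI table, which holds the extended SAR profiles
 * (officially profiles 2..IWL_MVM_SAR_PROFILE_NUM), and store them in
 * sar_profiles[1] onwards.
 */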
static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	union acpi_object *wifi_pkg;
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer ewrd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	bool enabled;
	int i, n_profiles, pos, ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_EWRD_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "EWRD method not found\n");
		return -ENOENT;
	}

	/* Call EWRD with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &ewrd);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "EWRD invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, ewrd.pointer,
					     ACPI_EWRD_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
	    (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
		ret = -EINVAL;
		goto out_free;
	}

	enabled = !!(wifi_pkg->package.elements[1].integer.value);
	n_profiles = wifi_pkg->package.elements[2].integer.value;

	/* in case of BIOS bug */
	if (n_profiles <= 0) {
		ret = -EINVAL;
		goto out_free;
	}

	/* the tables start at element 3 */
	pos = 3;

	for (i = 0; i < n_profiles; i++) {
		/* The EWRD profiles officially go from 2 to 4, but we
		 * save them in sar_profiles[1-3] (because we don't
		 * have profile 0).  So in the array we start from 1.
		 */
		ret = iwl_mvm_sar_set_profile(mvm,
					      &wifi_pkg->package.elements[pos],
					      &mvm->sar_profiles[i + 1],
					      enabled);
		if (ret < 0)
			break;

		/* go to the next table */
		pos += IWL_MVM_SAR_TABLE_SIZE;
	}

out_free:
	kfree(ewrd.pointer);
	return ret;
}

static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm,
				      struct iwl_mvm_geo_table *geo_table)
{
	union acpi_object *wifi_pkg;
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wgds = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	int i, ret;

	root_handle = ACPI_HANDLE(mvm->dev);
	if (!root_handle) {
		IWL_DEBUG_RADIO(mvm,
				"Could not retrieve root port ACPI handle\n");
		return -ENOENT;
	}

	/* Get the method's handle */
	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WGDS_METHOD,
				 &handle);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WGDS method not found\n");
		return -ENOENT;
	}

	/* Call WGDS with no arguments */
	status = acpi_evaluate_object(handle, NULL, NULL, &wgds);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_RADIO(mvm, "WGDS invocation failed (0x%x)\n", status);
		return -ENOENT;
	}

	wifi_pkg = iwl_mvm_sar_find_wifi_pkg(mvm, wgds.pointer,
					     ACPI_WGDS_WIFI_DATA_SIZE);
	if (IS_ERR(wifi_pkg)) {
		ret = PTR_ERR(wifi_pkg);
		goto out_free;
	}

	for (i = 0; i < ACPI_WGDS_WIFI_DATA_SIZE; i++) {
		union acpi_object *entry;

		entry = &wifi_pkg->package.elements[i + 1];
		if ((entry->type != ACPI_TYPE_INTEGER) ||
		    (entry->integer.value > U8_MAX)) {
			/* don't leak the ACPI buffer on a malformed entry */
			ret = -EINVAL;
			goto out_free;
		}

		geo_table->values[i] = entry->integer.value;
	}
	ret = 0;
out_free:
	kfree(wgds.pointer);
	return ret;
}

int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	struct iwl_dev_tx_power_cmd cmd = {
		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	int i, j, idx;
	int profs[IWL_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
	int len = sizeof(cmd);

	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS < 2);
	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
		     IWL_MVM_SAR_TABLE_SIZE);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v3);

	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
		struct iwl_mvm_sar_profile *prof;

		/* don't allow SAR to be disabled (profile 0 means disable) */
		if (profs[i] == 0)
			return -EPERM;

		/* we are off by one, so allow up to IWL_MVM_SAR_PROFILE_NUM */
		if (profs[i] > IWL_MVM_SAR_PROFILE_NUM)
			return -EINVAL;

		/* profiles go from 1 to 4, so decrement to access the array */
		prof = &mvm->sar_profiles[profs[i] - 1];

		/* if the profile is disabled, do nothing */
		if (!prof->enabled) {
			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
					profs[i]);
			/* if one of the profiles is disabled, we fail all */
			return -ENOENT;
		}

		IWL_DEBUG_RADIO(mvm, " Chain[%d]:\n", i);
		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
			idx = (i * IWL_NUM_SUB_BANDS) + j;
			cmd.v3.per_chain_restriction[i][j] =
				cpu_to_le16(prof->table[idx]);
			IWL_DEBUG_RADIO(mvm, " Band[%d] = %d * .125dBm\n",
					j, prof->table[idx]);
		}
	}

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
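/*
 * Read the WGDS ACPI table and send the per-band geographic tx power
 * offsets to the firmware via GEO_TX_POWER_LIMIT; a missing table is
 * not treated as an error.
 */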
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	struct iwl_mvm_geo_table geo_table;
	struct iwl_geo_tx_power_profiles_cmd cmd = {
		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
	};
	int ret, i, j, idx;
	u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);

	ret = iwl_mvm_sar_get_wgds_table(mvm, &geo_table);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* we don't fail if the table is not available */
		return 0;
	}

	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");

	BUILD_BUG_ON(IWL_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
		     ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE);

	for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
		struct iwl_per_chain_offset *chain =
			(struct iwl_per_chain_offset *)&cmd.table[i];

		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
			u8 *value;

			idx = i * ACPI_WGDS_NUM_BANDS * ACPI_WGDS_TABLE_SIZE +
				j * ACPI_WGDS_TABLE_SIZE;
			value = &geo_table.values[idx];
			chain[j].max_tx_power = cpu_to_le16(value[0]);
			chain[j].chain_a = value[1];
			chain[j].chain_b = value[2];
			IWL_DEBUG_RADIO(mvm,
					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
					i, j, value[1], value[2], value[0]);
		}
	}
	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
}

#else /* CONFIG_ACPI */
static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
{
	return -ENOENT;
}

static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
	return 0;
}
#endif /* CONFIG_ACPI */

static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
{
	int ret;

	ret = iwl_mvm_sar_get_wrds_table(mvm);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/* if not available, don't fail and don't bother with EWRD */
		return 0;
	}

	ret = iwl_mvm_sar_get_ewrd_table(mvm);
	/* if EWRD is not available, we can still use WRDS, so don't fail */
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm,
				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
				ret);

	/* choose profile 1 (WRDS) as default for both chains */
	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);

	/* if we don't have profile 0 from BIOS, just skip it */
	if (ret == -ENOENT)
		return 0;

	return ret;
}
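/*
 * Load the runtime (RT) firmware: run the INIT ucode first (unless the
 * unified image flow is used), restart the transport without entering
 * low power mode, then load the regular image and set up paging.
 */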
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_run_unified_mvm_ucode(mvm, false);

	ret = iwl_run_init_mvm_ucode(mvm, false);

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		return ret;
	}

	/*
	 * Stop and start the transport without entering low power
	 * mode. This will save the state of other components on the
	 * device that are triggered by the INIT firmware (MFUART).
	 */
	_iwl_trans_stop_device(mvm->trans, false);
	ret = _iwl_trans_start_hw(mvm->trans, false);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	return iwl_mvm_init_paging(mvm);
}
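/*
 * Bring the device up for normal operation: load the runtime firmware
 * and send the initial configuration (antennas, BT coex, PHY db, RSS,
 * station mapping, PHY contexts, thermal, LTR, MCC, scan config, SAR).
 */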
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;

		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	/* Init RSS configuration */
	/* TODO - remove a000 disablement when we have RXQ config API */
	if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Enable DQA-mode if required */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	} else {
		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
	}

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send an empty temperature
		 * reporting cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	ret = iwl_mvm_sar_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_geo_init(mvm);
	if (ret)
		goto error;

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}

int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
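/*
 * Handle the card state notification from the firmware: log the HW/SW
 * RF-kill and CT-kill state it reports.
 */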
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}

void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
		IWL_DEBUG_INFO(mvm,
			       "MFUART: image size: 0x%08x\n",
			       le32_to_cpu(mfuart_notif->image_size));
}