// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "fw/acpi.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "pcie/iwl-context-info-v2.h"
#include "pcie/utils.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	/* need at least 2 KiB, so stop at 11 */
	for (power = max_power; power >= 11; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power);
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}
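/*
 * Note: the helper above and its write counterpart below reach shared
 * (SHR) registers indirectly through the HEEP_CTRL_WRD_PCIEX control/data
 * pair.  The low 16 bits of the control word carry the SHR address and
 * the top bits what appears to be an opcode (2 for a read, 3 for a
 * write); the data register then holds the value read back or to be
 * written.
 */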
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) 139 { 140 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val); 141 iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG, 142 ((reg & 0x0000ffff) | (3 << 28))); 143 } 144 145 static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 146 { 147 if (trans->mac_cfg->base->apmg_not_supported) 148 return; 149 150 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 151 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 152 APMG_PS_CTRL_VAL_PWR_SRC_VAUX, 153 ~APMG_PS_CTRL_MSK_PWR_SRC); 154 else 155 iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG, 156 APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, 157 ~APMG_PS_CTRL_MSK_PWR_SRC); 158 } 159 160 /* PCI registers */ 161 #define PCI_CFG_RETRY_TIMEOUT 0x041 162 163 void iwl_pcie_apm_config(struct iwl_trans *trans) 164 { 165 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 166 u16 lctl; 167 u16 cap; 168 169 /* 170 * L0S states have been found to be unstable with our devices 171 * and in newer hardware they are not officially supported at 172 * all, so we must always set the L0S_DISABLED bit. 173 */ 174 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); 175 176 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); 177 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 178 179 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); 180 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; 181 IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n", 182 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", 183 trans->ltr_enabled ? "En" : "Dis"); 184 } 185 186 /* 187 * Start up NIC's basic functionality after it has been reset 188 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop()) 189 * NOTE: This does not load uCode nor start the embedded processor 190 */ 191 static int iwl_pcie_apm_init(struct iwl_trans *trans) 192 { 193 int ret; 194 195 IWL_DEBUG_INFO(trans, "Init card's basic functions\n"); 196 197 /* 198 * Use "set_bit" below rather than "write", to preserve any hardware 199 * bits already set by default after reset. 200 */ 201 202 /* Disable L0S exit timer (platform NMI Work/Around) */ 203 if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_8000) 204 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 205 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 206 207 /* 208 * Disable L0s without affecting L1; 209 * don't wait for ICH L0s (ICH bug W/A) 210 */ 211 iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS, 212 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 213 214 /* Set FH wait threshold to maximum (HW error during stress W/A) */ 215 iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL); 216 217 /* 218 * Enable HAP INTA (interrupt from management bus) to 219 * wake device's PCI Express link L1a -> L0s 220 */ 221 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 222 CSR_HW_IF_CONFIG_REG_HAP_WAKE); 223 224 iwl_pcie_apm_config(trans); 225 226 /* Configure analog phase-lock-loop before activating to D0A */ 227 if (trans->mac_cfg->base->pll_cfg) 228 iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL); 229 230 ret = iwl_finish_nic_init(trans); 231 if (ret) 232 return ret; 233 234 if (trans->cfg->host_interrupt_operation_mode) { 235 /* 236 * This is a bit of an abuse - This is needed for 7260 / 3160 237 * only check host_interrupt_operation_mode even if this is 238 * not related to host_interrupt_operation_mode. 239 * 240 * Enable the oscillator to count wake up time for L1 exit. 
This 241 * consumes slightly more power (100uA) - but allows to be sure 242 * that we wake up from L1 on time. 243 * 244 * This looks weird: read twice the same register, discard the 245 * value, set a bit, and yet again, read that same register 246 * just to discard the value. But that's the way the hardware 247 * seems to like it. 248 */ 249 iwl_read_prph(trans, OSC_CLK); 250 iwl_read_prph(trans, OSC_CLK); 251 iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL); 252 iwl_read_prph(trans, OSC_CLK); 253 iwl_read_prph(trans, OSC_CLK); 254 } 255 256 /* 257 * Enable DMA clock and wait for it to stabilize. 258 * 259 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" 260 * bits do not disable clocks. This preserves any hardware 261 * bits already set by default in "CLK_CTRL_REG" after reset. 262 */ 263 if (!trans->mac_cfg->base->apmg_not_supported) { 264 iwl_write_prph(trans, APMG_CLK_EN_REG, 265 APMG_CLK_VAL_DMA_CLK_RQT); 266 udelay(20); 267 268 /* Disable L1-Active */ 269 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 270 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 271 272 /* Clear the interrupt in APMG if the NIC is in RFKILL */ 273 iwl_write_prph(trans, APMG_RTC_INT_STT_REG, 274 APMG_RTC_INT_STT_RFKILL); 275 } 276 277 set_bit(STATUS_DEVICE_ENABLED, &trans->status); 278 279 return 0; 280 } 281 282 /* 283 * Enable LP XTAL to avoid HW bug where device may consume much power if 284 * FW is not loaded after device reset. LP XTAL is disabled by default 285 * after device HW reset. Do it only if XTAL is fed by internal source. 286 * Configure device's "persistence" mode to avoid resetting XTAL again when 287 * SHRD_HW_RST occurs in S3. 288 */ 289 static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans) 290 { 291 int ret; 292 u32 apmg_gp1_reg; 293 u32 apmg_xtal_cfg_reg; 294 u32 dl_cfg_reg; 295 296 /* Force XTAL ON */ 297 iwl_trans_set_bit(trans, CSR_GP_CNTRL, 298 CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 299 300 ret = iwl_trans_pcie_sw_reset(trans, true); 301 302 if (!ret) 303 ret = iwl_finish_nic_init(trans); 304 305 if (WARN_ON(ret)) { 306 /* Release XTAL ON request */ 307 iwl_trans_clear_bit(trans, CSR_GP_CNTRL, 308 CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 309 return; 310 } 311 312 /* 313 * Clear "disable persistence" to avoid LP XTAL resetting when 314 * SHRD_HW_RST is applied in S3. 315 */ 316 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, 317 APMG_PCIDEV_STT_VAL_PERSIST_DIS); 318 319 /* 320 * Force APMG XTAL to be active to prevent its disabling by HW 321 * caused by APMG idle state. 322 */ 323 apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans, 324 SHR_APMG_XTAL_CFG_REG); 325 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 326 apmg_xtal_cfg_reg | 327 SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 328 329 ret = iwl_trans_pcie_sw_reset(trans, true); 330 if (ret) 331 IWL_ERR(trans, 332 "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n"); 333 334 /* Enable LP XTAL by indirect access through CSR */ 335 apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG); 336 iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg | 337 SHR_APMG_GP1_WF_XTAL_LP_EN | 338 SHR_APMG_GP1_CHICKEN_BIT_SELECT); 339 340 /* Clear delay line clock power up */ 341 dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG); 342 iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg & 343 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP); 344 345 /* 346 * Enable persistence mode to avoid LP XTAL resetting when 347 * SHRD_HW_RST is applied in S3. 
348 */ 349 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 350 CSR_HW_IF_CONFIG_REG_PERSISTENCE); 351 352 /* 353 * Clear "initialization complete" bit to move adapter from 354 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 355 */ 356 iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 357 358 /* Activates XTAL resources monitor */ 359 iwl_trans_set_bit(trans, CSR_MONITOR_CFG_REG, 360 CSR_MONITOR_XTAL_RESOURCES); 361 362 /* Release XTAL ON request */ 363 iwl_trans_clear_bit(trans, CSR_GP_CNTRL, 364 CSR_GP_CNTRL_REG_FLAG_XTAL_ON); 365 udelay(10); 366 367 /* Release APMG XTAL */ 368 iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG, 369 apmg_xtal_cfg_reg & 370 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ); 371 } 372 373 void iwl_pcie_apm_stop_master(struct iwl_trans *trans) 374 { 375 int ret; 376 377 /* stop device's busmaster DMA activity */ 378 379 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 380 iwl_set_bit(trans, CSR_GP_CNTRL, 381 CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ); 382 383 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 384 CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 385 CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS, 386 100); 387 usleep_range(10000, 20000); 388 } else { 389 iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 390 391 ret = iwl_poll_bit(trans, CSR_RESET, 392 CSR_RESET_REG_FLAG_MASTER_DISABLED, 393 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 394 } 395 396 if (ret < 0) 397 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 398 399 IWL_DEBUG_INFO(trans, "stop master\n"); 400 } 401 402 static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave) 403 { 404 IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n"); 405 406 if (op_mode_leave) { 407 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 408 iwl_pcie_apm_init(trans); 409 410 /* inform ME that we are leaving */ 411 if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000) 412 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 413 APMG_PCIDEV_STT_VAL_WAKE_ME); 414 else if (trans->mac_cfg->device_family >= 415 IWL_DEVICE_FAMILY_8000) { 416 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 417 CSR_RESET_LINK_PWR_MGMT_DISABLED); 418 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 419 CSR_HW_IF_CONFIG_REG_WAKE_ME | 420 CSR_HW_IF_CONFIG_REG_WAKE_ME_PCIE_OWNER_EN); 421 mdelay(1); 422 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 423 CSR_RESET_LINK_PWR_MGMT_DISABLED); 424 } 425 mdelay(5); 426 } 427 428 clear_bit(STATUS_DEVICE_ENABLED, &trans->status); 429 430 /* Stop device's DMA activity */ 431 iwl_pcie_apm_stop_master(trans); 432 433 if (trans->cfg->lp_xtal_workaround) { 434 iwl_pcie_apm_lp_xtal_enable(trans); 435 return; 436 } 437 438 iwl_trans_pcie_sw_reset(trans, false); 439 440 /* 441 * Clear "initialization complete" bit to move adapter from 442 * D0A* (powered-up Active) --> D0U* (Uninitialized) state. 
443 */ 444 iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 445 } 446 447 static int iwl_pcie_nic_init(struct iwl_trans *trans) 448 { 449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 450 int ret; 451 452 /* nic_init */ 453 spin_lock_bh(&trans_pcie->irq_lock); 454 ret = iwl_pcie_apm_init(trans); 455 spin_unlock_bh(&trans_pcie->irq_lock); 456 457 if (ret) 458 return ret; 459 460 iwl_pcie_set_pwr(trans, false); 461 462 iwl_op_mode_nic_config(trans->op_mode); 463 464 /* Allocate the RX queue, or reset if it is already allocated */ 465 ret = iwl_pcie_rx_init(trans); 466 if (ret) 467 return ret; 468 469 /* Allocate or reset and init all Tx and Command queues */ 470 if (iwl_pcie_tx_init(trans)) { 471 iwl_pcie_rx_free(trans); 472 return -ENOMEM; 473 } 474 475 if (trans->mac_cfg->base->shadow_reg_enable) { 476 /* enable shadow regs in HW */ 477 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF); 478 IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n"); 479 } 480 481 return 0; 482 } 483 484 #define HW_READY_TIMEOUT (50) 485 486 /* Note: returns poll_bit return value, which is >= 0 if success */ 487 static int iwl_pcie_set_hw_ready(struct iwl_trans *trans) 488 { 489 int ret; 490 491 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 492 CSR_HW_IF_CONFIG_REG_PCI_OWN_SET); 493 494 /* See if we got it */ 495 ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, 496 CSR_HW_IF_CONFIG_REG_PCI_OWN_SET, 497 CSR_HW_IF_CONFIG_REG_PCI_OWN_SET, 498 HW_READY_TIMEOUT); 499 500 if (ret >= 0) 501 iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE); 502 503 IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : ""); 504 return ret; 505 } 506 507 /* Note: returns standard 0/-ERROR code */ 508 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans) 509 { 510 int ret; 511 int iter; 512 513 IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n"); 514 515 ret = iwl_pcie_set_hw_ready(trans); 516 /* If the card is ready, exit 0 */ 517 if (ret >= 0) { 518 trans->csme_own = false; 519 return 0; 520 } 521 522 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 523 CSR_RESET_LINK_PWR_MGMT_DISABLED); 524 usleep_range(1000, 2000); 525 526 for (iter = 0; iter < 10; iter++) { 527 int t = 0; 528 529 /* If HW is not ready, prepare the conditions to check again */ 530 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 531 CSR_HW_IF_CONFIG_REG_WAKE_ME); 532 533 do { 534 ret = iwl_pcie_set_hw_ready(trans); 535 if (ret >= 0) { 536 trans->csme_own = false; 537 return 0; 538 } 539 540 if (iwl_mei_is_connected()) { 541 IWL_DEBUG_INFO(trans, 542 "Couldn't prepare the card but SAP is connected\n"); 543 trans->csme_own = true; 544 if (trans->mac_cfg->device_family != 545 IWL_DEVICE_FAMILY_9000) 546 IWL_ERR(trans, 547 "SAP not supported for this NIC family\n"); 548 549 return -EBUSY; 550 } 551 552 usleep_range(200, 1000); 553 t += 200; 554 } while (t < 150000); 555 msleep(25); 556 } 557 558 IWL_ERR(trans, "Couldn't prepare the card\n"); 559 560 return ret; 561 } 562 563 /* 564 * ucode 565 */ 566 static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans, 567 u32 dst_addr, dma_addr_t phy_addr, 568 u32 byte_cnt) 569 { 570 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 571 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); 572 573 iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), 574 dst_addr); 575 576 iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), 577 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); 578 579 iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), 580 
(iwl_get_dma_hi_addr(phy_addr) 581 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); 582 583 iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), 584 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | 585 BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | 586 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); 587 588 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), 589 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 590 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | 591 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); 592 } 593 594 static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, 595 u32 dst_addr, dma_addr_t phy_addr, 596 u32 byte_cnt) 597 { 598 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 599 int ret; 600 601 trans_pcie->ucode_write_complete = false; 602 603 if (!iwl_trans_grab_nic_access(trans)) 604 return -EIO; 605 606 iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr, 607 byte_cnt); 608 iwl_trans_release_nic_access(trans); 609 610 ret = wait_event_timeout(trans_pcie->ucode_write_waitq, 611 trans_pcie->ucode_write_complete, 5 * HZ); 612 if (!ret) { 613 IWL_ERR(trans, "Failed to load firmware chunk!\n"); 614 iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev); 615 return -ETIMEDOUT; 616 } 617 618 return 0; 619 } 620 621 static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num, 622 const struct fw_desc *section) 623 { 624 u8 *v_addr; 625 dma_addr_t p_addr; 626 u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len); 627 int ret = 0; 628 629 IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n", 630 section_num); 631 632 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr, 633 GFP_KERNEL | __GFP_NOWARN); 634 if (!v_addr) { 635 IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n"); 636 chunk_sz = PAGE_SIZE; 637 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, 638 &p_addr, GFP_KERNEL); 639 if (!v_addr) 640 return -ENOMEM; 641 } 642 643 for (offset = 0; offset < section->len; offset += chunk_sz) { 644 u32 copy_size, dst_addr; 645 bool extended_addr = false; 646 647 copy_size = min_t(u32, chunk_sz, section->len - offset); 648 dst_addr = section->offset + offset; 649 650 if (dst_addr >= IWL_FW_MEM_EXTENDED_START && 651 dst_addr <= IWL_FW_MEM_EXTENDED_END) 652 extended_addr = true; 653 654 if (extended_addr) 655 iwl_set_bits_prph(trans, LMPM_CHICK, 656 LMPM_CHICK_EXTENDED_ADDR_SPACE); 657 658 memcpy(v_addr, (const u8 *)section->data + offset, copy_size); 659 ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr, 660 copy_size); 661 662 if (extended_addr) 663 iwl_clear_bits_prph(trans, LMPM_CHICK, 664 LMPM_CHICK_EXTENDED_ADDR_SPACE); 665 666 if (ret) { 667 IWL_ERR(trans, 668 "Could not load the [%d] uCode section\n", 669 section_num); 670 break; 671 } 672 } 673 674 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr); 675 return ret; 676 } 677 678 static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, 679 const struct fw_img *image, 680 int cpu, 681 int *first_ucode_section) 682 { 683 int shift_param; 684 int i, ret = 0, sec_num = 0x1; 685 u32 val, last_read_idx = 0; 686 687 if (cpu == 1) { 688 shift_param = 0; 689 *first_ucode_section = 0; 690 } else { 691 shift_param = 16; 692 (*first_ucode_section)++; 693 } 694 695 for (i = *first_ucode_section; i < image->num_sec; i++) { 696 last_read_idx = i; 697 698 /* 699 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 700 * CPU1 to CPU2. 
701 * PAGING_SEPARATOR_SECTION delimiter - separate between 702 * CPU2 non paged to CPU2 paging sec. 703 */ 704 if (!image->sec[i].data || 705 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 706 image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 707 IWL_DEBUG_FW(trans, 708 "Break since Data not valid or Empty section, sec = %d\n", 709 i); 710 break; 711 } 712 713 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 714 if (ret) 715 return ret; 716 717 /* Notify ucode of loaded section number and status */ 718 val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS); 719 val = val | (sec_num << shift_param); 720 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val); 721 722 sec_num = (sec_num << 1) | 0x1; 723 } 724 725 *first_ucode_section = last_read_idx; 726 727 iwl_enable_interrupts(trans); 728 729 if (trans->mac_cfg->gen2) { 730 if (cpu == 1) 731 iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 732 0xFFFF); 733 else 734 iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS, 735 0xFFFFFFFF); 736 } else { 737 if (cpu == 1) 738 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 739 0xFFFF); 740 else 741 iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 742 0xFFFFFFFF); 743 } 744 745 return 0; 746 } 747 748 static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans, 749 const struct fw_img *image, 750 int cpu, 751 int *first_ucode_section) 752 { 753 int i, ret = 0; 754 u32 last_read_idx = 0; 755 756 if (cpu == 1) 757 *first_ucode_section = 0; 758 else 759 (*first_ucode_section)++; 760 761 for (i = *first_ucode_section; i < image->num_sec; i++) { 762 last_read_idx = i; 763 764 /* 765 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between 766 * CPU1 to CPU2. 767 * PAGING_SEPARATOR_SECTION delimiter - separate between 768 * CPU2 non paged to CPU2 paging sec. 
769 */ 770 if (!image->sec[i].data || 771 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION || 772 image->sec[i].offset == PAGING_SEPARATOR_SECTION) { 773 IWL_DEBUG_FW(trans, 774 "Break since Data not valid or Empty section, sec = %d\n", 775 i); 776 break; 777 } 778 779 ret = iwl_pcie_load_section(trans, i, &image->sec[i]); 780 if (ret) 781 return ret; 782 } 783 784 *first_ucode_section = last_read_idx; 785 786 return 0; 787 } 788 789 static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans) 790 { 791 enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1; 792 struct iwl_fw_ini_allocation_tlv *fw_mon_cfg = 793 &trans->dbg.fw_mon_cfg[alloc_id]; 794 struct iwl_dram_data *frag; 795 796 if (!iwl_trans_dbg_ini_valid(trans)) 797 return; 798 799 if (le32_to_cpu(fw_mon_cfg->buf_location) == 800 IWL_FW_INI_LOCATION_SRAM_PATH) { 801 IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n"); 802 /* set sram monitor by enabling bit 7 */ 803 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 804 CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM); 805 806 return; 807 } 808 809 if (le32_to_cpu(fw_mon_cfg->buf_location) != 810 IWL_FW_INI_LOCATION_DRAM_PATH || 811 !trans->dbg.fw_mon_ini[alloc_id].num_frags) 812 return; 813 814 frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0]; 815 816 IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n", 817 alloc_id); 818 819 iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2, 820 frag->physical >> MON_BUFF_SHIFT_VER2); 821 iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2, 822 (frag->physical + frag->size - 256) >> 823 MON_BUFF_SHIFT_VER2); 824 } 825 826 void iwl_pcie_apply_destination(struct iwl_trans *trans) 827 { 828 const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv; 829 const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 830 int i; 831 832 if (iwl_trans_dbg_ini_valid(trans)) { 833 iwl_pcie_apply_destination_ini(trans); 834 return; 835 } 836 837 IWL_INFO(trans, "Applying debug destination %s\n", 838 get_fw_dbg_mode_string(dest->monitor_mode)); 839 840 if (dest->monitor_mode == EXTERNAL_MODE) 841 iwl_pcie_alloc_fw_monitor(trans, dest->size_power); 842 else 843 IWL_WARN(trans, "PCI should have external buffer debug\n"); 844 845 for (i = 0; i < trans->dbg.n_dest_reg; i++) { 846 u32 addr = le32_to_cpu(dest->reg_ops[i].addr); 847 u32 val = le32_to_cpu(dest->reg_ops[i].val); 848 849 switch (dest->reg_ops[i].op) { 850 case CSR_ASSIGN: 851 iwl_write32(trans, addr, val); 852 break; 853 case CSR_SETBIT: 854 iwl_set_bit(trans, addr, BIT(val)); 855 break; 856 case CSR_CLEARBIT: 857 iwl_clear_bit(trans, addr, BIT(val)); 858 break; 859 case PRPH_ASSIGN: 860 iwl_write_prph(trans, addr, val); 861 break; 862 case PRPH_SETBIT: 863 iwl_set_bits_prph(trans, addr, BIT(val)); 864 break; 865 case PRPH_CLEARBIT: 866 iwl_clear_bits_prph(trans, addr, BIT(val)); 867 break; 868 case PRPH_BLOCKBIT: 869 if (iwl_read_prph(trans, addr) & BIT(val)) { 870 IWL_ERR(trans, 871 "BIT(%u) in address 0x%x is 1, stopping FW configuration\n", 872 val, addr); 873 goto monitor; 874 } 875 break; 876 default: 877 IWL_ERR(trans, "FW debug - unknown OP %d\n", 878 dest->reg_ops[i].op); 879 break; 880 } 881 } 882 883 monitor: 884 if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) { 885 iwl_write_prph(trans, le32_to_cpu(dest->base_reg), 886 fw_mon->physical >> dest->base_shift); 887 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 888 iwl_write_prph(trans, le32_to_cpu(dest->end_reg), 889 (fw_mon->physical + fw_mon->size - 890 256) >> dest->end_shift); 891 
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report, false);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};
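/*
 * Note on IWL_CAUSE(): each cause is programmed into one byte of the
 * MSI-X IVAR table, and .addr is the index of that byte.  The -16/+16
 * adjustment implies that the FH cause masks sit in the upper half of
 * their register while the HW cause masks start at bit 0, so FH causes
 * land in IVAR entries 0..15 and HW causes in entries 16..31 (e.g. a HW
 * cause with mask BIT(0) would map to entry 16).
 */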
static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
1083 */ 1084 val = BIT(MSIX_FH_INT_CAUSES_Q(0)); 1085 for (idx = 1; idx < trans->info.num_rxqs; idx++) { 1086 iwl_write8(trans, CSR_MSIX_RX_IVAR(idx), 1087 MSIX_FH_INT_CAUSES_Q(idx - offset)); 1088 val |= BIT(MSIX_FH_INT_CAUSES_Q(idx)); 1089 } 1090 iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val); 1091 1092 val = MSIX_FH_INT_CAUSES_Q(0); 1093 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) 1094 val |= MSIX_NON_AUTO_CLEAR_CAUSE; 1095 iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val); 1096 1097 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) 1098 iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val); 1099 } 1100 1101 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie) 1102 { 1103 struct iwl_trans *trans = trans_pcie->trans; 1104 1105 if (!trans_pcie->msix_enabled) { 1106 if (trans->mac_cfg->mq_rx_supported && 1107 test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 1108 iwl_write_umac_prph(trans, UREG_CHICK, 1109 UREG_CHICK_MSI_ENABLE); 1110 return; 1111 } 1112 /* 1113 * The IVAR table needs to be configured again after reset, 1114 * but if the device is disabled, we can't write to 1115 * prph. 1116 */ 1117 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 1118 iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); 1119 1120 /* 1121 * Each cause from the causes list above and the RX causes is 1122 * represented as a byte in the IVAR table. The first nibble 1123 * represents the bound interrupt vector of the cause, the second 1124 * represents no auto clear for this cause. This will be set if its 1125 * interrupt vector is bound to serve other causes. 1126 */ 1127 iwl_pcie_map_rx_causes(trans); 1128 1129 iwl_pcie_map_non_rx_causes(trans); 1130 } 1131 1132 static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) 1133 { 1134 struct iwl_trans *trans = trans_pcie->trans; 1135 1136 iwl_pcie_conf_msix_hw(trans_pcie); 1137 1138 if (!trans_pcie->msix_enabled) 1139 return; 1140 1141 trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); 1142 trans_pcie->fh_mask = trans_pcie->fh_init_mask; 1143 trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); 1144 trans_pcie->hw_mask = trans_pcie->hw_init_mask; 1145 } 1146 1147 static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq) 1148 { 1149 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1150 1151 lockdep_assert_held(&trans_pcie->mutex); 1152 1153 if (trans_pcie->is_down) 1154 return; 1155 1156 trans_pcie->is_down = true; 1157 1158 /* tell the device to stop sending interrupts */ 1159 iwl_disable_interrupts(trans); 1160 1161 /* device going down, Stop using ICT table */ 1162 iwl_pcie_disable_ict(trans); 1163 1164 /* 1165 * If a HW restart happens during firmware loading, 1166 * then the firmware loading might call this function 1167 * and later it might be called again due to the 1168 * restart. So don't process again if the device is 1169 * already dead. 
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		if (!from_irq)
			iwl_pcie_synchronize_irqs(trans);
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->mac_cfg->base->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
			    const struct iwl_fw *fw,
			    const struct fw_img *img,
			    bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, img);
	else
		ret = iwl_pcie_load_given_ucode(trans, img);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans);
}
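/*
 * Note: two status bits track RF-kill.  STATUS_RFKILL_HW mirrors the
 * hardware switch itself, while STATUS_RFKILL_OPMODE reflects what has
 * been reported to the op_mode; iwl_pcie_check_hw_rf_kill() only clears
 * the latter once the op_mode is down.  The helper below compares the
 * current switch state with the state the caller last reported.
 */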
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
1357 */ 1358 hw_rfkill = iwl_is_rfkill_set(trans); 1359 if (hw_rfkill) { 1360 set_bit(STATUS_RFKILL_HW, &trans->status); 1361 set_bit(STATUS_RFKILL_OPMODE, &trans->status); 1362 } else { 1363 clear_bit(STATUS_RFKILL_HW, &trans->status); 1364 clear_bit(STATUS_RFKILL_OPMODE, &trans->status); 1365 } 1366 if (hw_rfkill != was_in_rfkill) 1367 iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); 1368 } 1369 1370 void iwl_trans_pcie_stop_device(struct iwl_trans *trans) 1371 { 1372 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1373 bool was_in_rfkill; 1374 1375 iwl_op_mode_time_point(trans->op_mode, 1376 IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE, 1377 NULL); 1378 1379 mutex_lock(&trans_pcie->mutex); 1380 trans_pcie->opmode_down = true; 1381 was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status); 1382 _iwl_trans_pcie_stop_device(trans, false); 1383 iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill); 1384 mutex_unlock(&trans_pcie->mutex); 1385 } 1386 1387 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq) 1388 { 1389 struct iwl_trans_pcie __maybe_unused *trans_pcie = 1390 IWL_TRANS_GET_PCIE_TRANS(trans); 1391 1392 lockdep_assert_held(&trans_pcie->mutex); 1393 1394 IWL_WARN(trans, "reporting RF_KILL (radio %s)\n", 1395 state ? "disabled" : "enabled"); 1396 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) && 1397 !WARN_ON(trans->mac_cfg->gen2)) 1398 _iwl_trans_pcie_stop_device(trans, from_irq); 1399 } 1400 1401 static void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, 1402 bool test, bool reset) 1403 { 1404 iwl_disable_interrupts(trans); 1405 1406 /* 1407 * in testing mode, the host stays awake and the 1408 * hardware won't be reset (not even partially) 1409 */ 1410 if (test) 1411 return; 1412 1413 iwl_pcie_disable_ict(trans); 1414 1415 iwl_pcie_synchronize_irqs(trans); 1416 1417 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 1418 iwl_clear_bit(trans, CSR_GP_CNTRL, 1419 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 1420 iwl_clear_bit(trans, CSR_GP_CNTRL, 1421 CSR_GP_CNTRL_REG_FLAG_MAC_INIT); 1422 } else { 1423 iwl_clear_bit(trans, CSR_GP_CNTRL, 1424 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1425 iwl_clear_bit(trans, CSR_GP_CNTRL, 1426 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1427 } 1428 1429 if (reset) { 1430 /* 1431 * reset TX queues -- some of their registers reset during S3 1432 * so if we don't reset everything here the D3 image would try 1433 * to execute some invalid memory upon resume 1434 */ 1435 iwl_trans_pcie_tx_reset(trans); 1436 } 1437 1438 iwl_pcie_set_pwr(trans, true); 1439 } 1440 1441 static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) 1442 { 1443 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1444 int ret; 1445 1446 if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) 1447 return 0; 1448 1449 trans_pcie->sx_state = IWL_SX_WAITING; 1450 1451 if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_AX210) 1452 iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, 1453 suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : 1454 UREG_DOORBELL_TO_ISR6_RESUME); 1455 else 1456 iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, 1457 suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : 1458 CSR_IPC_SLEEP_CONTROL_RESUME); 1459 1460 ret = wait_event_timeout(trans_pcie->sx_waitq, 1461 trans_pcie->sx_state != IWL_SX_WAITING, 1462 2 * HZ); 1463 if (!ret) { 1464 IWL_ERR(trans, "Timeout %s D3\n", 1465 suspend ? 
"entering" : "exiting"); 1466 ret = -ETIMEDOUT; 1467 } else { 1468 ret = 0; 1469 } 1470 1471 if (trans_pcie->sx_state == IWL_SX_ERROR) { 1472 IWL_ERR(trans, "FW error while %s D3\n", 1473 suspend ? "entering" : "exiting"); 1474 ret = -EIO; 1475 } 1476 1477 /* Invalidate it toward next suspend or resume */ 1478 trans_pcie->sx_state = IWL_SX_INVALID; 1479 1480 return ret; 1481 } 1482 1483 int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) 1484 { 1485 int ret; 1486 1487 if (!reset) 1488 /* Enable persistence mode to avoid reset */ 1489 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1490 CSR_HW_IF_CONFIG_REG_PERSISTENCE); 1491 1492 ret = iwl_pcie_d3_handshake(trans, true); 1493 if (ret) 1494 return ret; 1495 1496 iwl_pcie_d3_complete_suspend(trans, test, reset); 1497 1498 return 0; 1499 } 1500 1501 int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1502 enum iwl_d3_status *status, 1503 bool test, bool reset) 1504 { 1505 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1506 u32 val; 1507 int ret; 1508 1509 if (test) { 1510 iwl_enable_interrupts(trans); 1511 *status = IWL_D3_STATUS_ALIVE; 1512 ret = 0; 1513 goto out; 1514 } 1515 1516 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 1517 iwl_set_bit(trans, CSR_GP_CNTRL, 1518 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 1519 else 1520 iwl_set_bit(trans, CSR_GP_CNTRL, 1521 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1522 1523 ret = iwl_finish_nic_init(trans); 1524 if (ret) 1525 return ret; 1526 1527 /* 1528 * Reconfigure IVAR table in case of MSIX or reset ict table in 1529 * MSI mode since HW reset erased it. 1530 * Also enables interrupts - none will happen as 1531 * the device doesn't know we're waking it up, only when 1532 * the opmode actually tells it after this call. 
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE)
		ret = iwl_pcie_d3_handshake(trans, false);
	else
		trans->state = IWL_TRANS_NO_FW;

	return ret;
}

static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_mac_cfg *mac_cfg,
			    struct iwl_trans_info *info)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!mac_cfg->mq_rx_supported)
		goto enable_msi;

	if (mac_cfg->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		info->num_rxqs = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		info->num_rxqs = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		info->num_rxqs = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       info->num_rxqs, trans_pcie->shared_vec_mask);

	WARN_ON(info->num_rxqs > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	info->num_rxqs = 1;
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}
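/*
 * Worked example for the vector allocation above, assuming a 4-CPU
 * system with multi-queue RX: max_irqs = min(4 + 2, HW queue limit) = 6.
 * If the OS grants all 6 vectors, num_rxqs = 5 and the last vector
 * (def_irq) is dedicated to non-RX causes.  With 5 granted, the non-RX
 * causes share vector 0 with the FBQ (still 5 RX queues).  With 4 or
 * fewer, the first RSS queue shares that vector as well and
 * num_rxqs = num_irqs + 1.
 */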
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
				      struct iwl_trans_info *info)
{
#if defined(CONFIG_SMP)
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = info->num_rxqs - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
#endif
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie,
				      struct iwl_trans_info *info)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
1697 iwl_pcie_irq_msix_handler : 1698 iwl_pcie_irq_rx_msix_handler, 1699 IRQF_SHARED, 1700 qname, 1701 msix_entry); 1702 if (ret) { 1703 IWL_ERR(trans_pcie->trans, 1704 "Error allocating IRQ %d\n", i); 1705 1706 return ret; 1707 } 1708 } 1709 iwl_pcie_irq_set_affinity(trans_pcie->trans, info); 1710 1711 return 0; 1712 } 1713 1714 static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans) 1715 { 1716 u32 hpm, wprot; 1717 1718 switch (trans->mac_cfg->device_family) { 1719 case IWL_DEVICE_FAMILY_9000: 1720 wprot = PREG_PRPH_WPROT_9000; 1721 break; 1722 case IWL_DEVICE_FAMILY_22000: 1723 wprot = PREG_PRPH_WPROT_22000; 1724 break; 1725 default: 1726 return 0; 1727 } 1728 1729 hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG); 1730 if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) { 1731 u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot); 1732 1733 if (wprot_val & PREG_WFPM_ACCESS) { 1734 IWL_ERR(trans, 1735 "Error, can not clear persistence bit\n"); 1736 return -EPERM; 1737 } 1738 iwl_write_umac_prph_no_grab(trans, HPM_DEBUG, 1739 hpm & ~PERSISTENCE_BIT); 1740 } 1741 1742 return 0; 1743 } 1744 1745 static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans) 1746 { 1747 int ret; 1748 1749 ret = iwl_finish_nic_init(trans); 1750 if (ret < 0) 1751 return ret; 1752 1753 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, 1754 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 1755 udelay(20); 1756 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG, 1757 HPM_HIPM_GEN_CFG_CR_PG_EN | 1758 HPM_HIPM_GEN_CFG_CR_SLP_EN); 1759 udelay(20); 1760 iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG, 1761 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE); 1762 1763 return iwl_trans_pcie_sw_reset(trans, true); 1764 } 1765 1766 int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) 1767 { 1768 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1769 int err; 1770 1771 lockdep_assert_held(&trans_pcie->mutex); 1772 1773 err = iwl_pcie_prepare_card_hw(trans); 1774 if (err) { 1775 IWL_ERR(trans, "Error while preparing HW: %d\n", err); 1776 return err; 1777 } 1778 1779 err = iwl_trans_pcie_clear_persistence_bit(trans); 1780 if (err) 1781 return err; 1782 1783 err = iwl_trans_pcie_sw_reset(trans, true); 1784 if (err) 1785 return err; 1786 1787 if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_22000 && 1788 trans->mac_cfg->integrated) { 1789 err = iwl_pcie_gen2_force_power_gating(trans); 1790 if (err) 1791 return err; 1792 } 1793 1794 err = iwl_pcie_apm_init(trans); 1795 if (err) 1796 return err; 1797 1798 iwl_pcie_init_msix(trans_pcie); 1799 1800 /* From now on, the op_mode will be kept updated about RF kill state */ 1801 iwl_enable_rfkill_int(trans); 1802 1803 trans_pcie->opmode_down = false; 1804 1805 /* Set is_down to false here so that...*/ 1806 trans_pcie->is_down = false; 1807 1808 /* ...rfkill can call stop_device and set it false if needed */ 1809 iwl_pcie_check_hw_rf_kill(trans); 1810 1811 return 0; 1812 } 1813 1814 int iwl_trans_pcie_start_hw(struct iwl_trans *trans) 1815 { 1816 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1817 int ret; 1818 1819 mutex_lock(&trans_pcie->mutex); 1820 ret = _iwl_trans_pcie_start_hw(trans); 1821 mutex_unlock(&trans_pcie->mutex); 1822 1823 return ret; 1824 } 1825 1826 void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) 1827 { 1828 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1829 1830 mutex_lock(&trans_pcie->mutex); 1831 1832 /* disable interrupts - don't enable HW RF kill interrupt */ 1833 
iwl_disable_interrupts(trans); 1834 1835 iwl_pcie_apm_stop(trans, true); 1836 1837 iwl_disable_interrupts(trans); 1838 1839 iwl_pcie_disable_ict(trans); 1840 1841 mutex_unlock(&trans_pcie->mutex); 1842 1843 iwl_pcie_synchronize_irqs(trans); 1844 } 1845 1846 void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1847 { 1848 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1849 } 1850 1851 void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1852 { 1853 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1854 } 1855 1856 u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1857 { 1858 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1859 } 1860 1861 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) 1862 { 1863 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1864 return 0x00FFFFFF; 1865 else 1866 return 0x000FFFFF; 1867 } 1868 1869 u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1870 { 1871 u32 mask = iwl_trans_pcie_prph_msk(trans); 1872 1873 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1874 ((reg & mask) | (3 << 24))); 1875 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1876 } 1877 1878 void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) 1879 { 1880 u32 mask = iwl_trans_pcie_prph_msk(trans); 1881 1882 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1883 ((addr & mask) | (3 << 24))); 1884 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1885 } 1886 1887 void iwl_trans_pcie_op_mode_enter(struct iwl_trans *trans) 1888 { 1889 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1890 1891 /* free all first - we might be reconfigured for a different size */ 1892 iwl_pcie_free_rbs_pool(trans); 1893 1894 trans_pcie->rx_page_order = 1895 iwl_trans_get_rb_size_order(trans->conf.rx_buf_size); 1896 trans_pcie->rx_buf_bytes = 1897 iwl_trans_get_rb_size(trans->conf.rx_buf_size); 1898 } 1899 1900 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions, 1901 struct device *dev) 1902 { 1903 u8 i; 1904 struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc; 1905 1906 /* free DRAM payloads */ 1907 for (i = 0; i < dram_regions->n_regions; i++) { 1908 dma_free_coherent(dev, dram_regions->drams[i].size, 1909 dram_regions->drams[i].block, 1910 dram_regions->drams[i].physical); 1911 } 1912 dram_regions->n_regions = 0; 1913 1914 /* free DRAM addresses array */ 1915 if (desc_dram->block) { 1916 dma_free_coherent(dev, desc_dram->size, 1917 desc_dram->block, 1918 desc_dram->physical); 1919 } 1920 memset(desc_dram, 0, sizeof(*desc_dram)); 1921 } 1922 1923 static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans) 1924 { 1925 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1926 1927 iwl_pcie_free_dma_ptr(trans, &trans_pcie->invalid_tx_cmd); 1928 } 1929 1930 static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans) 1931 { 1932 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1933 struct iwl_cmd_header_wide bad_cmd = { 1934 .cmd = INVALID_WR_PTR_CMD, 1935 .group_id = DEBUG_GROUP, 1936 .sequence = cpu_to_le16(0xffff), 1937 .length = cpu_to_le16(0), 1938 .version = 0, 1939 }; 1940 int ret; 1941 1942 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->invalid_tx_cmd, 1943 sizeof(bad_cmd)); 1944 if (ret) 1945 return ret; 1946 memcpy(trans_pcie->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd)); 1947 return 0; 1948 } 1949 1950 void iwl_trans_pcie_free(struct 
iwl_trans *trans) 1951 { 1952 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1953 int i; 1954 1955 iwl_pcie_synchronize_irqs(trans); 1956 1957 if (trans->mac_cfg->gen2) 1958 iwl_txq_gen2_tx_free(trans); 1959 else 1960 iwl_pcie_tx_free(trans); 1961 iwl_pcie_rx_free(trans); 1962 1963 if (trans_pcie->rba.alloc_wq) { 1964 destroy_workqueue(trans_pcie->rba.alloc_wq); 1965 trans_pcie->rba.alloc_wq = NULL; 1966 } 1967 1968 if (trans_pcie->msix_enabled) { 1969 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 1970 irq_set_affinity_hint( 1971 trans_pcie->msix_entries[i].vector, 1972 NULL); 1973 } 1974 1975 trans_pcie->msix_enabled = false; 1976 } else { 1977 iwl_pcie_free_ict(trans); 1978 } 1979 1980 free_netdev(trans_pcie->napi_dev); 1981 1982 iwl_pcie_free_invalid_tx_cmd(trans); 1983 1984 iwl_pcie_free_fw_monitor(trans); 1985 1986 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data, 1987 trans->dev); 1988 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data, 1989 trans->dev); 1990 1991 mutex_destroy(&trans_pcie->mutex); 1992 1993 if (trans_pcie->txqs.tso_hdr_page) { 1994 for_each_possible_cpu(i) { 1995 struct iwl_tso_hdr_page *p = 1996 per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i); 1997 1998 if (p && p->page) 1999 __free_page(p->page); 2000 } 2001 2002 free_percpu(trans_pcie->txqs.tso_hdr_page); 2003 } 2004 2005 iwl_trans_free(trans); 2006 } 2007 2008 static union acpi_object * 2009 iwl_trans_pcie_call_prod_reset_dsm(struct pci_dev *pdev, u16 cmd, u16 value) 2010 { 2011 #ifdef CONFIG_ACPI 2012 struct iwl_dsm_internal_product_reset_cmd pldr_arg = { 2013 .cmd = cmd, 2014 .value = value, 2015 }; 2016 union acpi_object arg = { 2017 .buffer.type = ACPI_TYPE_BUFFER, 2018 .buffer.length = sizeof(pldr_arg), 2019 .buffer.pointer = (void *)&pldr_arg, 2020 }; 2021 static const guid_t dsm_guid = GUID_INIT(0x7266172C, 0x220B, 0x4B29, 2022 0x81, 0x4F, 0x75, 0xE4, 2023 0xDD, 0x26, 0xB5, 0xFD); 2024 2025 if (!acpi_check_dsm(ACPI_HANDLE(&pdev->dev), &dsm_guid, ACPI_DSM_REV, 2026 DSM_INTERNAL_FUNC_PRODUCT_RESET)) 2027 return ERR_PTR(-ENODEV); 2028 2029 return iwl_acpi_get_dsm_object(&pdev->dev, ACPI_DSM_REV, 2030 DSM_INTERNAL_FUNC_PRODUCT_RESET, 2031 &arg, &dsm_guid); 2032 #else 2033 return ERR_PTR(-EOPNOTSUPP); 2034 #endif 2035 } 2036 2037 void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev) 2038 { 2039 union acpi_object *res; 2040 2041 res = iwl_trans_pcie_call_prod_reset_dsm(pdev, 2042 DSM_INTERNAL_PLDR_CMD_GET_MODE, 2043 0); 2044 if (IS_ERR(res)) 2045 return; 2046 2047 if (res->type != ACPI_TYPE_INTEGER) 2048 IWL_ERR_DEV(&pdev->dev, 2049 "unexpected return type from product reset DSM\n"); 2050 else 2051 IWL_DEBUG_DEV_POWER(&pdev->dev, 2052 "product reset mode is 0x%llx\n", 2053 res->integer.value); 2054 2055 ACPI_FREE(res); 2056 } 2057 2058 static void iwl_trans_pcie_set_product_reset(struct pci_dev *pdev, bool enable, 2059 bool integrated) 2060 { 2061 union acpi_object *res; 2062 u16 mode = enable ? DSM_INTERNAL_PLDR_MODE_EN_PROD_RESET : 0; 2063 2064 if (!integrated) 2065 mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR | 2066 DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON; 2067 2068 res = iwl_trans_pcie_call_prod_reset_dsm(pdev, 2069 DSM_INTERNAL_PLDR_CMD_SET_MODE, 2070 mode); 2071 if (IS_ERR(res)) { 2072 if (enable) 2073 IWL_ERR_DEV(&pdev->dev, 2074 "ACPI _DSM not available (%d), cannot do product reset\n", 2075 (int)PTR_ERR(res)); 2076 return; 2077 } 2078 2079 ACPI_FREE(res); 2080 IWL_DEBUG_DEV_POWER(&pdev->dev, "%sabled product reset via DSM\n", 2081 enable ? 
"En" : "Dis"); 2082 iwl_trans_pcie_check_product_reset_mode(pdev); 2083 } 2084 2085 void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev) 2086 { 2087 union acpi_object *res; 2088 2089 res = iwl_trans_pcie_call_prod_reset_dsm(pdev, 2090 DSM_INTERNAL_PLDR_CMD_GET_STATUS, 2091 0); 2092 if (IS_ERR(res)) 2093 return; 2094 2095 if (res->type != ACPI_TYPE_INTEGER) 2096 IWL_ERR_DEV(&pdev->dev, 2097 "unexpected return type from product reset DSM\n"); 2098 else 2099 IWL_DEBUG_DEV_POWER(&pdev->dev, 2100 "product reset status is 0x%llx\n", 2101 res->integer.value); 2102 2103 ACPI_FREE(res); 2104 } 2105 2106 static void iwl_trans_pcie_call_reset(struct pci_dev *pdev) 2107 { 2108 #ifdef CONFIG_ACPI 2109 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 2110 union acpi_object *p, *ref; 2111 acpi_status status; 2112 int ret = -EINVAL; 2113 2114 status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev), 2115 "_PRR", NULL, &buffer); 2116 if (ACPI_FAILURE(status)) { 2117 IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n"); 2118 goto out; 2119 } 2120 p = buffer.pointer; 2121 2122 if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) { 2123 pci_err(pdev, "Bad _PRR return type\n"); 2124 goto out; 2125 } 2126 2127 ref = &p->package.elements[0]; 2128 if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) { 2129 pci_err(pdev, "_PRR wasn't a reference\n"); 2130 goto out; 2131 } 2132 2133 status = acpi_evaluate_object(ref->reference.handle, 2134 "_RST", NULL, NULL); 2135 if (ACPI_FAILURE(status)) { 2136 pci_err(pdev, 2137 "Failed to call _RST on object returned by _PRR (%d)\n", 2138 status); 2139 goto out; 2140 } 2141 ret = 0; 2142 out: 2143 kfree(buffer.pointer); 2144 if (!ret) { 2145 IWL_DEBUG_DEV_POWER(&pdev->dev, "called _RST on _PRR object\n"); 2146 return; 2147 } 2148 IWL_DEBUG_DEV_POWER(&pdev->dev, 2149 "No BIOS support, using pci_reset_function()\n"); 2150 #endif 2151 pci_reset_function(pdev); 2152 } 2153 2154 struct iwl_trans_pcie_removal { 2155 struct pci_dev *pdev; 2156 struct work_struct work; 2157 enum iwl_reset_mode mode; 2158 bool integrated; 2159 }; 2160 2161 static void iwl_trans_pcie_removal_wk(struct work_struct *wk) 2162 { 2163 struct iwl_trans_pcie_removal *removal = 2164 container_of(wk, struct iwl_trans_pcie_removal, work); 2165 struct pci_dev *pdev = removal->pdev; 2166 static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; 2167 struct pci_bus *bus; 2168 2169 pci_lock_rescan_remove(); 2170 2171 bus = pdev->bus; 2172 /* in this case, something else already removed the device */ 2173 if (!bus) 2174 goto out; 2175 2176 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); 2177 2178 if (removal->mode == IWL_RESET_MODE_PROD_RESET) { 2179 struct pci_dev *bt = NULL; 2180 2181 if (!removal->integrated) { 2182 /* discrete devices have WiFi/BT at function 0/1 */ 2183 int slot = PCI_SLOT(pdev->devfn); 2184 int func = PCI_FUNC(pdev->devfn); 2185 2186 if (func == 0) 2187 bt = pci_get_slot(bus, PCI_DEVFN(slot, 1)); 2188 else 2189 pci_info(pdev, "Unexpected function %d\n", 2190 func); 2191 } else { 2192 /* on integrated we have to look up by ID (same bus) */ 2193 static const struct pci_device_id bt_device_ids[] = { 2194 #define BT_DEV(_id) { PCI_DEVICE(PCI_VENDOR_ID_INTEL, _id) } 2195 BT_DEV(0xA876), /* LNL */ 2196 BT_DEV(0xE476), /* PTL-P */ 2197 BT_DEV(0xE376), /* PTL-H */ 2198 BT_DEV(0xD346), /* NVL-H */ 2199 BT_DEV(0x6E74), /* NVL-S */ 2200 BT_DEV(0x4D76), /* WCL */ 2201 BT_DEV(0xD246), /* RZL-H */ 2202 BT_DEV(0x6C46), /* RZL-M */ 2203 {} 2204 }; 2205 struct pci_dev *tmp = NULL; 
2206 2207 for_each_pci_dev(tmp) { 2208 if (tmp->bus != bus) 2209 continue; 2210 2211 if (pci_match_id(bt_device_ids, tmp)) { 2212 bt = tmp; 2213 break; 2214 } 2215 } 2216 } 2217 2218 if (bt) { 2219 pci_info(bt, "Removal by WiFi due to product reset\n"); 2220 pci_stop_and_remove_bus_device(bt); 2221 pci_dev_put(bt); 2222 } 2223 } 2224 2225 iwl_trans_pcie_set_product_reset(pdev, 2226 removal->mode == 2227 IWL_RESET_MODE_PROD_RESET, 2228 removal->integrated); 2229 if (removal->mode >= IWL_RESET_MODE_FUNC_RESET) 2230 iwl_trans_pcie_call_reset(pdev); 2231 2232 pci_stop_and_remove_bus_device(pdev); 2233 pci_dev_put(pdev); 2234 2235 if (removal->mode >= IWL_RESET_MODE_RESCAN) { 2236 if (bus->parent) 2237 bus = bus->parent; 2238 pci_rescan_bus(bus); 2239 } 2240 2241 out: 2242 pci_unlock_rescan_remove(); 2243 2244 kfree(removal); 2245 module_put(THIS_MODULE); 2246 } 2247 2248 void iwl_trans_pcie_reset(struct iwl_trans *trans, enum iwl_reset_mode mode) 2249 { 2250 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2251 struct iwl_trans_pcie_removal *removal; 2252 char _msg = 0, *msg = &_msg; 2253 2254 if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY || 2255 mode == IWL_RESET_MODE_BACKOFF)) 2256 return; 2257 2258 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2259 return; 2260 2261 if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) { 2262 mode = IWL_RESET_MODE_FUNC_RESET; 2263 if (trans_pcie->me_present < 0) 2264 msg = " instead of product reset as ME may be present"; 2265 else 2266 msg = " instead of product reset as ME is present"; 2267 } 2268 2269 IWL_INFO(trans, "scheduling reset (mode=%d%s)\n", mode, msg); 2270 2271 iwl_pcie_dump_csr(trans); 2272 2273 /* 2274 * get a module reference to avoid doing this 2275 * while unloading anyway and to avoid 2276 * scheduling a work with code that's being 2277 * removed. 2278 */ 2279 if (!try_module_get(THIS_MODULE)) { 2280 IWL_ERR(trans, 2281 "Module is being unloaded - abort\n"); 2282 return; 2283 } 2284 2285 removal = kzalloc(sizeof(*removal), GFP_ATOMIC); 2286 if (!removal) { 2287 module_put(THIS_MODULE); 2288 return; 2289 } 2290 /* 2291 * we don't need to clear this flag, because 2292 * the trans will be freed and reallocated. 2293 */ 2294 set_bit(STATUS_TRANS_DEAD, &trans->status); 2295 2296 removal->pdev = to_pci_dev(trans->dev); 2297 removal->mode = mode; 2298 removal->integrated = trans->mac_cfg->integrated; 2299 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); 2300 pci_dev_get(removal->pdev); 2301 schedule_work(&removal->work); 2302 } 2303 EXPORT_SYMBOL(iwl_trans_pcie_reset); 2304 2305 /* 2306 * This version doesn't disable BHs but rather assumes they're 2307 * already disabled. 
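* The iwl_trans_pcie_grab_nic_access() wrapper below disables BHs before calling this.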
2308 */ 2309 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent) 2310 { 2311 int ret; 2312 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2313 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; 2314 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 2315 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; 2316 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; 2317 2318 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2319 return false; 2320 2321 spin_lock(&trans_pcie->reg_lock); 2322 2323 if (trans_pcie->cmd_hold_nic_awake) 2324 goto out; 2325 2326 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 2327 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; 2328 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2329 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2330 } 2331 2332 /* this bit wakes up the NIC */ 2333 iwl_trans_set_bit(trans, CSR_GP_CNTRL, write); 2334 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 2335 udelay(2); 2336 2337 /* 2338 * These bits say the device is running, and should keep running for 2339 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 2340 * but they do not indicate that embedded SRAM is restored yet; 2341 * HW with volatile SRAM must save/restore contents to/from 2342 * host DRAM when sleeping/waking for power-saving. 2343 * Each direction takes approximately 1/4 millisecond; with this 2344 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 2345 * series of register accesses are expected (e.g. reading Event Log), 2346 * to keep device from sleeping. 2347 * 2348 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 2349 * SRAM is okay/restored. We don't check that here because this call 2350 * is just for hardware register access; but GP1 MAC_SLEEP 2351 * check is a good idea before accessing the SRAM of HW with 2352 * volatile SRAM (e.g. reading Event Log). 2353 * 2354 * 5000 series and later (including 1000 series) have non-volatile SRAM, 2355 * and do not save/restore SRAM when power cycling. 2356 */ 2357 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); 2358 if (unlikely(ret < 0)) { 2359 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2360 2361 if (silent) { 2362 spin_unlock(&trans_pcie->reg_lock); 2363 return false; 2364 } 2365 2366 WARN_ONCE(1, 2367 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2368 cntrl); 2369 2370 iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev); 2371 2372 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) 2373 iwl_trans_pcie_reset(trans, 2374 IWL_RESET_MODE_REMOVE_ONLY); 2375 else 2376 iwl_write32(trans, CSR_RESET, 2377 CSR_RESET_REG_FLAG_FORCE_NMI); 2378 2379 spin_unlock(&trans_pcie->reg_lock); 2380 return false; 2381 } 2382 2383 out: 2384 /* 2385 * Fool sparse by faking we release the lock - sparse will 2386 * track nic_access anyway. 
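* The reg_lock is really still held; it is dropped later in iwl_trans_pcie_release_nic_access().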
2387 */ 2388 __release(&trans_pcie->reg_lock); 2389 return true; 2390 } 2391 2392 bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2393 { 2394 bool ret; 2395 2396 local_bh_disable(); 2397 ret = __iwl_trans_pcie_grab_nic_access(trans, false); 2398 if (ret) { 2399 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ 2400 return ret; 2401 } 2402 local_bh_enable(); 2403 return false; 2404 } 2405 2406 void __releases(nic_access_nobh) 2407 iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2408 { 2409 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2410 2411 lockdep_assert_held(&trans_pcie->reg_lock); 2412 2413 /* 2414 * Fool sparse by faking we acquiring the lock - sparse will 2415 * track nic_access anyway. 2416 */ 2417 __acquire(&trans_pcie->reg_lock); 2418 2419 if (trans_pcie->cmd_hold_nic_awake) 2420 goto out; 2421 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2422 iwl_trans_clear_bit(trans, CSR_GP_CNTRL, 2423 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 2424 else 2425 iwl_trans_clear_bit(trans, CSR_GP_CNTRL, 2426 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2427 /* 2428 * Above we read the CSR_GP_CNTRL register, which will flush 2429 * any previous writes, but we need the write that clears the 2430 * MAC_ACCESS_REQ bit to be performed before any other writes 2431 * scheduled on different CPUs (after we drop reg_lock). 2432 */ 2433 out: 2434 __release(nic_access_nobh); 2435 spin_unlock_bh(&trans_pcie->reg_lock); 2436 } 2437 2438 int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 2439 void *buf, int dwords) 2440 { 2441 #define IWL_MAX_HW_ERRS 5 2442 unsigned int num_consec_hw_errors = 0; 2443 int offs = 0; 2444 u32 *vals = buf; 2445 2446 while (offs < dwords) { 2447 /* limit the time we spin here under lock to 1/2s */ 2448 unsigned long end = jiffies + HZ / 2; 2449 bool resched = false; 2450 2451 if (iwl_trans_grab_nic_access(trans)) { 2452 iwl_write32(trans, HBUS_TARG_MEM_RADDR, 2453 addr + 4 * offs); 2454 2455 while (offs < dwords) { 2456 vals[offs] = iwl_read32(trans, 2457 HBUS_TARG_MEM_RDAT); 2458 2459 if (iwl_trans_is_hw_error_value(vals[offs])) 2460 num_consec_hw_errors++; 2461 else 2462 num_consec_hw_errors = 0; 2463 2464 if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) { 2465 iwl_trans_release_nic_access(trans); 2466 return -EIO; 2467 } 2468 2469 offs++; 2470 2471 if (time_after(jiffies, end)) { 2472 resched = true; 2473 break; 2474 } 2475 } 2476 iwl_trans_release_nic_access(trans); 2477 2478 if (resched) 2479 cond_resched(); 2480 } else { 2481 return -EBUSY; 2482 } 2483 } 2484 2485 return 0; 2486 } 2487 2488 int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2489 u32 *val) 2490 { 2491 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2492 ofs, val); 2493 } 2494 2495 #define IWL_FLUSH_WAIT_MS 2000 2496 2497 int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2498 struct iwl_trans_rxq_dma_data *data) 2499 { 2500 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2501 2502 if (queue >= trans->info.num_rxqs || !trans_pcie->rxq) 2503 return -EINVAL; 2504 2505 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2506 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2507 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2508 data->fr_bd_wid = 0; 2509 2510 return 0; 2511 } 2512 2513 int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2514 { 2515 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2516 struct 
iwl_txq *txq; 2517 unsigned long now = jiffies; 2518 bool overflow_tx; 2519 u8 wr_ptr; 2520 2521 /* Make sure the NIC is still alive in the bus */ 2522 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2523 return -ENODEV; 2524 2525 if (!test_bit(txq_idx, trans_pcie->txqs.queue_used)) 2526 return -EINVAL; 2527 2528 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2529 txq = trans_pcie->txqs.txq[txq_idx]; 2530 2531 spin_lock_bh(&txq->lock); 2532 overflow_tx = txq->overflow_tx || 2533 !skb_queue_empty(&txq->overflow_q); 2534 spin_unlock_bh(&txq->lock); 2535 2536 wr_ptr = READ_ONCE(txq->write_ptr); 2537 2538 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || 2539 overflow_tx) && 2540 !time_after(jiffies, 2541 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2542 u8 write_ptr = READ_ONCE(txq->write_ptr); 2543 2544 /* 2545 * If write pointer moved during the wait, warn only 2546 * if the TX came from op mode. In case TX came from 2547 * trans layer (overflow TX) don't warn. 2548 */ 2549 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, 2550 "WR pointer moved while flushing %d -> %d\n", 2551 wr_ptr, write_ptr)) 2552 return -ETIMEDOUT; 2553 wr_ptr = write_ptr; 2554 2555 usleep_range(1000, 2000); 2556 2557 spin_lock_bh(&txq->lock); 2558 overflow_tx = txq->overflow_tx || 2559 !skb_queue_empty(&txq->overflow_q); 2560 spin_unlock_bh(&txq->lock); 2561 } 2562 2563 if (txq->read_ptr != txq->write_ptr) { 2564 IWL_ERR(trans, 2565 "fail to flush all tx fifo queues Q %d\n", txq_idx); 2566 iwl_txq_log_scd_error(trans, txq); 2567 return -ETIMEDOUT; 2568 } 2569 2570 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); 2571 2572 return 0; 2573 } 2574 2575 int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) 2576 { 2577 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2578 int cnt; 2579 int ret = 0; 2580 2581 /* waiting for all the tx frames complete might take a while */ 2582 for (cnt = 0; 2583 cnt < trans->mac_cfg->base->num_of_queues; 2584 cnt++) { 2585 2586 if (cnt == trans->conf.cmd_queue) 2587 continue; 2588 if (!test_bit(cnt, trans_pcie->txqs.queue_used)) 2589 continue; 2590 if (!(BIT(cnt) & txq_bm)) 2591 continue; 2592 2593 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); 2594 if (ret) 2595 break; 2596 } 2597 2598 return ret; 2599 } 2600 2601 void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2602 u32 mask, u32 value) 2603 { 2604 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2605 2606 spin_lock_bh(&trans_pcie->reg_lock); 2607 _iwl_trans_set_bits_mask(trans, reg, mask, value); 2608 spin_unlock_bh(&trans_pcie->reg_lock); 2609 } 2610 2611 static const char *get_csr_string(int cmd) 2612 { 2613 #define IWL_CMD(x) case x: return #x 2614 switch (cmd) { 2615 IWL_CMD(CSR_HW_IF_CONFIG_REG); 2616 IWL_CMD(CSR_INT_COALESCING); 2617 IWL_CMD(CSR_INT); 2618 IWL_CMD(CSR_INT_MASK); 2619 IWL_CMD(CSR_FH_INT_STATUS); 2620 IWL_CMD(CSR_GPIO_IN); 2621 IWL_CMD(CSR_RESET); 2622 IWL_CMD(CSR_GP_CNTRL); 2623 IWL_CMD(CSR_HW_REV); 2624 IWL_CMD(CSR_EEPROM_REG); 2625 IWL_CMD(CSR_EEPROM_GP); 2626 IWL_CMD(CSR_OTP_GP_REG); 2627 IWL_CMD(CSR_GIO_REG); 2628 IWL_CMD(CSR_GP_UCODE_REG); 2629 IWL_CMD(CSR_GP_DRIVER_REG); 2630 IWL_CMD(CSR_UCODE_DRV_GP1); 2631 IWL_CMD(CSR_UCODE_DRV_GP2); 2632 IWL_CMD(CSR_LED_REG); 2633 IWL_CMD(CSR_DRAM_INT_TBL_REG); 2634 IWL_CMD(CSR_GIO_CHICKEN_BITS); 2635 IWL_CMD(CSR_ANA_PLL_CFG); 2636 IWL_CMD(CSR_HW_REV_WA_REG); 2637 IWL_CMD(CSR_MONITOR_STATUS_REG); 2638 IWL_CMD(CSR_DBG_HPET_MEM_REG); 2639 default: 2640 return 
"UNKNOWN"; 2641 } 2642 #undef IWL_CMD 2643 } 2644 2645 void iwl_pcie_dump_csr(struct iwl_trans *trans) 2646 { 2647 int i; 2648 static const u32 csr_tbl[] = { 2649 CSR_HW_IF_CONFIG_REG, 2650 CSR_INT_COALESCING, 2651 CSR_INT, 2652 CSR_INT_MASK, 2653 CSR_FH_INT_STATUS, 2654 CSR_GPIO_IN, 2655 CSR_RESET, 2656 CSR_GP_CNTRL, 2657 CSR_HW_REV, 2658 CSR_EEPROM_REG, 2659 CSR_EEPROM_GP, 2660 CSR_OTP_GP_REG, 2661 CSR_GIO_REG, 2662 CSR_GP_UCODE_REG, 2663 CSR_GP_DRIVER_REG, 2664 CSR_UCODE_DRV_GP1, 2665 CSR_UCODE_DRV_GP2, 2666 CSR_LED_REG, 2667 CSR_DRAM_INT_TBL_REG, 2668 CSR_GIO_CHICKEN_BITS, 2669 CSR_ANA_PLL_CFG, 2670 CSR_MONITOR_STATUS_REG, 2671 CSR_HW_REV_WA_REG, 2672 CSR_DBG_HPET_MEM_REG 2673 }; 2674 IWL_ERR(trans, "CSR values:\n"); 2675 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2676 "CSR_INT_PERIODIC_REG)\n"); 2677 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2678 IWL_ERR(trans, " %25s: 0X%08x\n", 2679 get_csr_string(csr_tbl[i]), 2680 iwl_read32(trans, csr_tbl[i])); 2681 } 2682 } 2683 2684 #ifdef CONFIG_IWLWIFI_DEBUGFS 2685 /* create and remove of files */ 2686 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2687 debugfs_create_file(#name, mode, parent, trans, \ 2688 &iwl_dbgfs_##name##_ops); \ 2689 } while (0) 2690 2691 /* file operation */ 2692 #define DEBUGFS_READ_FILE_OPS(name) \ 2693 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2694 .read = iwl_dbgfs_##name##_read, \ 2695 .open = simple_open, \ 2696 .llseek = generic_file_llseek, \ 2697 }; 2698 2699 #define DEBUGFS_WRITE_FILE_OPS(name) \ 2700 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2701 .write = iwl_dbgfs_##name##_write, \ 2702 .open = simple_open, \ 2703 .llseek = generic_file_llseek, \ 2704 }; 2705 2706 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2707 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2708 .write = iwl_dbgfs_##name##_write, \ 2709 .read = iwl_dbgfs_##name##_read, \ 2710 .open = simple_open, \ 2711 .llseek = generic_file_llseek, \ 2712 }; 2713 2714 struct iwl_dbgfs_tx_queue_priv { 2715 struct iwl_trans *trans; 2716 }; 2717 2718 struct iwl_dbgfs_tx_queue_state { 2719 loff_t pos; 2720 }; 2721 2722 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) 2723 { 2724 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2725 struct iwl_dbgfs_tx_queue_state *state; 2726 2727 if (*pos >= priv->trans->mac_cfg->base->num_of_queues) 2728 return NULL; 2729 2730 state = kmalloc(sizeof(*state), GFP_KERNEL); 2731 if (!state) 2732 return NULL; 2733 state->pos = *pos; 2734 return state; 2735 } 2736 2737 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, 2738 void *v, loff_t *pos) 2739 { 2740 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2741 struct iwl_dbgfs_tx_queue_state *state = v; 2742 2743 *pos = ++state->pos; 2744 2745 if (*pos >= priv->trans->mac_cfg->base->num_of_queues) 2746 return NULL; 2747 2748 return state; 2749 } 2750 2751 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) 2752 { 2753 kfree(v); 2754 } 2755 2756 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) 2757 { 2758 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2759 struct iwl_dbgfs_tx_queue_state *state = v; 2760 struct iwl_trans *trans = priv->trans; 2761 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2762 struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos]; 2763 2764 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", 2765 (unsigned int)state->pos, 2766 !!test_bit(state->pos, 
trans_pcie->txqs.queue_used), 2767 !!test_bit(state->pos, trans_pcie->txqs.queue_stopped)); 2768 if (txq) 2769 seq_printf(seq, 2770 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", 2771 txq->read_ptr, txq->write_ptr, 2772 txq->need_update, txq->frozen, 2773 txq->n_window, txq->ampdu); 2774 else 2775 seq_puts(seq, "(unallocated)"); 2776 2777 if (state->pos == trans->conf.cmd_queue) 2778 seq_puts(seq, " (HCMD)"); 2779 seq_puts(seq, "\n"); 2780 2781 return 0; 2782 } 2783 2784 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { 2785 .start = iwl_dbgfs_tx_queue_seq_start, 2786 .next = iwl_dbgfs_tx_queue_seq_next, 2787 .stop = iwl_dbgfs_tx_queue_seq_stop, 2788 .show = iwl_dbgfs_tx_queue_seq_show, 2789 }; 2790 2791 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2792 { 2793 struct iwl_dbgfs_tx_queue_priv *priv; 2794 2795 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2796 sizeof(*priv)); 2797 2798 if (!priv) 2799 return -ENOMEM; 2800 2801 priv->trans = inode->i_private; 2802 return 0; 2803 } 2804 2805 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, 2806 char __user *user_buf, 2807 size_t count, loff_t *ppos) 2808 { 2809 struct iwl_trans *trans = file->private_data; 2810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2811 char *buf; 2812 int pos = 0, i, ret; 2813 size_t bufsz; 2814 2815 bufsz = sizeof(char) * 121 * trans->info.num_rxqs; 2816 2817 if (!trans_pcie->rxq) 2818 return -EAGAIN; 2819 2820 buf = kzalloc(bufsz, GFP_KERNEL); 2821 if (!buf) 2822 return -ENOMEM; 2823 2824 for (i = 0; i < trans->info.num_rxqs && pos < bufsz; i++) { 2825 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2826 2827 spin_lock_bh(&rxq->lock); 2828 2829 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2830 i); 2831 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2832 rxq->read); 2833 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2834 rxq->write); 2835 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2836 rxq->write_actual); 2837 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2838 rxq->need_update); 2839 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2840 rxq->free_count); 2841 if (rxq->rb_stts) { 2842 u32 r = iwl_get_closed_rb_stts(trans, rxq); 2843 pos += scnprintf(buf + pos, bufsz - pos, 2844 "\tclosed_rb_num: %u\n", r); 2845 } else { 2846 pos += scnprintf(buf + pos, bufsz - pos, 2847 "\tclosed_rb_num: Not Allocated\n"); 2848 } 2849 spin_unlock_bh(&rxq->lock); 2850 } 2851 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2852 kfree(buf); 2853 2854 return ret; 2855 } 2856 2857 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2858 char __user *user_buf, 2859 size_t count, loff_t *ppos) 2860 { 2861 struct iwl_trans *trans = file->private_data; 2862 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2863 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2864 2865 int pos = 0; 2866 char *buf; 2867 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2868 ssize_t ret; 2869 2870 buf = kzalloc(bufsz, GFP_KERNEL); 2871 if (!buf) 2872 return -ENOMEM; 2873 2874 pos += scnprintf(buf + pos, bufsz - pos, 2875 "Interrupt Statistics Report:\n"); 2876 2877 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2878 isr_stats->hw); 2879 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2880 isr_stats->sw); 2881 if (isr_stats->sw || isr_stats->hw) { 2882 pos += scnprintf(buf + pos, 
bufsz - pos, 2883 "\tLast Restarting Code: 0x%X\n", 2884 isr_stats->err_code); 2885 } 2886 #ifdef CONFIG_IWLWIFI_DEBUG 2887 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2888 isr_stats->sch); 2889 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2890 isr_stats->alive); 2891 #endif 2892 pos += scnprintf(buf + pos, bufsz - pos, 2893 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2894 2895 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2896 isr_stats->ctkill); 2897 2898 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2899 isr_stats->wakeup); 2900 2901 pos += scnprintf(buf + pos, bufsz - pos, 2902 "Rx command responses:\t\t %u\n", isr_stats->rx); 2903 2904 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2905 isr_stats->tx); 2906 2907 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2908 isr_stats->unhandled); 2909 2910 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2911 kfree(buf); 2912 return ret; 2913 } 2914 2915 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2916 const char __user *user_buf, 2917 size_t count, loff_t *ppos) 2918 { 2919 struct iwl_trans *trans = file->private_data; 2920 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2921 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2922 u32 reset_flag; 2923 int ret; 2924 2925 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2926 if (ret) 2927 return ret; 2928 if (reset_flag == 0) 2929 memset(isr_stats, 0, sizeof(*isr_stats)); 2930 2931 return count; 2932 } 2933 2934 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2935 const char __user *user_buf, 2936 size_t count, loff_t *ppos) 2937 { 2938 struct iwl_trans *trans = file->private_data; 2939 2940 iwl_pcie_dump_csr(trans); 2941 2942 return count; 2943 } 2944 2945 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2946 char __user *user_buf, 2947 size_t count, loff_t *ppos) 2948 { 2949 struct iwl_trans *trans = file->private_data; 2950 char *buf = NULL; 2951 ssize_t ret; 2952 2953 ret = iwl_dump_fh(trans, &buf); 2954 if (ret < 0) 2955 return ret; 2956 if (!buf) 2957 return -EINVAL; 2958 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2959 kfree(buf); 2960 return ret; 2961 } 2962 2963 static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2964 char __user *user_buf, 2965 size_t count, loff_t *ppos) 2966 { 2967 struct iwl_trans *trans = file->private_data; 2968 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2969 char buf[100]; 2970 int pos; 2971 2972 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2973 trans_pcie->debug_rfkill, 2974 !(iwl_read32(trans, CSR_GP_CNTRL) & 2975 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2976 2977 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2978 } 2979 2980 static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2981 const char __user *user_buf, 2982 size_t count, loff_t *ppos) 2983 { 2984 struct iwl_trans *trans = file->private_data; 2985 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2986 bool new_value; 2987 int ret; 2988 2989 ret = kstrtobool_from_user(user_buf, count, &new_value); 2990 if (ret) 2991 return ret; 2992 if (new_value == trans_pcie->debug_rfkill) 2993 return count; 2994 IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2995 trans_pcie->debug_rfkill, new_value); 2996 trans_pcie->debug_rfkill = new_value; 2997 iwl_pcie_handle_rfkill_irq(trans, 
false); 2998 2999 return count; 3000 } 3001 3002 static int iwl_dbgfs_monitor_data_open(struct inode *inode, 3003 struct file *file) 3004 { 3005 struct iwl_trans *trans = inode->i_private; 3006 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3007 3008 if (!trans->dbg.dest_tlv || 3009 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { 3010 IWL_ERR(trans, "Debug destination is not set to DRAM\n"); 3011 return -ENOENT; 3012 } 3013 3014 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) 3015 return -EBUSY; 3016 3017 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; 3018 return simple_open(inode, file); 3019 } 3020 3021 static int iwl_dbgfs_monitor_data_release(struct inode *inode, 3022 struct file *file) 3023 { 3024 struct iwl_trans_pcie *trans_pcie = 3025 IWL_TRANS_GET_PCIE_TRANS(inode->i_private); 3026 3027 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) 3028 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3029 return 0; 3030 } 3031 3032 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, 3033 void *buf, ssize_t *size, 3034 ssize_t *bytes_copied) 3035 { 3036 ssize_t buf_size_left = count - *bytes_copied; 3037 3038 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); 3039 if (*size > buf_size_left) 3040 *size = buf_size_left; 3041 3042 *size -= copy_to_user(user_buf, buf, *size); 3043 *bytes_copied += *size; 3044 3045 if (buf_size_left == *size) 3046 return true; 3047 return false; 3048 } 3049 3050 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, 3051 char __user *user_buf, 3052 size_t count, loff_t *ppos) 3053 { 3054 struct iwl_trans *trans = file->private_data; 3055 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3056 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; 3057 struct cont_rec *data = &trans_pcie->fw_mon_data; 3058 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; 3059 ssize_t size, bytes_copied = 0; 3060 bool b_full; 3061 3062 if (trans->dbg.dest_tlv) { 3063 write_ptr_addr = 3064 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3065 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3066 } else { 3067 write_ptr_addr = MON_BUFF_WRPTR; 3068 wrap_cnt_addr = MON_BUFF_CYCLE_CNT; 3069 } 3070 3071 if (unlikely(!trans->dbg.rec_on)) 3072 return 0; 3073 3074 mutex_lock(&data->mutex); 3075 if (data->state == 3076 IWL_FW_MON_DBGFS_STATE_DISABLED) { 3077 mutex_unlock(&data->mutex); 3078 return 0; 3079 } 3080 3081 /* write_ptr position in bytes rather then DW */ 3082 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); 3083 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); 3084 3085 if (data->prev_wrap_cnt == wrap_cnt) { 3086 size = write_ptr - data->prev_wr_ptr; 3087 curr_buf = cpu_addr + data->prev_wr_ptr; 3088 b_full = iwl_write_to_user_buf(user_buf, count, 3089 curr_buf, &size, 3090 &bytes_copied); 3091 data->prev_wr_ptr += size; 3092 3093 } else if (data->prev_wrap_cnt == wrap_cnt - 1 && 3094 write_ptr < data->prev_wr_ptr) { 3095 size = trans->dbg.fw_mon.size - data->prev_wr_ptr; 3096 curr_buf = cpu_addr + data->prev_wr_ptr; 3097 b_full = iwl_write_to_user_buf(user_buf, count, 3098 curr_buf, &size, 3099 &bytes_copied); 3100 data->prev_wr_ptr += size; 3101 3102 if (!b_full) { 3103 size = write_ptr; 3104 b_full = iwl_write_to_user_buf(user_buf, count, 3105 cpu_addr, &size, 3106 &bytes_copied); 3107 data->prev_wr_ptr = size; 3108 data->prev_wrap_cnt++; 3109 } 3110 } else { 3111 if (data->prev_wrap_cnt == 
wrap_cnt - 1 && 3112 write_ptr > data->prev_wr_ptr) 3113 IWL_WARN(trans, 3114 "write pointer passed previous write pointer, start copying from the beginning\n"); 3115 else if (!unlikely(data->prev_wrap_cnt == 0 && 3116 data->prev_wr_ptr == 0)) 3117 IWL_WARN(trans, 3118 "monitor data is out of sync, start copying from the beginning\n"); 3119 3120 size = write_ptr; 3121 b_full = iwl_write_to_user_buf(user_buf, count, 3122 cpu_addr, &size, 3123 &bytes_copied); 3124 data->prev_wr_ptr = size; 3125 data->prev_wrap_cnt = wrap_cnt; 3126 } 3127 3128 mutex_unlock(&data->mutex); 3129 3130 return bytes_copied; 3131 } 3132 3133 static ssize_t iwl_dbgfs_rf_read(struct file *file, 3134 char __user *user_buf, 3135 size_t count, loff_t *ppos) 3136 { 3137 struct iwl_trans *trans = file->private_data; 3138 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3139 3140 if (!trans_pcie->rf_name[0]) 3141 return -ENODEV; 3142 3143 return simple_read_from_buffer(user_buf, count, ppos, 3144 trans_pcie->rf_name, 3145 strlen(trans_pcie->rf_name)); 3146 } 3147 3148 static ssize_t iwl_dbgfs_reset_write(struct file *file, 3149 const char __user *user_buf, 3150 size_t count, loff_t *ppos) 3151 { 3152 struct iwl_trans *trans = file->private_data; 3153 static const char * const modes[] = { 3154 [IWL_RESET_MODE_SW_RESET] = "sw", 3155 [IWL_RESET_MODE_REPROBE] = "reprobe", 3156 [IWL_RESET_MODE_TOP_RESET] = "top", 3157 [IWL_RESET_MODE_REMOVE_ONLY] = "remove", 3158 [IWL_RESET_MODE_RESCAN] = "rescan", 3159 [IWL_RESET_MODE_FUNC_RESET] = "function", 3160 [IWL_RESET_MODE_PROD_RESET] = "product", 3161 }; 3162 char buf[10] = {}; 3163 int mode; 3164 3165 if (count > sizeof(buf) - 1) 3166 return -EINVAL; 3167 3168 if (copy_from_user(buf, user_buf, count)) 3169 return -EFAULT; 3170 3171 mode = sysfs_match_string(modes, buf); 3172 if (mode < 0) 3173 return mode; 3174 3175 if (mode < IWL_RESET_MODE_REMOVE_ONLY) { 3176 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 3177 return -EINVAL; 3178 if (mode == IWL_RESET_MODE_TOP_RESET) { 3179 if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_SC) 3180 return -EINVAL; 3181 trans->request_top_reset = 1; 3182 } 3183 iwl_op_mode_nic_error(trans->op_mode, IWL_ERR_TYPE_DEBUGFS); 3184 iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_DEBUGFS); 3185 return count; 3186 } 3187 3188 iwl_trans_pcie_reset(trans, mode); 3189 3190 return count; 3191 } 3192 3193 DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 3194 DEBUGFS_READ_FILE_OPS(fh_reg); 3195 DEBUGFS_READ_FILE_OPS(rx_queue); 3196 DEBUGFS_WRITE_FILE_OPS(csr); 3197 DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 3198 DEBUGFS_READ_FILE_OPS(rf); 3199 DEBUGFS_WRITE_FILE_OPS(reset); 3200 3201 static const struct file_operations iwl_dbgfs_tx_queue_ops = { 3202 .owner = THIS_MODULE, 3203 .open = iwl_dbgfs_tx_queue_open, 3204 .read = seq_read, 3205 .llseek = seq_lseek, 3206 .release = seq_release_private, 3207 }; 3208 3209 static const struct file_operations iwl_dbgfs_monitor_data_ops = { 3210 .read = iwl_dbgfs_monitor_data_read, 3211 .open = iwl_dbgfs_monitor_data_open, 3212 .release = iwl_dbgfs_monitor_data_release, 3213 }; 3214 3215 /* Create the debugfs files and directories */ 3216 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 3217 { 3218 struct dentry *dir = trans->dbgfs_dir; 3219 3220 DEBUGFS_ADD_FILE(rx_queue, dir, 0400); 3221 DEBUGFS_ADD_FILE(tx_queue, dir, 0400); 3222 DEBUGFS_ADD_FILE(interrupt, dir, 0600); 3223 DEBUGFS_ADD_FILE(csr, dir, 0200); 3224 DEBUGFS_ADD_FILE(fh_reg, dir, 0400); 3225 DEBUGFS_ADD_FILE(rfkill, dir, 0600); 3226 
DEBUGFS_ADD_FILE(monitor_data, dir, 0400); 3227 DEBUGFS_ADD_FILE(rf, dir, 0400); 3228 DEBUGFS_ADD_FILE(reset, dir, 0200); 3229 } 3230 3231 void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) 3232 { 3233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3234 struct cont_rec *data = &trans_pcie->fw_mon_data; 3235 3236 mutex_lock(&data->mutex); 3237 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; 3238 mutex_unlock(&data->mutex); 3239 } 3240 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 3241 3242 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 3243 { 3244 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3245 u32 cmdlen = 0; 3246 int i; 3247 3248 for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++) 3249 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); 3250 3251 return cmdlen; 3252 } 3253 3254 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 3255 struct iwl_fw_error_dump_data **data, 3256 int allocated_rb_nums) 3257 { 3258 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3259 int max_len = trans_pcie->rx_buf_bytes; 3260 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3261 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3262 u32 i, r, j, rb_len = 0; 3263 3264 spin_lock_bh(&rxq->lock); 3265 3266 r = iwl_get_closed_rb_stts(trans, rxq); 3267 3268 for (i = rxq->read, j = 0; 3269 i != r && j < allocated_rb_nums; 3270 i = (i + 1) & RX_QUEUE_MASK, j++) { 3271 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 3272 struct iwl_fw_error_dump_rb *rb; 3273 3274 dma_sync_single_for_cpu(trans->dev, rxb->page_dma, 3275 max_len, DMA_FROM_DEVICE); 3276 3277 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 3278 3279 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 3280 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 3281 rb = (void *)(*data)->data; 3282 rb->index = cpu_to_le32(i); 3283 memcpy(rb->data, page_address(rxb->page), max_len); 3284 3285 *data = iwl_fw_error_next_data(*data); 3286 } 3287 3288 spin_unlock_bh(&rxq->lock); 3289 3290 return rb_len; 3291 } 3292 #define IWL_CSR_TO_DUMP (0x250) 3293 3294 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 3295 struct iwl_fw_error_dump_data **data) 3296 { 3297 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 3298 __le32 *val; 3299 int i; 3300 3301 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 3302 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 3303 val = (void *)(*data)->data; 3304 3305 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 3306 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3307 3308 *data = iwl_fw_error_next_data(*data); 3309 3310 return csr_len; 3311 } 3312 3313 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 3314 struct iwl_fw_error_dump_data **data) 3315 { 3316 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 3317 __le32 *val; 3318 int i; 3319 3320 if (!iwl_trans_grab_nic_access(trans)) 3321 return 0; 3322 3323 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 3324 (*data)->len = cpu_to_le32(fh_regs_len); 3325 val = (void *)(*data)->data; 3326 3327 if (!trans->mac_cfg->gen2) 3328 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 3329 i += sizeof(u32)) 3330 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3331 else 3332 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); 3333 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); 3334 i += sizeof(u32)) 3335 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 3336 i)); 3337 3338 iwl_trans_release_nic_access(trans); 3339 3340 *data = 
iwl_fw_error_next_data(*data); 3341 3342 return sizeof(**data) + fh_regs_len; 3343 } 3344 3345 static u32 3346 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 3347 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 3348 u32 monitor_len) 3349 { 3350 u32 buf_size_in_dwords = (monitor_len >> 2); 3351 u32 *buffer = (u32 *)fw_mon_data->data; 3352 u32 i; 3353 3354 if (!iwl_trans_grab_nic_access(trans)) 3355 return 0; 3356 3357 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 3358 for (i = 0; i < buf_size_in_dwords; i++) 3359 buffer[i] = iwl_read_umac_prph_no_grab(trans, 3360 MON_DMARB_RD_DATA_ADDR); 3361 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 3362 3363 iwl_trans_release_nic_access(trans); 3364 3365 return monitor_len; 3366 } 3367 3368 static void 3369 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, 3370 struct iwl_fw_error_dump_fw_mon *fw_mon_data) 3371 { 3372 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; 3373 3374 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3375 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; 3376 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; 3377 write_ptr = DBGC_CUR_DBGBUF_STATUS; 3378 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; 3379 } else if (trans->dbg.dest_tlv) { 3380 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3381 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3382 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3383 } else { 3384 base = MON_BUFF_BASE_ADDR; 3385 write_ptr = MON_BUFF_WRPTR; 3386 wrap_cnt = MON_BUFF_CYCLE_CNT; 3387 } 3388 3389 write_ptr_val = iwl_read_prph(trans, write_ptr); 3390 fw_mon_data->fw_mon_cycle_cnt = 3391 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 3392 fw_mon_data->fw_mon_base_ptr = 3393 cpu_to_le32(iwl_read_prph(trans, base)); 3394 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3395 fw_mon_data->fw_mon_base_high_ptr = 3396 cpu_to_le32(iwl_read_prph(trans, base_high)); 3397 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; 3398 /* convert wrtPtr to DWs, to align with all HWs */ 3399 write_ptr_val >>= 2; 3400 } 3401 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); 3402 } 3403 3404 static u32 3405 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 3406 struct iwl_fw_error_dump_data **data, 3407 u32 monitor_len) 3408 { 3409 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 3410 u32 len = 0; 3411 3412 if (trans->dbg.dest_tlv || 3413 (fw_mon->size && 3414 (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_7000 || 3415 trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { 3416 struct iwl_fw_error_dump_fw_mon *fw_mon_data; 3417 3418 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 3419 fw_mon_data = (void *)(*data)->data; 3420 3421 iwl_trans_pcie_dump_pointers(trans, fw_mon_data); 3422 3423 len += sizeof(**data) + sizeof(*fw_mon_data); 3424 if (fw_mon->size) { 3425 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); 3426 monitor_len = fw_mon->size; 3427 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { 3428 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); 3429 /* 3430 * Update pointers to reflect actual values after 3431 * shifting 3432 */ 3433 if (trans->dbg.dest_tlv->version) { 3434 base = (iwl_read_prph(trans, base) & 3435 IWL_LDBG_M2S_BUF_BA_MSK) << 3436 trans->dbg.dest_tlv->base_shift; 3437 base *= IWL_M2S_UNIT_SIZE; 3438 base += trans->mac_cfg->base->smem_offset; 3439 } else { 3440 base = iwl_read_prph(trans, base) << 3441 trans->dbg.dest_tlv->base_shift; 3442 } 3443 3444 
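/* copy the monitor contents out of SMEM at the decoded base address */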
iwl_trans_pcie_read_mem(trans, base, fw_mon_data->data, 3445 monitor_len / sizeof(u32)); 3446 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { 3447 monitor_len = 3448 iwl_trans_pci_dump_marbh_monitor(trans, 3449 fw_mon_data, 3450 monitor_len); 3451 } else { 3452 /* Didn't match anything - output no monitor data */ 3453 monitor_len = 0; 3454 } 3455 3456 len += monitor_len; 3457 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 3458 } 3459 3460 return len; 3461 } 3462 3463 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) 3464 { 3465 if (trans->dbg.fw_mon.size) { 3466 *len += sizeof(struct iwl_fw_error_dump_data) + 3467 sizeof(struct iwl_fw_error_dump_fw_mon) + 3468 trans->dbg.fw_mon.size; 3469 return trans->dbg.fw_mon.size; 3470 } else if (trans->dbg.dest_tlv) { 3471 u32 base, end, cfg_reg, monitor_len; 3472 3473 if (trans->dbg.dest_tlv->version == 1) { 3474 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3475 cfg_reg = iwl_read_prph(trans, cfg_reg); 3476 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << 3477 trans->dbg.dest_tlv->base_shift; 3478 base *= IWL_M2S_UNIT_SIZE; 3479 base += trans->mac_cfg->base->smem_offset; 3480 3481 monitor_len = 3482 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 3483 trans->dbg.dest_tlv->end_shift; 3484 monitor_len *= IWL_M2S_UNIT_SIZE; 3485 } else { 3486 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3487 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); 3488 3489 base = iwl_read_prph(trans, base) << 3490 trans->dbg.dest_tlv->base_shift; 3491 end = iwl_read_prph(trans, end) << 3492 trans->dbg.dest_tlv->end_shift; 3493 3494 /* Make "end" point to the actual end */ 3495 if (trans->mac_cfg->device_family >= 3496 IWL_DEVICE_FAMILY_8000 || 3497 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) 3498 end += (1 << trans->dbg.dest_tlv->end_shift); 3499 monitor_len = end - base; 3500 } 3501 *len += sizeof(struct iwl_fw_error_dump_data) + 3502 sizeof(struct iwl_fw_error_dump_fw_mon) + 3503 monitor_len; 3504 return monitor_len; 3505 } 3506 return 0; 3507 } 3508 3509 struct iwl_trans_dump_data * 3510 iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, 3511 const struct iwl_dump_sanitize_ops *sanitize_ops, 3512 void *sanitize_ctx) 3513 { 3514 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3515 struct iwl_fw_error_dump_data *data; 3516 struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans->conf.cmd_queue]; 3517 struct iwl_fw_error_dump_txcmd *txcmd; 3518 struct iwl_trans_dump_data *dump_data; 3519 u32 len, num_rbs = 0, monitor_len = 0; 3520 int i, ptr; 3521 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && 3522 !trans->mac_cfg->mq_rx_supported && 3523 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); 3524 3525 if (!dump_mask) 3526 return NULL; 3527 3528 /* transport dump header */ 3529 len = sizeof(*dump_data); 3530 3531 /* host commands */ 3532 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) 3533 len += sizeof(*data) + 3534 cmdq->n_window * (sizeof(*txcmd) + 3535 TFD_MAX_PAYLOAD_SIZE); 3536 3537 /* FW monitor */ 3538 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3539 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); 3540 3541 /* CSR registers */ 3542 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3543 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3544 3545 /* FH registers */ 3546 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { 3547 if (trans->mac_cfg->gen2) 3548 len += sizeof(*data) + 3549 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - 3550 iwl_umac_prph(trans, 
FH_MEM_LOWER_BOUND_GEN2)); 3551 else 3552 len += sizeof(*data) + 3553 (FH_MEM_UPPER_BOUND - 3554 FH_MEM_LOWER_BOUND); 3555 } 3556 3557 if (dump_rbs) { 3558 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3559 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3560 /* RBs */ 3561 spin_lock_bh(&rxq->lock); 3562 num_rbs = iwl_get_closed_rb_stts(trans, rxq); 3563 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3564 spin_unlock_bh(&rxq->lock); 3565 3566 len += num_rbs * (sizeof(*data) + 3567 sizeof(struct iwl_fw_error_dump_rb) + 3568 (PAGE_SIZE << trans_pcie->rx_page_order)); 3569 } 3570 3571 /* Paged memory for gen2 HW */ 3572 if (trans->mac_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) 3573 for (i = 0; i < trans->init_dram.paging_cnt; i++) 3574 len += sizeof(*data) + 3575 sizeof(struct iwl_fw_error_dump_paging) + 3576 trans->init_dram.paging[i].size; 3577 3578 dump_data = vzalloc(len); 3579 if (!dump_data) 3580 return NULL; 3581 3582 len = 0; 3583 data = (void *)dump_data->data; 3584 3585 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { 3586 u16 tfd_size = trans_pcie->txqs.tfd.size; 3587 3588 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3589 txcmd = (void *)data->data; 3590 spin_lock_bh(&cmdq->lock); 3591 ptr = cmdq->write_ptr; 3592 for (i = 0; i < cmdq->n_window; i++) { 3593 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); 3594 u8 tfdidx; 3595 u32 caplen, cmdlen; 3596 3597 if (trans->mac_cfg->gen2) 3598 tfdidx = idx; 3599 else 3600 tfdidx = ptr; 3601 3602 cmdlen = iwl_trans_pcie_get_cmdlen(trans, 3603 (u8 *)cmdq->tfds + 3604 tfd_size * tfdidx); 3605 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3606 3607 if (cmdlen) { 3608 len += sizeof(*txcmd) + caplen; 3609 txcmd->cmdlen = cpu_to_le32(cmdlen); 3610 txcmd->caplen = cpu_to_le32(caplen); 3611 memcpy(txcmd->data, cmdq->entries[idx].cmd, 3612 caplen); 3613 if (sanitize_ops && sanitize_ops->frob_hcmd) 3614 sanitize_ops->frob_hcmd(sanitize_ctx, 3615 txcmd->data, 3616 caplen); 3617 txcmd = (void *)((u8 *)txcmd->data + caplen); 3618 } 3619 3620 ptr = iwl_txq_dec_wrap(trans, ptr); 3621 } 3622 spin_unlock_bh(&cmdq->lock); 3623 3624 data->len = cpu_to_le32(len); 3625 len += sizeof(*data); 3626 data = iwl_fw_error_next_data(data); 3627 } 3628 3629 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3630 len += iwl_trans_pcie_dump_csr(trans, &data); 3631 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) 3632 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 3633 if (dump_rbs) 3634 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3635 3636 /* Paged memory for gen2 HW */ 3637 if (trans->mac_cfg->gen2 && 3638 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { 3639 for (i = 0; i < trans->init_dram.paging_cnt; i++) { 3640 struct iwl_fw_error_dump_paging *paging; 3641 u32 page_len = trans->init_dram.paging[i].size; 3642 3643 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 3644 data->len = cpu_to_le32(sizeof(*paging) + page_len); 3645 paging = (void *)data->data; 3646 paging->index = cpu_to_le32(i); 3647 memcpy(paging->data, 3648 trans->init_dram.paging[i].block, page_len); 3649 data = iwl_fw_error_next_data(data); 3650 3651 len += sizeof(*data) + sizeof(*paging) + page_len; 3652 } 3653 } 3654 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3655 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3656 3657 dump_data->len = len; 3658 3659 return dump_data; 3660 } 3661 3662 void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) 3663 { 3664 if (enable) 3665 iwl_enable_interrupts(trans); 3666 else 3667 
iwl_disable_interrupts(trans); 3668 } 3669 3670 void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) 3671 { 3672 u32 inta_addr, sw_err_bit; 3673 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3674 3675 if (trans_pcie->msix_enabled) { 3676 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; 3677 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 3678 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 3679 else 3680 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; 3681 } else { 3682 inta_addr = CSR_INT; 3683 sw_err_bit = CSR_INT_BIT_SW_ERR; 3684 } 3685 3686 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); 3687 } 3688 3689 struct iwl_trans * 3690 iwl_trans_pcie_alloc(struct pci_dev *pdev, 3691 const struct iwl_mac_cfg *mac_cfg, 3692 struct iwl_trans_info *info) 3693 { 3694 struct iwl_trans_pcie *trans_pcie, **priv; 3695 struct iwl_trans *trans; 3696 unsigned int bc_tbl_n_entries; 3697 int ret, addr_size; 3698 u32 bar0; 3699 3700 /* reassign our BAR 0 if invalid due to possible runtime PM races */ 3701 pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); 3702 if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { 3703 ret = pci_assign_resource(pdev, 0); 3704 if (ret) 3705 return ERR_PTR(ret); 3706 } 3707 3708 ret = pcim_enable_device(pdev); 3709 if (ret) 3710 return ERR_PTR(ret); 3711 3712 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, 3713 mac_cfg); 3714 if (!trans) 3715 return ERR_PTR(-ENOMEM); 3716 3717 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3718 3719 /* Initialize the wait queue for commands */ 3720 init_waitqueue_head(&trans_pcie->wait_command_queue); 3721 3722 if (trans->mac_cfg->gen2) { 3723 trans_pcie->txqs.tfd.addr_size = 64; 3724 trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; 3725 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); 3726 } else { 3727 trans_pcie->txqs.tfd.addr_size = 36; 3728 trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; 3729 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd); 3730 } 3731 3732 trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(12); 3733 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 3734 trans_pcie->supported_dma_mask = (u32)DMA_BIT_MASK(11); 3735 3736 info->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); 3737 3738 trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); 3739 if (!trans_pcie->txqs.tso_hdr_page) { 3740 ret = -ENOMEM; 3741 goto out_free_trans; 3742 } 3743 3744 if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 3745 bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ; 3746 else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 3747 bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210; 3748 else 3749 bc_tbl_n_entries = TFD_QUEUE_BC_SIZE; 3750 3751 trans_pcie->txqs.bc_tbl_size = 3752 sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries; 3753 /* 3754 * For gen2 devices, we use a single allocation for each byte-count 3755 * table, but they're pretty small (1k) so use a DMA pool that we 3756 * allocate here. 3757 */ 3758 if (trans->mac_cfg->gen2) { 3759 trans_pcie->txqs.bc_pool = 3760 dmam_pool_create("iwlwifi:bc", trans->dev, 3761 trans_pcie->txqs.bc_tbl_size, 3762 256, 0); 3763 if (!trans_pcie->txqs.bc_pool) { 3764 ret = -ENOMEM; 3765 goto out_free_tso; 3766 } 3767 } 3768 3769 /* Some things must not change even if the config does */ 3770 WARN_ON(trans_pcie->txqs.tfd.addr_size != 3771 (trans->mac_cfg->gen2 ? 64 : 36)); 3772 3773 /* Initialize NAPI here - it should be before registering to mac80211 3774 * in the opmode but after the HW struct is allocated. 
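* The dummy netdev allocated just below only exists to host the NAPI contexts.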
3775 */ 3776 trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *)); 3777 if (!trans_pcie->napi_dev) { 3778 ret = -ENOMEM; 3779 goto out_free_tso; 3780 } 3781 /* The private struct in netdev is a pointer to struct iwl_trans_pcie */ 3782 priv = netdev_priv(trans_pcie->napi_dev); 3783 *priv = trans_pcie; 3784 3785 trans_pcie->trans = trans; 3786 trans_pcie->opmode_down = true; 3787 spin_lock_init(&trans_pcie->irq_lock); 3788 spin_lock_init(&trans_pcie->reg_lock); 3789 spin_lock_init(&trans_pcie->alloc_page_lock); 3790 mutex_init(&trans_pcie->mutex); 3791 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 3792 init_waitqueue_head(&trans_pcie->fw_reset_waitq); 3793 init_waitqueue_head(&trans_pcie->imr_waitq); 3794 3795 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", 3796 WQ_HIGHPRI | WQ_UNBOUND, 0); 3797 if (!trans_pcie->rba.alloc_wq) { 3798 ret = -ENOMEM; 3799 goto out_free_ndev; 3800 } 3801 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); 3802 3803 trans_pcie->debug_rfkill = -1; 3804 3805 if (!mac_cfg->base->pcie_l1_allowed) { 3806 /* 3807 * W/A - seems to solve weird behavior. We need to remove this 3808 * if we don't want to stay in L1 all the time. This wastes a 3809 * lot of power. 3810 */ 3811 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3812 PCIE_LINK_STATE_L1 | 3813 PCIE_LINK_STATE_CLKPM); 3814 } 3815 3816 pci_set_master(pdev); 3817 3818 addr_size = trans_pcie->txqs.tfd.addr_size; 3819 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); 3820 if (ret) { 3821 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3822 /* both attempts failed: */ 3823 if (ret) { 3824 dev_err(&pdev->dev, "No suitable DMA available\n"); 3825 goto out_no_pci; 3826 } 3827 } 3828 3829 ret = pcim_request_all_regions(pdev, DRV_NAME); 3830 if (ret) { 3831 dev_err(&pdev->dev, "Requesting all PCI BARs failed.\n"); 3832 goto out_no_pci; 3833 } 3834 3835 trans_pcie->hw_base = pcim_iomap(pdev, 0, 0); 3836 if (!trans_pcie->hw_base) { 3837 dev_err(&pdev->dev, "Could not ioremap PCI BAR 0.\n"); 3838 ret = -ENODEV; 3839 goto out_no_pci; 3840 } 3841 3842 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3843 * PCI Tx retries from interfering with C3 CPU state */ 3844 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3845 3846 trans_pcie->pci_dev = pdev; 3847 iwl_disable_interrupts(trans); 3848 3849 info->hw_rev = iwl_read32(trans, CSR_HW_REV); 3850 if (info->hw_rev == 0xffffffff) { 3851 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); 3852 ret = -EIO; 3853 goto out_no_pci; 3854 } 3855 3856 /* 3857 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3858 * changed, and now the revision step also includes bit 0-1 (no more 3859 * "dash" value). To keep hw_rev backwards compatible - we'll store it 3860 * in the old format. 
3861 */ 3862 if (mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 3863 info->hw_rev_step = info->hw_rev & 0xF; 3864 else 3865 info->hw_rev_step = (info->hw_rev & 0xC) >> 2; 3866 3867 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", info->hw_rev); 3868 3869 iwl_pcie_set_interrupt_capa(pdev, trans, mac_cfg, info); 3870 3871 init_waitqueue_head(&trans_pcie->sx_waitq); 3872 3873 ret = iwl_pcie_alloc_invalid_tx_cmd(trans); 3874 if (ret) 3875 goto out_no_pci; 3876 3877 if (trans_pcie->msix_enabled) { 3878 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie, info); 3879 if (ret) 3880 goto out_no_pci; 3881 } else { 3882 ret = iwl_pcie_alloc_ict(trans); 3883 if (ret) 3884 goto out_no_pci; 3885 3886 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 3887 iwl_pcie_isr, 3888 iwl_pcie_irq_handler, 3889 IRQF_SHARED, DRV_NAME, trans); 3890 if (ret) { 3891 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3892 goto out_free_ict; 3893 } 3894 } 3895 3896 #ifdef CONFIG_IWLWIFI_DEBUGFS 3897 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3898 mutex_init(&trans_pcie->fw_mon_data.mutex); 3899 #endif 3900 3901 iwl_dbg_tlv_init(trans); 3902 3903 return trans; 3904 3905 out_free_ict: 3906 iwl_pcie_free_ict(trans); 3907 out_no_pci: 3908 destroy_workqueue(trans_pcie->rba.alloc_wq); 3909 out_free_ndev: 3910 free_netdev(trans_pcie->napi_dev); 3911 out_free_tso: 3912 free_percpu(trans_pcie->txqs.tso_hdr_page); 3913 out_free_trans: 3914 iwl_trans_free(trans); 3915 return ERR_PTR(ret); 3916 } 3917 3918 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, 3919 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3920 { 3921 iwl_write_prph(trans, IMR_UREG_CHICK, 3922 iwl_read_prph(trans, IMR_UREG_CHICK) | 3923 IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); 3924 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); 3925 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, 3926 (u32)(src_addr & 0xFFFFFFFF)); 3927 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, 3928 iwl_get_dma_hi_addr(src_addr)); 3929 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); 3930 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, 3931 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | 3932 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | 3933 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); 3934 } 3935 3936 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, 3937 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3938 { 3939 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3940 int ret = -1; 3941 3942 trans_pcie->imr_status = IMR_D2S_REQUESTED; 3943 iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); 3944 ret = wait_event_timeout(trans_pcie->imr_waitq, 3945 trans_pcie->imr_status != 3946 IMR_D2S_REQUESTED, 5 * HZ); 3947 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { 3948 IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); 3949 iwl_trans_pcie_dump_regs(trans, trans_pcie->pci_dev); 3950 return -ETIMEDOUT; 3951 } 3952 trans_pcie->imr_status = IMR_D2S_IDLE; 3953 return 0; 3954 } 3955