// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf,
		       i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	/* need at least 2 KiB, so stop at 11 */
	for (power = max_power; power >= 11; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power);
}

static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
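
/*
 * The two helpers above implement indirect shared-register (SHR)
 * access: the low 16 bits of the control word select the target
 * register and the top nibble selects the operation, 2 for read and
 * 3 for write (see the "(2 << 28)" / "(3 << 28)" encodings above).
 * For example, a read-modify-write of SHR_APMG_GP1_REG looks like:
 *
 *	u32 val = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
 *	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG,
 *				 val | SHR_APMG_GP1_WF_XTAL_LP_EN);
 *
 * which is exactly the pattern iwl_pcie_apm_lp_xtal_enable() uses
 * further down in this file.
 */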

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the workaround is needed for
		 * 7260 / 3160 only, so we check host_interrupt_operation_mode
		 * even though what follows is not related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows us to be
		 * sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans,
		    FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->gen2) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}
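
/*
 * A worked example of the load-status handshake above (derived from the
 * code, not from Intel documentation): sec_num starts at 0x1 and after
 * each section is shifted left and OR'd with 1, so after loading three
 * CPU1 sections FH_UCODE_LOAD_STATUS has been written with 0x1, 0x3 and
 * then 0x7 in its low 16 bits (shift_param is 0 for CPU1, 16 for CPU2).
 * Writing 0xFFFF (or 0xFFFFFFFF once CPU2 is done) afterwards tells the
 * ucode that the whole image for that CPU has been loaded.
 */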

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report, false);

	return hw_rfkill;
}

struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff),	/* causes overflow warning */	\
	}
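
/*
 * Illustrative expansion of IWL_CAUSE() (the exact cause bit positions
 * live in iwl-csr.h; the arithmetic below just follows the macro): a
 * cause bit in the FH mask register maps to the low half of the IVAR
 * table and a cause bit in the HW mask register maps to the high half.
 * Assuming MSIX_FH_INT_CAUSES_D2S_CH0_NUM is BIT(16) and
 * MSIX_HW_INT_CAUSES_REG_ALIVE is BIT(0), then:
 *
 *	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM)
 *		-> .bit = 16, .addr = 16 - 16 = 0   (IVAR entry 0)
 *	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE)
 *		-> .bit = 0,  .addr = 0 + 16 = 16   (IVAR entry 16)
 *
 * The 0xffff fallback deliberately overflows the u8 .addr field, so a
 * cause added with the wrong mask register trips the compiler overflow
 * warning noted above.
 */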

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
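
/*
 * A concrete reading of the RX mapping above (derived from the code):
 * with, say, num_rx_queues = 9 and IWL_SHARED_IRQ_FIRST_RSS set
 * (offset = 1), the loop routes queue 1 to vector 0, queue 2 to
 * vector 1, ... queue 8 to vector 7; the two trailing writes then
 * route queue 0 (the fallback queue) and queue 1 (the first RSS
 * queue) to vector 0 as well, so vector 0 is shared. Without the
 * flag (offset = 0), queue idx simply gets vector idx and only the
 * fallback queue stays on vector 0.
 */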

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		if (!from_irq)
			iwl_pcie_synchronize_irqs(trans);
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans, false);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans, from_irq);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
					      UREG_DOORBELL_TO_ISR6_RESUME);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
				      CSR_IPC_SLEEP_CONTROL_RESUME);
	else
		return 0;

	ret = wait_event_timeout(trans_pcie->sx_waitq,
				 trans_pcie->sx_complete, 2 * HZ);

	/* Invalidate it toward next suspend or resume */
	trans_pcie->sx_complete = false;

	if (!ret) {
		IWL_ERR(trans, "Timeout %s D3\n",
			suspend ?
"entering" : "exiting"); 1552 return -ETIMEDOUT; 1553 } 1554 1555 return 0; 1556 } 1557 1558 static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1559 bool reset) 1560 { 1561 int ret; 1562 1563 if (!reset) 1564 /* Enable persistence mode to avoid reset */ 1565 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1566 CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1567 1568 ret = iwl_pcie_d3_handshake(trans, true); 1569 if (ret) 1570 return ret; 1571 1572 iwl_pcie_d3_complete_suspend(trans, test, reset); 1573 1574 return 0; 1575 } 1576 1577 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1578 enum iwl_d3_status *status, 1579 bool test, bool reset) 1580 { 1581 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1582 u32 val; 1583 int ret; 1584 1585 if (test) { 1586 iwl_enable_interrupts(trans); 1587 *status = IWL_D3_STATUS_ALIVE; 1588 ret = 0; 1589 goto out; 1590 } 1591 1592 iwl_set_bit(trans, CSR_GP_CNTRL, 1593 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1594 1595 ret = iwl_finish_nic_init(trans); 1596 if (ret) 1597 return ret; 1598 1599 /* 1600 * Reconfigure IVAR table in case of MSIX or reset ict table in 1601 * MSI mode since HW reset erased it. 1602 * Also enables interrupts - none will happen as 1603 * the device doesn't know we're waking it up, only when 1604 * the opmode actually tells it after this call. 1605 */ 1606 iwl_pcie_conf_msix_hw(trans_pcie); 1607 if (!trans_pcie->msix_enabled) 1608 iwl_pcie_reset_ict(trans); 1609 iwl_enable_interrupts(trans); 1610 1611 iwl_pcie_set_pwr(trans, false); 1612 1613 if (!reset) { 1614 iwl_clear_bit(trans, CSR_GP_CNTRL, 1615 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1616 } else { 1617 iwl_trans_pcie_tx_reset(trans); 1618 1619 ret = iwl_pcie_rx_init(trans); 1620 if (ret) { 1621 IWL_ERR(trans, 1622 "Failed to resume the device (RX reset)\n"); 1623 return ret; 1624 } 1625 } 1626 1627 IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n", 1628 iwl_read_umac_prph(trans, WFPM_GP2)); 1629 1630 val = iwl_read32(trans, CSR_RESET); 1631 if (val & CSR_RESET_REG_FLAG_NEVO_RESET) 1632 *status = IWL_D3_STATUS_RESET; 1633 else 1634 *status = IWL_D3_STATUS_ALIVE; 1635 1636 out: 1637 if (*status == IWL_D3_STATUS_ALIVE) 1638 ret = iwl_pcie_d3_handshake(trans, false); 1639 1640 return ret; 1641 } 1642 1643 static void 1644 iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, 1645 struct iwl_trans *trans, 1646 const struct iwl_cfg_trans_params *cfg_trans) 1647 { 1648 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1649 int max_irqs, num_irqs, i, ret; 1650 u16 pci_cmd; 1651 u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES; 1652 1653 if (!cfg_trans->mq_rx_supported) 1654 goto enable_msi; 1655 1656 if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000) 1657 max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES; 1658 1659 max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues); 1660 for (i = 0; i < max_irqs; i++) 1661 trans_pcie->msix_entries[i].entry = i; 1662 1663 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries, 1664 MSIX_MIN_INTERRUPT_VECTORS, 1665 max_irqs); 1666 if (num_irqs < 0) { 1667 IWL_DEBUG_INFO(trans, 1668 "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n", 1669 num_irqs); 1670 goto enable_msi; 1671 } 1672 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0; 1673 1674 IWL_DEBUG_INFO(trans, 1675 "MSI-X enabled. 

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}
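
/*
 * Tracing the affinity loop above with concrete numbers (derived from
 * the code): with num_rx_queues = 9 and no shared first RSS vector,
 * i starts at 1, iter_rx_q = 9 and offset = 2, so vectors 1..8 are
 * pinned to cpumask_next(-1), cpumask_next(0), ... i.e. CPUs 0..7 in
 * online-mask order, while vector 0 (the non-RX/fallback vector) is
 * left unpinned. When IWL_SHARED_IRQ_FIRST_RSS is set, i starts at 0,
 * so the shared vector 0 is pinned as well, starting from CPU 0.
 */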

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	err = iwl_trans_pcie_sw_reset(trans, true);
	if (err)
		return err;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	iwl_disable_interrupts(trans);

	iwl_pcie_apm_stop(trans, true);

	iwl_disable_interrupts(trans);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	iwl_pcie_synchronize_irqs(trans);
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return 0x00FFFFFF;
	else
		return 0x000FFFFF;
}

static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & mask) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	u32 mask = iwl_trans_pcie_prph_msk(trans);

	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & mask) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* free all first - we might be reconfigured for a different size */
	iwl_pcie_free_rbs_pool(trans);

	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	trans->txqs.page_offs = trans_cfg->cb_data_offs;
	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
	trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;

	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
	trans_pcie->rx_page_order =
		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
	trans_pcie->rx_buf_bytes =
		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);

	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	trans->command_groups = trans_cfg->command_groups;
	trans->command_groups_size = trans_cfg->command_groups_size;

	/* Initialize NAPI here - it should be before registering with
	 * mac80211 in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
		init_dummy_netdev(&trans_pcie->napi_dev);

	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}

void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
					   struct device *dev)
{
	u8 i;
	struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;

	/* free DRAM payloads */
	for (i = 0; i < dram_regions->n_regions; i++) {
		dma_free_coherent(dev, dram_regions->drams[i].size,
				  dram_regions->drams[i].block,
				  dram_regions->drams[i].physical);
	}
	dram_regions->n_regions = 0;

	/* free DRAM addresses array */
	if (desc_dram->block) {
		dma_free_coherent(dev, desc_dram->size,
				  desc_dram->block,
				  desc_dram->physical);
	}
	memset(desc_dram, 0, sizeof(*desc_dram));
}

static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
{
	iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
}

static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
{
	struct iwl_cmd_header_wide bad_cmd = {
		.cmd = INVALID_WR_PTR_CMD,
		.group_id = DEBUG_GROUP,
		.sequence = cpu_to_le16(0xffff),
		.length = cpu_to_le16(0),
		.version = 0,
	};
	int ret;

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
				     sizeof(bad_cmd));
	if (ret)
		return ret;
	memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
	return 0;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tx_free(trans);
	else
		iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	if (trans_pcie->rba.alloc_wq) {
		destroy_workqueue(trans_pcie->rba.alloc_wq);
		trans_pcie->rba.alloc_wq = NULL;
	}

	if (trans_pcie->msix_enabled) {
		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
			irq_set_affinity_hint(
				trans_pcie->msix_entries[i].vector,
				NULL);
		}

		trans_pcie->msix_enabled = false;
	} else {
		iwl_pcie_free_ict(trans);
	}

	iwl_pcie_free_invalid_tx_cmd(trans);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
					      trans->dev);
	iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
					      trans->dev);

	mutex_destroy(&trans_pcie->mutex);
	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}

struct iwl_trans_pcie_removal {
	struct pci_dev *pdev;
	struct work_struct work;
	bool rescan;
};

static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
{
	struct iwl_trans_pcie_removal *removal =
		container_of(wk, struct iwl_trans_pcie_removal, work);
	struct pci_dev *pdev = removal->pdev;
	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
	struct pci_bus *bus;

	pci_lock_rescan_remove();

	bus = pdev->bus;
	/* in this case, something else already removed the device */
	if (!bus)
		goto out;

	dev_err(&pdev->dev, "Device gone - attempting removal\n");

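	/* let userspace know the device became inaccessible before detaching */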
	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);

	pci_stop_and_remove_bus_device(pdev);
	pci_dev_put(pdev);

	if (removal->rescan) {
		if (bus->parent)
			bus = bus->parent;
		pci_rescan_bus(bus);
	}

out:
	pci_unlock_rescan_remove();

	kfree(removal);
	module_put(THIS_MODULE);
}

void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
{
	struct iwl_trans_pcie_removal *removal;

	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return;

	IWL_ERR(trans, "Device gone - scheduling removal!\n");
	iwl_pcie_dump_csr(trans);

	/*
	 * Take a module reference so we don't schedule work whose code
	 * could be unloaded from under it while the work runs.
	 */
	if (!try_module_get(THIS_MODULE)) {
		IWL_ERR(trans,
			"Module is being unloaded - abort\n");
		return;
	}

	removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
	if (!removal) {
		module_put(THIS_MODULE);
		return;
	}
	/*
	 * we don't need to clear this flag, because
	 * the trans will be freed and reallocated.
	 */
	set_bit(STATUS_TRANS_DEAD, &trans->status);

	removal->pdev = to_pci_dev(trans->dev);
	removal->rescan = rescan;
	INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
	pci_dev_get(removal->pdev);
	schedule_work(&removal->work);
}
EXPORT_SYMBOL(iwl_trans_pcie_remove);

/*
 * This version doesn't disable BHs but rather assumes they're
 * already disabled.
 */
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
	u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		   CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
	u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;

	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return false;

	spin_lock(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
		mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
		poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
	}

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * HW with volatile SRAM must save/restore contents to/from
	 * host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep the device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored. We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP
	 * check is a good idea before accessing the SRAM of HW with
	 * volatile SRAM (e.g. reading Event Log).
2229 * 2230 * 5000 series and later (including 1000 series) have non-volatile SRAM, 2231 * and do not save/restore SRAM when power cycling. 2232 */ 2233 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); 2234 if (unlikely(ret < 0)) { 2235 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2236 2237 WARN_ONCE(1, 2238 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2239 cntrl); 2240 2241 iwl_trans_pcie_dump_regs(trans); 2242 2243 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) 2244 iwl_trans_pcie_remove(trans, false); 2245 else 2246 iwl_write32(trans, CSR_RESET, 2247 CSR_RESET_REG_FLAG_FORCE_NMI); 2248 2249 spin_unlock(&trans_pcie->reg_lock); 2250 return false; 2251 } 2252 2253 out: 2254 /* 2255 * Fool sparse by faking we release the lock - sparse will 2256 * track nic_access anyway. 2257 */ 2258 __release(&trans_pcie->reg_lock); 2259 return true; 2260 } 2261 2262 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2263 { 2264 bool ret; 2265 2266 local_bh_disable(); 2267 ret = __iwl_trans_pcie_grab_nic_access(trans); 2268 if (ret) { 2269 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ 2270 return ret; 2271 } 2272 local_bh_enable(); 2273 return false; 2274 } 2275 2276 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2277 { 2278 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2279 2280 lockdep_assert_held(&trans_pcie->reg_lock); 2281 2282 /* 2283 * Fool sparse by faking we acquiring the lock - sparse will 2284 * track nic_access anyway. 2285 */ 2286 __acquire(&trans_pcie->reg_lock); 2287 2288 if (trans_pcie->cmd_hold_nic_awake) 2289 goto out; 2290 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2291 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2292 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 2293 else 2294 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2295 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2296 /* 2297 * Above we read the CSR_GP_CNTRL register, which will flush 2298 * any previous writes, but we need the write that clears the 2299 * MAC_ACCESS_REQ bit to be performed before any other writes 2300 * scheduled on different CPUs (after we drop reg_lock). 
2301 */ 2302 out: 2303 spin_unlock_bh(&trans_pcie->reg_lock); 2304 } 2305 2306 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 2307 void *buf, int dwords) 2308 { 2309 #define IWL_MAX_HW_ERRS 5 2310 unsigned int num_consec_hw_errors = 0; 2311 int offs = 0; 2312 u32 *vals = buf; 2313 2314 while (offs < dwords) { 2315 /* limit the time we spin here under lock to 1/2s */ 2316 unsigned long end = jiffies + HZ / 2; 2317 bool resched = false; 2318 2319 if (iwl_trans_grab_nic_access(trans)) { 2320 iwl_write32(trans, HBUS_TARG_MEM_RADDR, 2321 addr + 4 * offs); 2322 2323 while (offs < dwords) { 2324 vals[offs] = iwl_read32(trans, 2325 HBUS_TARG_MEM_RDAT); 2326 2327 if (iwl_trans_is_hw_error_value(vals[offs])) 2328 num_consec_hw_errors++; 2329 else 2330 num_consec_hw_errors = 0; 2331 2332 if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) { 2333 iwl_trans_release_nic_access(trans); 2334 return -EIO; 2335 } 2336 2337 offs++; 2338 2339 if (time_after(jiffies, end)) { 2340 resched = true; 2341 break; 2342 } 2343 } 2344 iwl_trans_release_nic_access(trans); 2345 2346 if (resched) 2347 cond_resched(); 2348 } else { 2349 return -EBUSY; 2350 } 2351 } 2352 2353 return 0; 2354 } 2355 2356 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 2357 const void *buf, int dwords) 2358 { 2359 int offs, ret = 0; 2360 const u32 *vals = buf; 2361 2362 if (iwl_trans_grab_nic_access(trans)) { 2363 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 2364 for (offs = 0; offs < dwords; offs++) 2365 iwl_write32(trans, HBUS_TARG_MEM_WDAT, 2366 vals ? vals[offs] : 0); 2367 iwl_trans_release_nic_access(trans); 2368 } else { 2369 ret = -EBUSY; 2370 } 2371 return ret; 2372 } 2373 2374 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2375 u32 *val) 2376 { 2377 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2378 ofs, val); 2379 } 2380 2381 #define IWL_FLUSH_WAIT_MS 2000 2382 2383 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2384 struct iwl_trans_rxq_dma_data *data) 2385 { 2386 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2387 2388 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) 2389 return -EINVAL; 2390 2391 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2392 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2393 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2394 data->fr_bd_wid = 0; 2395 2396 return 0; 2397 } 2398 2399 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2400 { 2401 struct iwl_txq *txq; 2402 unsigned long now = jiffies; 2403 bool overflow_tx; 2404 u8 wr_ptr; 2405 2406 /* Make sure the NIC is still alive in the bus */ 2407 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2408 return -ENODEV; 2409 2410 if (!test_bit(txq_idx, trans->txqs.queue_used)) 2411 return -EINVAL; 2412 2413 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2414 txq = trans->txqs.txq[txq_idx]; 2415 2416 spin_lock_bh(&txq->lock); 2417 overflow_tx = txq->overflow_tx || 2418 !skb_queue_empty(&txq->overflow_q); 2419 spin_unlock_bh(&txq->lock); 2420 2421 wr_ptr = READ_ONCE(txq->write_ptr); 2422 2423 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || 2424 overflow_tx) && 2425 !time_after(jiffies, 2426 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2427 u8 write_ptr = READ_ONCE(txq->write_ptr); 2428 2429 /* 2430 * If write pointer moved during the wait, warn only 2431 * if the TX came from op mode. 
		 * If the TX came from the trans layer itself (overflow
		 * TX), don't warn.
		 */
		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
			      "WR pointer moved while flushing %d -> %d\n",
			      wr_ptr, write_ptr))
			return -ETIMEDOUT;
		wr_ptr = write_ptr;

		usleep_range(1000, 2000);

		spin_lock_bh(&txq->lock);
		overflow_tx = txq->overflow_tx ||
			      !skb_queue_empty(&txq->overflow_q);
		spin_unlock_bh(&txq->lock);
	}

	if (txq->read_ptr != txq->write_ptr) {
		IWL_ERR(trans,
			"fail to flush all tx fifo queues Q %d\n", txq_idx);
		iwl_txq_log_scd_error(trans, txq);
		return -ETIMEDOUT;
	}

	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);

	return 0;
}

static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
	int cnt;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0;
	     cnt < trans->trans_cfg->base_params->num_of_queues;
	     cnt++) {

		if (cnt == trans->txqs.cmd.q_id)
			continue;
		if (!test_bit(cnt, trans->txqs.queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
		if (ret)
			break;
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_bh(&trans_pcie->reg_lock);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_bh(&trans_pcie->reg_lock);
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
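/*
 * debugfs support: the DEBUGFS_*_FILE_OPS macros below stamp out a
 * "struct file_operations iwl_dbgfs_<name>_ops" for each plain
 * iwl_dbgfs_<name>_read/_write handler; the seq_file based tx_queue
 * file and the stateful monitor_data file define their ops by hand.
 */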
2565 } 2566 } 2567 2568 #ifdef CONFIG_IWLWIFI_DEBUGFS 2569 /* create and remove of files */ 2570 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2571 debugfs_create_file(#name, mode, parent, trans, \ 2572 &iwl_dbgfs_##name##_ops); \ 2573 } while (0) 2574 2575 /* file operation */ 2576 #define DEBUGFS_READ_FILE_OPS(name) \ 2577 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2578 .read = iwl_dbgfs_##name##_read, \ 2579 .open = simple_open, \ 2580 .llseek = generic_file_llseek, \ 2581 }; 2582 2583 #define DEBUGFS_WRITE_FILE_OPS(name) \ 2584 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2585 .write = iwl_dbgfs_##name##_write, \ 2586 .open = simple_open, \ 2587 .llseek = generic_file_llseek, \ 2588 }; 2589 2590 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2591 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2592 .write = iwl_dbgfs_##name##_write, \ 2593 .read = iwl_dbgfs_##name##_read, \ 2594 .open = simple_open, \ 2595 .llseek = generic_file_llseek, \ 2596 }; 2597 2598 struct iwl_dbgfs_tx_queue_priv { 2599 struct iwl_trans *trans; 2600 }; 2601 2602 struct iwl_dbgfs_tx_queue_state { 2603 loff_t pos; 2604 }; 2605 2606 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) 2607 { 2608 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2609 struct iwl_dbgfs_tx_queue_state *state; 2610 2611 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2612 return NULL; 2613 2614 state = kmalloc(sizeof(*state), GFP_KERNEL); 2615 if (!state) 2616 return NULL; 2617 state->pos = *pos; 2618 return state; 2619 } 2620 2621 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, 2622 void *v, loff_t *pos) 2623 { 2624 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2625 struct iwl_dbgfs_tx_queue_state *state = v; 2626 2627 *pos = ++state->pos; 2628 2629 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2630 return NULL; 2631 2632 return state; 2633 } 2634 2635 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) 2636 { 2637 kfree(v); 2638 } 2639 2640 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) 2641 { 2642 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2643 struct iwl_dbgfs_tx_queue_state *state = v; 2644 struct iwl_trans *trans = priv->trans; 2645 struct iwl_txq *txq = trans->txqs.txq[state->pos]; 2646 2647 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", 2648 (unsigned int)state->pos, 2649 !!test_bit(state->pos, trans->txqs.queue_used), 2650 !!test_bit(state->pos, trans->txqs.queue_stopped)); 2651 if (txq) 2652 seq_printf(seq, 2653 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", 2654 txq->read_ptr, txq->write_ptr, 2655 txq->need_update, txq->frozen, 2656 txq->n_window, txq->ampdu); 2657 else 2658 seq_puts(seq, "(unallocated)"); 2659 2660 if (state->pos == trans->txqs.cmd.q_id) 2661 seq_puts(seq, " (HCMD)"); 2662 seq_puts(seq, "\n"); 2663 2664 return 0; 2665 } 2666 2667 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { 2668 .start = iwl_dbgfs_tx_queue_seq_start, 2669 .next = iwl_dbgfs_tx_queue_seq_next, 2670 .stop = iwl_dbgfs_tx_queue_seq_stop, 2671 .show = iwl_dbgfs_tx_queue_seq_show, 2672 }; 2673 2674 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2675 { 2676 struct iwl_dbgfs_tx_queue_priv *priv; 2677 2678 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2679 sizeof(*priv)); 2680 2681 if (!priv) 2682 return -ENOMEM; 2683 2684 priv->trans = inode->i_private; 2685 return 0; 2686 
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char *buf;
	int pos = 0, i, ret;
	size_t bufsz;

	bufsz = sizeof(char) * 121 * trans->num_rx_queues;

	if (!trans_pcie->rxq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
				 i);
		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
				 rxq->read);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
				 rxq->write);
		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
				 rxq->write_actual);
		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
				 rxq->need_update);
		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
				 rxq->free_count);
		if (rxq->rb_stts) {
			u32 r = iwl_get_closed_rb_stts(trans, rxq);

			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: %u\n", r);
		} else {
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tclosed_rb_num: Not Allocated\n");
		}
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);

	return ret;
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n",
			 isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
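/* writing 0 (parsed as hex) clears the accumulated interrupt statistics */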
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 reset_flag;
	int ret;

	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
	if (ret)
		return ret;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	char buf[100];
	int pos;

	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
			trans_pcie->debug_rfkill,
			!(iwl_read32(trans, CSR_GP_CNTRL) &
			  CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));

	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool new_value;
	int ret;

	ret = kstrtobool_from_user(user_buf, count, &new_value);
	if (ret)
		return ret;
	if (new_value == trans_pcie->debug_rfkill)
		return count;
	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
		 trans_pcie->debug_rfkill, new_value);
	trans_pcie->debug_rfkill = new_value;
	iwl_pcie_handle_rfkill_irq(trans, false);

	return count;
}

static int iwl_dbgfs_monitor_data_open(struct inode *inode,
				       struct file *file)
{
	struct iwl_trans *trans = inode->i_private;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans->dbg.dest_tlv ||
	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
		return -ENOENT;
	}

	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;

	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return simple_open(inode, file);
}

static int iwl_dbgfs_monitor_data_release(struct inode *inode,
					  struct file *file)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);

	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	return 0;
}
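/*
 * Copy up to *size bytes into the user buffer, limiting the copy to
 * the dword-aligned space remaining out of "count". On return, *size
 * is the number of bytes actually copied and *bytes_copied has been
 * advanced by the same amount; returns true once the user buffer is
 * full.
 */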
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
				  void *buf, ssize_t *size,
				  ssize_t *bytes_copied)
{
	ssize_t buf_size_left = count - *bytes_copied;

	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
	if (*size > buf_size_left)
		*size = buf_size_left;

	*size -= copy_to_user(user_buf, buf, *size);
	*bytes_copied += *size;

	if (buf_size_left == *size)
		return true;
	return false;
}

static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
					   char __user *user_buf,
					   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
	struct cont_rec *data = &trans_pcie->fw_mon_data;
	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
	ssize_t size, bytes_copied = 0;
	bool b_full;

	if (trans->dbg.dest_tlv) {
		write_ptr_addr =
			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
	} else {
		write_ptr_addr = MON_BUFF_WRPTR;
		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
	}

	if (unlikely(!trans->dbg.rec_on))
		return 0;

	mutex_lock(&data->mutex);
	if (data->state == IWL_FW_MON_DBGFS_STATE_DISABLED) {
		mutex_unlock(&data->mutex);
		return 0;
	}

	/* write_ptr position in bytes rather than DW */
	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);

	if (data->prev_wrap_cnt == wrap_cnt) {
		size = write_ptr - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		   write_ptr < data->prev_wr_ptr) {
		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
		curr_buf = cpu_addr + data->prev_wr_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       curr_buf, &size,
					       &bytes_copied);
		data->prev_wr_ptr += size;

		if (!b_full) {
			size = write_ptr;
			b_full = iwl_write_to_user_buf(user_buf, count,
						       cpu_addr, &size,
						       &bytes_copied);
			data->prev_wr_ptr = size;
			data->prev_wrap_cnt++;
		}
	} else {
		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
		    write_ptr > data->prev_wr_ptr)
			IWL_WARN(trans,
				 "write pointer passed previous write pointer, start copying from the beginning\n");
		else if (!unlikely(data->prev_wrap_cnt == 0 &&
				   data->prev_wr_ptr == 0))
			IWL_WARN(trans,
				 "monitor data is out of sync, start copying from the beginning\n");

		size = write_ptr;
		b_full = iwl_write_to_user_buf(user_buf, count,
					       cpu_addr, &size,
					       &bytes_copied);
		data->prev_wr_ptr = size;
		data->prev_wrap_cnt = wrap_cnt;
	}

	mutex_unlock(&data->mutex);

	return bytes_copied;
}

static ssize_t iwl_dbgfs_rf_read(struct file *file,
				 char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->rf_name[0])
		return -ENODEV;

	return simple_read_from_buffer(user_buf, count, ppos,
				       trans_pcie->rf_name,
				       strlen(trans_pcie->rf_name));
}
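/* instantiate iwl_dbgfs_<name>_ops for the simple read/write files */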
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
DEBUGFS_READ_FILE_OPS(rf);

static const struct file_operations iwl_dbgfs_tx_queue_ops = {
	.owner = THIS_MODULE,
	.open = iwl_dbgfs_tx_queue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

static const struct file_operations iwl_dbgfs_monitor_data_ops = {
	.read = iwl_dbgfs_monitor_data_read,
	.open = iwl_dbgfs_monitor_data_open,
	.release = iwl_dbgfs_monitor_data_release,
};

/* Create the debugfs files and directories */
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
	struct dentry *dir = trans->dbgfs_dir;

	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
	DEBUGFS_ADD_FILE(csr, dir, 0200);
	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
	DEBUGFS_ADD_FILE(rf, dir, 0400);
}

static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct cont_rec *data = &trans_pcie->fw_mon_data;

	mutex_lock(&data->mutex);
	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
	mutex_unlock(&data->mutex);
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);

	return cmdlen;
}

static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = trans_pcie->rx_buf_bytes;
	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
	u32 i, r, j, rb_len = 0;

	spin_lock_bh(&rxq->lock);

	r = iwl_get_closed_rb_stts(trans, rxq);

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
					max_len, DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock_bh(&rxq->lock);

	return rb_len;
}

#define IWL_CSR_TO_DUMP (0x250)
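/*
 * Error-dump helpers: each routine below fills in one
 * IWL_FW_ERROR_DUMP_* section, advances *data past it via
 * iwl_fw_error_next_data() and returns the section length it added.
 */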
static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}

static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	if (!trans->trans_cfg->gen2)
		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
	else
		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
		     i += sizeof(u32))
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      i));

	iwl_trans_release_nic_access(trans);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}

static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans))
		return 0;

	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = iwl_read_umac_prph_no_grab(trans,
						       MON_DMARB_RD_DATA_ADDR);
	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans);

	return monitor_len;
}

static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
		write_ptr = DBGC_CUR_DBGBUF_STATUS;
		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
	} else if (trans->dbg.dest_tlv) {
		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
	} else {
		base = MON_BUFF_BASE_ADDR;
		write_ptr = MON_BUFF_WRPTR;
		wrap_cnt = MON_BUFF_CYCLE_CNT;
	}

	write_ptr_val = iwl_read_prph(trans, write_ptr);
	fw_mon_data->fw_mon_cycle_cnt =
		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
	fw_mon_data->fw_mon_base_ptr =
		cpu_to_le32(iwl_read_prph(trans, base));
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		fw_mon_data->fw_mon_base_high_ptr =
			cpu_to_le32(iwl_read_prph(trans, base_high));
		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
		/* convert the write pointer to DWs, to align with all HWs */
		write_ptr_val >>= 2;
	}
	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
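/*
 * Dump the firmware monitor: a DRAM-based buffer is copied directly,
 * otherwise the data is pulled from SMEM or via the MARBH read-back
 * registers, depending on the destination TLV.
 */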
static u32
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
			    struct iwl_fw_error_dump_data **data,
			    u32 monitor_len)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	u32 len = 0;

	if (trans->dbg.dest_tlv ||
	    (fw_mon->size &&
	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)(*data)->data;

		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);

		len += sizeof(**data) + sizeof(*fw_mon_data);
		if (fw_mon->size) {
			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
			monitor_len = fw_mon->size;
		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			if (trans->dbg.dest_tlv->version) {
				base = (iwl_read_prph(trans, base) &
					IWL_LDBG_M2S_BUF_BA_MSK) <<
				       trans->dbg.dest_tlv->base_shift;
				base *= IWL_M2S_UNIT_SIZE;
				base += trans->cfg->smem_offset;
			} else {
				base = iwl_read_prph(trans, base) <<
				       trans->dbg.dest_tlv->base_shift;
			}

			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	return len;
}

static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
{
	if (trans->dbg.fw_mon.size) {
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			trans->dbg.fw_mon.size;
		return trans->dbg.fw_mon.size;
	} else if (trans->dbg.dest_tlv) {
		u32 base, end, cfg_reg, monitor_len;

		if (trans->dbg.dest_tlv->version == 1) {
			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			cfg_reg = iwl_read_prph(trans, cfg_reg);
			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
				trans->dbg.dest_tlv->base_shift;
			base *= IWL_M2S_UNIT_SIZE;
			base += trans->cfg->smem_offset;

			monitor_len =
				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
				trans->dbg.dest_tlv->end_shift;
			monitor_len *= IWL_M2S_UNIT_SIZE;
		} else {
			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);

			base = iwl_read_prph(trans, base) <<
			       trans->dbg.dest_tlv->base_shift;
			end = iwl_read_prph(trans, end) <<
			      trans->dbg.dest_tlv->end_shift;

			/* Make "end" point to the actual end */
			if (trans->trans_cfg->device_family >=
			    IWL_DEVICE_FAMILY_8000 ||
			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
				end += (1 << trans->dbg.dest_tlv->end_shift);
			monitor_len = end - base;
		}
		*len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_fw_mon) +
			monitor_len;
		return monitor_len;
	}
	return 0;
}
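/*
 * Build the transport error dump: a first pass computes an upper bound
 * for the length of every section requested in dump_mask, then the
 * vzalloc'ed buffer is filled in section by section.
 */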
static struct iwl_trans_dump_data *
iwl_trans_pcie_dump_data(struct iwl_trans *trans,
			 u32 dump_mask,
			 const struct iwl_dump_sanitize_ops *sanitize_ops,
			 void *sanitize_ctx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0, monitor_len = 0;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
			!trans->trans_cfg->mq_rx_supported &&
			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);

	if (!dump_mask)
		return NULL;

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
		len += sizeof(*data) +
			cmdq->n_window * (sizeof(*txcmd) +
					  TFD_MAX_PAYLOAD_SIZE);

	/* FW monitor */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);

	/* CSR registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* FH registers */
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
		if (trans->trans_cfg->gen2)
			len += sizeof(*data) +
			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
		else
			len += sizeof(*data) +
			       (FH_MEM_UPPER_BOUND -
				FH_MEM_LOWER_BOUND);
	}

	if (dump_rbs) {
		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
		/* RBs */
		num_rbs = iwl_get_closed_rb_stts(trans, rxq);
		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
		for (i = 0; i < trans->init_dram.paging_cnt; i++)
			len += sizeof(*data) +
			       sizeof(struct iwl_fw_error_dump_paging) +
			       trans->init_dram.paging[i].size;

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;

	len = 0;
	data = (void *)dump_data->data;

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
		u16 tfd_size = trans->txqs.tfd.size;

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
		txcmd = (void *)data->data;
		spin_lock_bh(&cmdq->lock);
		ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->trans_cfg->gen2)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				if (sanitize_ops && sanitize_ops->frob_hcmd)
					sanitize_ops->frob_hcmd(sanitize_ctx,
								txcmd->data,
								caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
		else
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}

#define IWL_TRANS_COMMON_OPS						\
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
	.write8 = iwl_trans_pcie_write8,				\
	.write32 = iwl_trans_pcie_write32,				\
	.read32 = iwl_trans_pcie_read32,				\
	.read_prph = iwl_trans_pcie_read_prph,				\
	.write_prph = iwl_trans_pcie_write_prph,			\
	.read_mem = iwl_trans_pcie_read_mem,				\
	.write_mem = iwl_trans_pcie_write_mem,				\
	.read_config32 = iwl_trans_pcie_read_config32,			\
	.configure = iwl_trans_pcie_configure,				\
	.set_pmi = iwl_trans_pcie_set_pmi,				\
	.sw_reset = iwl_trans_pcie_sw_reset,				\
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
	.dump_data = iwl_trans_pcie_dump_data,				\
	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
	.d3_resume = iwl_trans_pcie_d3_resume,				\
	.interrupts = iwl_trans_pci_interrupts,				\
	.sync_nmi = iwl_trans_pcie_sync_nmi,				\
	.imr_dma_data = iwl_trans_pcie_copy_imr

static const struct iwl_trans_ops trans_ops_pcie = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.send_cmd = iwl_pcie_enqueue_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_txq_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,

	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,

	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};

static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
	IWL_TRANS_COMMON_OPS,
	.start_hw = iwl_trans_pcie_start_hw,
	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
	.start_fw = iwl_trans_pcie_gen2_start_fw,
	.stop_device = iwl_trans_pcie_gen2_stop_device,

	.send_cmd = iwl_pcie_gen2_enqueue_hcmd,

	.tx = iwl_txq_gen2_tx,
	.reclaim = iwl_txq_reclaim,

	.set_q_ptrs = iwl_txq_set_q_ptrs,

	.txq_alloc = iwl_txq_dyn_alloc,
	.txq_free = iwl_txq_dyn_free,
	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
	.load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
	.load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
	.set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
#ifdef CONFIG_IWLWIFI_DEBUGFS
	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
};
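/*
 * Allocate and initialize the PCIe transport: picks the gen1/gen2 ops,
 * maps BAR 0, sets up locks, wait queues, the RB allocator workqueue,
 * the DMA mask and the (MSI-X or INTx/MSI) interrupt handlers.
 * Returns an ERR_PTR() on failure.
 */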
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	int ret, addr_size;
	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
	void __iomem * const *table;
	u32 bar0;

	if (!cfg_trans->gen2)
		ops = &trans_ops_pcie;

	/* reassign our BAR 0 if invalid due to possible runtime PM races */
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
	if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		ret = pci_assign_resource(pdev, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
				cfg_trans);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
	init_waitqueue_head(&trans_pcie->imr_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_trans;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	addr_size = trans->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	table = pcim_iomap_table(pdev);
	if (!table) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie->hw_base = table[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	if (trans->hw_rev == 0xffffffff) {
		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
		ret = -EIO;
		goto out_no_pci;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
	 * in the old format.
	 */
	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
		trans->hw_rev_step = trans->hw_rev & 0xF;
	else
		trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device,
		 pdev->subsystem_device);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
	if (ret)
		goto out_no_pci;

	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}

void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	iwl_write_prph(trans, IMR_UREG_CHICK,
		       iwl_read_prph(trans, IMR_UREG_CHICK) |
		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
		       (u32)(src_addr & 0xFFFFFFFF));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
		       iwl_get_dma_hi_addr(src_addr));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}

int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status !=
				 IMR_D2S_REQUESTED, 5 * HZ);
	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}
	trans_pcie->imr_status = IMR_D2S_IDLE;
	return 0;
}