// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
#if defined(__linux__)
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
#if defined(__linux__)
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
#elif defined(__FreeBSD__)
		iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	}
	goto out;

err_read:
#if defined(__linux__)
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
#elif defined(__FreeBSD__)
	iwl_print_hex_dump(NULL, IWL_DL_ANY, prefix, (u8 *)buf, i);
#endif
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
				   bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
	else
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

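/*
 * Note: the monitor block size is always a power of two, BIT(power)
 * bytes.  With the default max_power of 26 and the minimum of 11, the
 * allocation above is tried at 64 MB first and may fall back as far
 * as 2 KB.
 */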
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

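/*
 * Shared-memory (SHR) registers are not directly mapped; they are
 * reached through the HEEP control/data pair below: the low 16 bits of
 * the control word carry the SHR address and the top nibble carries
 * the opcode (2 = read, 3 = write).
 */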
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this workaround is needed for
		 * 7260 / 3160 only, and we gate it on
		 * host_interrupt_operation_mode even though it is not
		 * related to that flag.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but lets us be
		 * sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		msleep(100);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

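/*
 * Push one chunk (at most FH_MEM_TB_MAX_LENGTH bytes, see
 * iwl_pcie_load_section()) through the service channel programmed
 * above; the FH_TX interrupt handler sets ucode_write_complete and
 * wakes ucode_write_waitq once the DMA is done.
 */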
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

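/*
 * Legacy (pre-ini) debug destination: the dest TLV carries a small list
 * of register operations (CSR/periphery writes, set/clear bits) that is
 * replayed below to route the firmware debug data, after which the
 * monitor base/end registers are programmed from the allocated buffer.
 */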
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

static const struct iwl_causes_list causes_list_common[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_RESET_DONE, CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	{MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
};

static const struct iwl_causes_list causes_list_bz[] = {
	{MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
};

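/*
 * In the lists above, 'addr' is the byte offset of the cause in the
 * IVAR table and 'mask_reg' is the register in which the cause must be
 * unmasked; iwl_pcie_map_list() writes the target vector into the IVAR
 * entry and then unmasks the cause.
 */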
static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

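/*
 * Example: when IWL_SHARED_IRQ_FIRST_RSS is set, vector 0 also serves
 * the first RSS queue, so 'offset' below is 1 and RX queue idx is
 * routed to cause Q(idx - 1); otherwise queue idx maps straight to
 * cause Q(idx).
 */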
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frames, command responses etc., is always mapped to
	 * the first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
"disabled" : "enabled"); 1498 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) { 1499 if (trans->trans_cfg->gen2) 1500 _iwl_trans_pcie_gen2_stop_device(trans); 1501 else 1502 _iwl_trans_pcie_stop_device(trans); 1503 } 1504 } 1505 1506 void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, 1507 bool test, bool reset) 1508 { 1509 iwl_disable_interrupts(trans); 1510 1511 /* 1512 * in testing mode, the host stays awake and the 1513 * hardware won't be reset (not even partially) 1514 */ 1515 if (test) 1516 return; 1517 1518 iwl_pcie_disable_ict(trans); 1519 1520 iwl_pcie_synchronize_irqs(trans); 1521 1522 iwl_clear_bit(trans, CSR_GP_CNTRL, 1523 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1524 iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1525 1526 if (reset) { 1527 /* 1528 * reset TX queues -- some of their registers reset during S3 1529 * so if we don't reset everything here the D3 image would try 1530 * to execute some invalid memory upon resume 1531 */ 1532 iwl_trans_pcie_tx_reset(trans); 1533 } 1534 1535 iwl_pcie_set_pwr(trans, true); 1536 } 1537 1538 static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) 1539 { 1540 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1541 int ret; 1542 1543 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { 1544 iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, 1545 suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND : 1546 UREG_DOORBELL_TO_ISR6_RESUME); 1547 } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 1548 iwl_write32(trans, CSR_IPC_SLEEP_CONTROL, 1549 suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND : 1550 CSR_IPC_SLEEP_CONTROL_RESUME); 1551 iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6, 1552 UREG_DOORBELL_TO_ISR6_SLEEP_CTRL); 1553 } else { 1554 return 0; 1555 } 1556 1557 ret = wait_event_timeout(trans_pcie->sx_waitq, 1558 trans_pcie->sx_complete, 2 * HZ); 1559 1560 /* Invalidate it toward next suspend or resume */ 1561 trans_pcie->sx_complete = false; 1562 1563 if (!ret) { 1564 IWL_ERR(trans, "Timeout %s D3\n", 1565 suspend ? "entering" : "exiting"); 1566 return -ETIMEDOUT; 1567 } 1568 1569 return 0; 1570 } 1571 1572 static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1573 bool reset) 1574 { 1575 int ret; 1576 1577 if (!reset) 1578 /* Enable persistence mode to avoid reset */ 1579 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1580 CSR_HW_IF_CONFIG_REG_PERSIST_MODE); 1581 1582 ret = iwl_pcie_d3_handshake(trans, true); 1583 if (ret) 1584 return ret; 1585 1586 iwl_pcie_d3_complete_suspend(trans, test, reset); 1587 1588 return 0; 1589 } 1590 1591 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1592 enum iwl_d3_status *status, 1593 bool test, bool reset) 1594 { 1595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1596 u32 val; 1597 int ret; 1598 1599 if (test) { 1600 iwl_enable_interrupts(trans); 1601 *status = IWL_D3_STATUS_ALIVE; 1602 ret = 0; 1603 goto out; 1604 } 1605 1606 iwl_set_bit(trans, CSR_GP_CNTRL, 1607 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1608 1609 ret = iwl_finish_nic_init(trans); 1610 if (ret) 1611 return ret; 1612 1613 /* 1614 * Reconfigure IVAR table in case of MSIX or reset ict table in 1615 * MSI mode since HW reset erased it. 1616 * Also enables interrupts - none will happen as 1617 * the device doesn't know we're waking it up, only when 1618 * the opmode actually tells it after this call. 
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE)
		ret = iwl_pcie_d3_handshake(trans, false);

	return ret;
}

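/*
 * MSI-X budgeting below: we request min(num_online_cpus() + 2, max RX
 * queues) vectors.  E.g. on a 4-CPU system we ask for 6; if the OS
 * grants only 5, the non-RX causes share a vector with the FBQ, and
 * with 4 the first RSS queue shares it as well (see the
 * shared_vec_mask handling further down).
 */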
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       trans_pcie->trans->num_rx_queues,
		       trans_pcie->shared_vec_mask);

	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
{
	u32 hpm, wprot;

	switch (trans->trans_cfg->device_family) {
	case IWL_DEVICE_FAMILY_9000:
		wprot = PREG_PRPH_WPROT_9000;
		break;
	case IWL_DEVICE_FAMILY_22000:
		wprot = PREG_PRPH_WPROT_22000;
		break;
	default:
		return 0;
	}

	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);

		if (wprot_val & PREG_WFPM_ACCESS) {
			IWL_ERR(trans,
				"Error, can not clear persistence bit\n");
			return -EPERM;
		}
		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
					    hpm & ~PERSISTENCE_BIT);
	}

	return 0;
}

static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
	int ret;

	ret = iwl_finish_nic_init(trans);
	if (ret < 0)
		return ret;

	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
	udelay(20);
	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
			  HPM_HIPM_GEN_CFG_CR_PG_EN |
			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
	udelay(20);
	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);

	return iwl_trans_pcie_sw_reset(trans, true);
}

static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	err = iwl_trans_pcie_clear_persistence_bit(trans);
	if (err)
		return err;

	err = iwl_trans_pcie_sw_reset(trans, true);
	if (err)
		return err;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
	    trans->trans_cfg->integrated) {
		err = iwl_pcie_gen2_force_power_gating(trans);
		if (err)
			return err;
	}

	err = iwl_pcie_apm_init(trans);
	if (err)
		return err;

	iwl_pcie_init_msix(trans_pcie);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	trans_pcie->opmode_down = false;

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	/* ...rfkill can call stop_device and set it false if needed */
	iwl_pcie_check_hw_rf_kill(trans);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}

static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
iwl_disable_interrupts(trans); 1912 1913 iwl_pcie_apm_stop(trans, true); 1914 1915 iwl_disable_interrupts(trans); 1916 1917 iwl_pcie_disable_ict(trans); 1918 1919 mutex_unlock(&trans_pcie->mutex); 1920 1921 iwl_pcie_synchronize_irqs(trans); 1922 } 1923 1924 #if defined(__linux__) 1925 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1926 { 1927 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1928 } 1929 1930 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1931 { 1932 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1933 } 1934 1935 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1936 { 1937 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); 1938 } 1939 #elif defined(__FreeBSD__) 1940 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1941 { 1942 1943 IWL_DEBUG_PCI_RW(trans, "W1 %#010x %#04x\n", ofs, val); 1944 bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); 1945 } 1946 1947 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) 1948 { 1949 1950 IWL_DEBUG_PCI_RW(trans, "W4 %#010x %#010x\n", ofs, val); 1951 bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val); 1952 } 1953 1954 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) 1955 { 1956 u32 v; 1957 1958 v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs); 1959 IWL_DEBUG_PCI_RW(trans, "R4 %#010x %#010x\n", ofs, v); 1960 return (v); 1961 } 1962 #endif 1963 1964 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) 1965 { 1966 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 1967 return 0x00FFFFFF; 1968 else 1969 return 0x000FFFFF; 1970 } 1971 1972 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 1973 { 1974 u32 mask = iwl_trans_pcie_prph_msk(trans); 1975 1976 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, 1977 ((reg & mask) | (3 << 24))); 1978 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 1979 } 1980 1981 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, 1982 u32 val) 1983 { 1984 u32 mask = iwl_trans_pcie_prph_msk(trans); 1985 1986 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 1987 ((addr & mask) | (3 << 24))); 1988 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1989 } 1990 1991 static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1992 const struct iwl_trans_config *trans_cfg) 1993 { 1994 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1995 1996 /* free all first - we might be reconfigured for a different size */ 1997 iwl_pcie_free_rbs_pool(trans); 1998 1999 trans->txqs.cmd.q_id = trans_cfg->cmd_queue; 2000 trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; 2001 trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; 2002 trans->txqs.page_offs = trans_cfg->cb_data_offs; 2003 trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); 2004 trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; 2005 2006 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) 2007 trans_pcie->n_no_reclaim_cmds = 0; 2008 else 2009 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds; 2010 if (trans_pcie->n_no_reclaim_cmds) 2011 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 2012 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 2013 2014 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size; 2015 trans_pcie->rx_page_order = 2016 
iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size); 2017 trans_pcie->rx_buf_bytes = 2018 iwl_trans_get_rb_size(trans_pcie->rx_buf_size); 2019 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12); 2020 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) 2021 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); 2022 2023 trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; 2024 trans_pcie->scd_set_active = trans_cfg->scd_set_active; 2025 2026 trans->command_groups = trans_cfg->command_groups; 2027 trans->command_groups_size = trans_cfg->command_groups_size; 2028 2029 /* Initialize NAPI here - it should be before registering to mac80211 2030 * in the opmode but after the HW struct is allocated. 2031 * As this function may be called again in some corner cases don't 2032 * do anything if NAPI was already initialized. 2033 */ 2034 if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 2035 init_dummy_netdev(&trans_pcie->napi_dev); 2036 2037 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake; 2038 } 2039 2040 void iwl_trans_pcie_free(struct iwl_trans *trans) 2041 { 2042 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2043 int i; 2044 2045 iwl_pcie_synchronize_irqs(trans); 2046 2047 if (trans->trans_cfg->gen2) 2048 iwl_txq_gen2_tx_free(trans); 2049 else 2050 iwl_pcie_tx_free(trans); 2051 iwl_pcie_rx_free(trans); 2052 2053 if (trans_pcie->rba.alloc_wq) { 2054 destroy_workqueue(trans_pcie->rba.alloc_wq); 2055 trans_pcie->rba.alloc_wq = NULL; 2056 } 2057 2058 if (trans_pcie->msix_enabled) { 2059 for (i = 0; i < trans_pcie->alloc_vecs; i++) { 2060 irq_set_affinity_hint( 2061 trans_pcie->msix_entries[i].vector, 2062 NULL); 2063 } 2064 2065 trans_pcie->msix_enabled = false; 2066 } else { 2067 iwl_pcie_free_ict(trans); 2068 } 2069 2070 iwl_pcie_free_fw_monitor(trans); 2071 2072 if (trans_pcie->pnvm_dram.size) 2073 dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size, 2074 trans_pcie->pnvm_dram.block, 2075 trans_pcie->pnvm_dram.physical); 2076 2077 if (trans_pcie->reduce_power_dram.size) 2078 dma_free_coherent(trans->dev, 2079 trans_pcie->reduce_power_dram.size, 2080 trans_pcie->reduce_power_dram.block, 2081 trans_pcie->reduce_power_dram.physical); 2082 2083 mutex_destroy(&trans_pcie->mutex); 2084 iwl_trans_free(trans); 2085 } 2086 2087 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) 2088 { 2089 if (state) 2090 set_bit(STATUS_TPOWER_PMI, &trans->status); 2091 else 2092 clear_bit(STATUS_TPOWER_PMI, &trans->status); 2093 } 2094 2095 struct iwl_trans_pcie_removal { 2096 struct pci_dev *pdev; 2097 struct work_struct work; 2098 }; 2099 2100 static void iwl_trans_pcie_removal_wk(struct work_struct *wk) 2101 { 2102 struct iwl_trans_pcie_removal *removal = 2103 container_of(wk, struct iwl_trans_pcie_removal, work); 2104 struct pci_dev *pdev = removal->pdev; 2105 static char *prop[] = {"EVENT=INACCESSIBLE", NULL}; 2106 2107 dev_err(&pdev->dev, "Device gone - attempting removal\n"); 2108 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop); 2109 pci_lock_rescan_remove(); 2110 pci_dev_put(pdev); 2111 pci_stop_and_remove_bus_device(pdev); 2112 pci_unlock_rescan_remove(); 2113 2114 kfree(removal); 2115 module_put(THIS_MODULE); 2116 } 2117 2118 /* 2119 * This version doesn't disable BHs but rather assumes they're 2120 * already disabled. 
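* (the iwl_trans_pcie_grab_nic_access() wrapper below disables BHs before calling this, and keeps them disabled until iwl_trans_pcie_release_nic_access())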
2121 */ 2122 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2123 { 2124 int ret; 2125 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2126 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ; 2127 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | 2128 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP; 2129 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN; 2130 2131 spin_lock(&trans_pcie->reg_lock); 2132 2133 if (trans_pcie->cmd_hold_nic_awake) 2134 goto out; 2135 2136 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { 2137 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ; 2138 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2139 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; 2140 } 2141 2142 /* this bit wakes up the NIC */ 2143 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write); 2144 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000) 2145 udelay(2); 2146 2147 /* 2148 * These bits say the device is running, and should keep running for 2149 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), 2150 * but they do not indicate that embedded SRAM is restored yet; 2151 * HW with volatile SRAM must save/restore contents to/from 2152 * host DRAM when sleeping/waking for power-saving. 2153 * Each direction takes approximately 1/4 millisecond; with this 2154 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a 2155 * series of register accesses are expected (e.g. reading Event Log), 2156 * to keep device from sleeping. 2157 * 2158 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that 2159 * SRAM is okay/restored. We don't check that here because this call 2160 * is just for hardware register access; but GP1 MAC_SLEEP 2161 * check is a good idea before accessing the SRAM of HW with 2162 * volatile SRAM (e.g. reading Event Log). 2163 * 2164 * 5000 series and later (including 1000 series) have non-volatile SRAM, 2165 * and do not save/restore SRAM when power cycling. 2166 */ 2167 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000); 2168 if (unlikely(ret < 0)) { 2169 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL); 2170 2171 WARN_ONCE(1, 2172 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n", 2173 cntrl); 2174 2175 iwl_trans_pcie_dump_regs(trans); 2176 2177 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) { 2178 struct iwl_trans_pcie_removal *removal; 2179 2180 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2181 goto err; 2182 2183 IWL_ERR(trans, "Device gone - scheduling removal!\n"); 2184 2185 /* 2186 * get a module reference to avoid doing this 2187 * while unloading anyway and to avoid 2188 * scheduling a work with code that's being 2189 * removed. 2190 */ 2191 if (!try_module_get(THIS_MODULE)) { 2192 IWL_ERR(trans, 2193 "Module is being unloaded - abort\n"); 2194 goto err; 2195 } 2196 2197 removal = kzalloc(sizeof(*removal), GFP_ATOMIC); 2198 if (!removal) { 2199 module_put(THIS_MODULE); 2200 goto err; 2201 } 2202 /* 2203 * we don't need to clear this flag, because 2204 * the trans will be freed and reallocated. 
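* (iwl_trans_pcie_removal_wk() stops and removes the PCI device outright, so any later re-probe starts from a fresh trans)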
2205 */ 2206 set_bit(STATUS_TRANS_DEAD, &trans->status); 2207 2208 removal->pdev = to_pci_dev(trans->dev); 2209 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk); 2210 pci_dev_get(removal->pdev); 2211 schedule_work(&removal->work); 2212 } else { 2213 iwl_write32(trans, CSR_RESET, 2214 CSR_RESET_REG_FLAG_FORCE_NMI); 2215 } 2216 2217 err: 2218 spin_unlock(&trans_pcie->reg_lock); 2219 return false; 2220 } 2221 2222 out: 2223 /* 2224 * Fool sparse by faking that we release the lock - sparse will 2225 * track nic_access anyway. 2226 */ 2227 __release(&trans_pcie->reg_lock); 2228 return true; 2229 } 2230 2231 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) 2232 { 2233 bool ret; 2234 2235 local_bh_disable(); 2236 ret = __iwl_trans_pcie_grab_nic_access(trans); 2237 if (ret) { 2238 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */ 2239 return ret; 2240 } 2241 local_bh_enable(); 2242 return false; 2243 } 2244 2245 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) 2246 { 2247 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2248 2249 lockdep_assert_held(&trans_pcie->reg_lock); 2250 2251 /* 2252 * Fool sparse by faking that we acquire the lock - sparse will 2253 * track nic_access anyway. 2254 */ 2255 __acquire(&trans_pcie->reg_lock); 2256 2257 if (trans_pcie->cmd_hold_nic_awake) 2258 goto out; 2259 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 2260 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2261 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); 2262 else 2263 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 2264 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 2265 /* 2266 * Above we read the CSR_GP_CNTRL register, which will flush 2267 * any previous writes, but we need the write that clears the 2268 * MAC_ACCESS_REQ bit to be performed before any other writes 2269 * scheduled on different CPUs (after we drop reg_lock). 2270 */ 2271 out: 2272 spin_unlock_bh(&trans_pcie->reg_lock); 2273 } 2274 2275 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, 2276 void *buf, int dwords) 2277 { 2278 int offs = 0; 2279 u32 *vals = buf; 2280 2281 while (offs < dwords) { 2282 /* limit the time we spin here under lock to 1/2s */ 2283 unsigned long end = jiffies + HZ / 2; 2284 bool resched = false; 2285 2286 if (iwl_trans_grab_nic_access(trans)) { 2287 iwl_write32(trans, HBUS_TARG_MEM_RADDR, 2288 addr + 4 * offs); 2289 2290 while (offs < dwords) { 2291 vals[offs] = iwl_read32(trans, 2292 HBUS_TARG_MEM_RDAT); 2293 offs++; 2294 2295 if (time_after(jiffies, end)) { 2296 resched = true; 2297 break; 2298 } 2299 } 2300 iwl_trans_release_nic_access(trans); 2301 2302 if (resched) 2303 cond_resched(); 2304 } else { 2305 return -EBUSY; 2306 } 2307 } 2308 2309 return 0; 2310 } 2311 2312 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, 2313 const void *buf, int dwords) 2314 { 2315 int offs, ret = 0; 2316 const u32 *vals = buf; 2317 2318 if (iwl_trans_grab_nic_access(trans)) { 2319 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); 2320 for (offs = 0; offs < dwords; offs++) 2321 iwl_write32(trans, HBUS_TARG_MEM_WDAT, 2322 vals ?
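/* a NULL buffer zero-fills the range */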
vals[offs] : 0); 2323 iwl_trans_release_nic_access(trans); 2324 } else { 2325 ret = -EBUSY; 2326 } 2327 return ret; 2328 } 2329 2330 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, 2331 u32 *val) 2332 { 2333 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, 2334 ofs, val); 2335 } 2336 2337 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 2338 { 2339 int i; 2340 2341 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 2342 struct iwl_txq *txq = trans->txqs.txq[i]; 2343 2344 if (i == trans->txqs.cmd.q_id) 2345 continue; 2346 2347 spin_lock_bh(&txq->lock); 2348 2349 if (!block && !(WARN_ON_ONCE(!txq->block))) { 2350 txq->block--; 2351 if (!txq->block) { 2352 iwl_write32(trans, HBUS_TARG_WRPTR, 2353 txq->write_ptr | (i << 8)); 2354 } 2355 } else if (block) { 2356 txq->block++; 2357 } 2358 2359 spin_unlock_bh(&txq->lock); 2360 } 2361 } 2362 2363 #define IWL_FLUSH_WAIT_MS 2000 2364 2365 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, 2366 struct iwl_trans_rxq_dma_data *data) 2367 { 2368 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2369 2370 if (queue >= trans->num_rx_queues || !trans_pcie->rxq) 2371 return -EINVAL; 2372 2373 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; 2374 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; 2375 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; 2376 data->fr_bd_wid = 0; 2377 2378 return 0; 2379 } 2380 2381 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) 2382 { 2383 struct iwl_txq *txq; 2384 unsigned long now = jiffies; 2385 bool overflow_tx; 2386 u8 wr_ptr; 2387 2388 /* Make sure the NIC is still alive in the bus */ 2389 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2390 return -ENODEV; 2391 2392 if (!test_bit(txq_idx, trans->txqs.queue_used)) 2393 return -EINVAL; 2394 2395 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); 2396 txq = trans->txqs.txq[txq_idx]; 2397 2398 spin_lock_bh(&txq->lock); 2399 overflow_tx = txq->overflow_tx || 2400 !skb_queue_empty(&txq->overflow_q); 2401 spin_unlock_bh(&txq->lock); 2402 2403 wr_ptr = READ_ONCE(txq->write_ptr); 2404 2405 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) || 2406 overflow_tx) && 2407 !time_after(jiffies, 2408 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { 2409 u8 write_ptr = READ_ONCE(txq->write_ptr); 2410 2411 /* 2412 * If write pointer moved during the wait, warn only 2413 * if the TX came from op mode. In case TX came from 2414 * trans layer (overflow TX) don't warn. 
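* (the WARN_ONCE below also fails the wait with -ETIMEDOUT)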
2415 */ 2416 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx, 2417 "WR pointer moved while flushing %d -> %d\n", 2418 wr_ptr, write_ptr)) 2419 return -ETIMEDOUT; 2420 wr_ptr = write_ptr; 2421 2422 usleep_range(1000, 2000); 2423 2424 spin_lock_bh(&txq->lock); 2425 overflow_tx = txq->overflow_tx || 2426 !skb_queue_empty(&txq->overflow_q); 2427 spin_unlock_bh(&txq->lock); 2428 } 2429 2430 if (txq->read_ptr != txq->write_ptr) { 2431 IWL_ERR(trans, 2432 "fail to flush all tx fifo queues Q %d\n", txq_idx); 2433 iwl_txq_log_scd_error(trans, txq); 2434 return -ETIMEDOUT; 2435 } 2436 2437 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx); 2438 2439 return 0; 2440 } 2441 2442 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) 2443 { 2444 int cnt; 2445 int ret = 0; 2446 2447 /* waiting for all the tx frames complete might take a while */ 2448 for (cnt = 0; 2449 cnt < trans->trans_cfg->base_params->num_of_queues; 2450 cnt++) { 2451 2452 if (cnt == trans->txqs.cmd.q_id) 2453 continue; 2454 if (!test_bit(cnt, trans->txqs.queue_used)) 2455 continue; 2456 if (!(BIT(cnt) & txq_bm)) 2457 continue; 2458 2459 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt); 2460 if (ret) 2461 break; 2462 } 2463 2464 return ret; 2465 } 2466 2467 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, 2468 u32 mask, u32 value) 2469 { 2470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2471 2472 spin_lock_bh(&trans_pcie->reg_lock); 2473 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); 2474 spin_unlock_bh(&trans_pcie->reg_lock); 2475 } 2476 2477 static const char *get_csr_string(int cmd) 2478 { 2479 #define IWL_CMD(x) case x: return #x 2480 switch (cmd) { 2481 IWL_CMD(CSR_HW_IF_CONFIG_REG); 2482 IWL_CMD(CSR_INT_COALESCING); 2483 IWL_CMD(CSR_INT); 2484 IWL_CMD(CSR_INT_MASK); 2485 IWL_CMD(CSR_FH_INT_STATUS); 2486 IWL_CMD(CSR_GPIO_IN); 2487 IWL_CMD(CSR_RESET); 2488 IWL_CMD(CSR_GP_CNTRL); 2489 IWL_CMD(CSR_HW_REV); 2490 IWL_CMD(CSR_EEPROM_REG); 2491 IWL_CMD(CSR_EEPROM_GP); 2492 IWL_CMD(CSR_OTP_GP_REG); 2493 IWL_CMD(CSR_GIO_REG); 2494 IWL_CMD(CSR_GP_UCODE_REG); 2495 IWL_CMD(CSR_GP_DRIVER_REG); 2496 IWL_CMD(CSR_UCODE_DRV_GP1); 2497 IWL_CMD(CSR_UCODE_DRV_GP2); 2498 IWL_CMD(CSR_LED_REG); 2499 IWL_CMD(CSR_DRAM_INT_TBL_REG); 2500 IWL_CMD(CSR_GIO_CHICKEN_BITS); 2501 IWL_CMD(CSR_ANA_PLL_CFG); 2502 IWL_CMD(CSR_HW_REV_WA_REG); 2503 IWL_CMD(CSR_MONITOR_STATUS_REG); 2504 IWL_CMD(CSR_DBG_HPET_MEM_REG); 2505 default: 2506 return "UNKNOWN"; 2507 } 2508 #undef IWL_CMD 2509 } 2510 2511 void iwl_pcie_dump_csr(struct iwl_trans *trans) 2512 { 2513 int i; 2514 static const u32 csr_tbl[] = { 2515 CSR_HW_IF_CONFIG_REG, 2516 CSR_INT_COALESCING, 2517 CSR_INT, 2518 CSR_INT_MASK, 2519 CSR_FH_INT_STATUS, 2520 CSR_GPIO_IN, 2521 CSR_RESET, 2522 CSR_GP_CNTRL, 2523 CSR_HW_REV, 2524 CSR_EEPROM_REG, 2525 CSR_EEPROM_GP, 2526 CSR_OTP_GP_REG, 2527 CSR_GIO_REG, 2528 CSR_GP_UCODE_REG, 2529 CSR_GP_DRIVER_REG, 2530 CSR_UCODE_DRV_GP1, 2531 CSR_UCODE_DRV_GP2, 2532 CSR_LED_REG, 2533 CSR_DRAM_INT_TBL_REG, 2534 CSR_GIO_CHICKEN_BITS, 2535 CSR_ANA_PLL_CFG, 2536 CSR_MONITOR_STATUS_REG, 2537 CSR_HW_REV_WA_REG, 2538 CSR_DBG_HPET_MEM_REG 2539 }; 2540 IWL_ERR(trans, "CSR values:\n"); 2541 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is " 2542 "CSR_INT_PERIODIC_REG)\n"); 2543 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) { 2544 IWL_ERR(trans, " %25s: 0X%08x\n", 2545 get_csr_string(csr_tbl[i]), 2546 iwl_read32(trans, csr_tbl[i])); 2547 } 2548 } 2549 2550 #ifdef CONFIG_IWLWIFI_DEBUGFS 2551 /* create 
and remove of files */ 2552 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 2553 debugfs_create_file(#name, mode, parent, trans, \ 2554 &iwl_dbgfs_##name##_ops); \ 2555 } while (0) 2556 2557 /* file operation */ 2558 #define DEBUGFS_READ_FILE_OPS(name) \ 2559 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2560 .read = iwl_dbgfs_##name##_read, \ 2561 .open = simple_open, \ 2562 .llseek = generic_file_llseek, \ 2563 }; 2564 2565 #define DEBUGFS_WRITE_FILE_OPS(name) \ 2566 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2567 .write = iwl_dbgfs_##name##_write, \ 2568 .open = simple_open, \ 2569 .llseek = generic_file_llseek, \ 2570 }; 2571 2572 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ 2573 static const struct file_operations iwl_dbgfs_##name##_ops = { \ 2574 .write = iwl_dbgfs_##name##_write, \ 2575 .read = iwl_dbgfs_##name##_read, \ 2576 .open = simple_open, \ 2577 .llseek = generic_file_llseek, \ 2578 }; 2579 2580 struct iwl_dbgfs_tx_queue_priv { 2581 struct iwl_trans *trans; 2582 }; 2583 2584 struct iwl_dbgfs_tx_queue_state { 2585 loff_t pos; 2586 }; 2587 2588 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos) 2589 { 2590 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2591 struct iwl_dbgfs_tx_queue_state *state; 2592 2593 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2594 return NULL; 2595 2596 state = kmalloc(sizeof(*state), GFP_KERNEL); 2597 if (!state) 2598 return NULL; 2599 state->pos = *pos; 2600 return state; 2601 } 2602 2603 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq, 2604 void *v, loff_t *pos) 2605 { 2606 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2607 struct iwl_dbgfs_tx_queue_state *state = v; 2608 2609 *pos = ++state->pos; 2610 2611 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues) 2612 return NULL; 2613 2614 return state; 2615 } 2616 2617 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v) 2618 { 2619 kfree(v); 2620 } 2621 2622 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) 2623 { 2624 struct iwl_dbgfs_tx_queue_priv *priv = seq->private; 2625 struct iwl_dbgfs_tx_queue_state *state = v; 2626 struct iwl_trans *trans = priv->trans; 2627 struct iwl_txq *txq = trans->txqs.txq[state->pos]; 2628 2629 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", 2630 (unsigned int)state->pos, 2631 !!test_bit(state->pos, trans->txqs.queue_used), 2632 !!test_bit(state->pos, trans->txqs.queue_stopped)); 2633 if (txq) 2634 seq_printf(seq, 2635 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", 2636 txq->read_ptr, txq->write_ptr, 2637 txq->need_update, txq->frozen, 2638 txq->n_window, txq->ampdu); 2639 else 2640 seq_puts(seq, "(unallocated)"); 2641 2642 if (state->pos == trans->txqs.cmd.q_id) 2643 seq_puts(seq, " (HCMD)"); 2644 seq_puts(seq, "\n"); 2645 2646 return 0; 2647 } 2648 2649 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = { 2650 .start = iwl_dbgfs_tx_queue_seq_start, 2651 .next = iwl_dbgfs_tx_queue_seq_next, 2652 .stop = iwl_dbgfs_tx_queue_seq_stop, 2653 .show = iwl_dbgfs_tx_queue_seq_show, 2654 }; 2655 2656 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp) 2657 { 2658 struct iwl_dbgfs_tx_queue_priv *priv; 2659 2660 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops, 2661 sizeof(*priv)); 2662 2663 if (!priv) 2664 return -ENOMEM; 2665 2666 priv->trans = inode->i_private; 2667 return 0; 2668 } 2669 2670 static ssize_t iwl_dbgfs_rx_queue_read(struct file 
*file, 2671 char __user *user_buf, 2672 size_t count, loff_t *ppos) 2673 { 2674 struct iwl_trans *trans = file->private_data; 2675 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2676 char *buf; 2677 int pos = 0, i, ret; 2678 size_t bufsz; 2679 2680 bufsz = sizeof(char) * 121 * trans->num_rx_queues; 2681 2682 if (!trans_pcie->rxq) 2683 return -EAGAIN; 2684 2685 buf = kzalloc(bufsz, GFP_KERNEL); 2686 if (!buf) 2687 return -ENOMEM; 2688 2689 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 2690 struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2691 2692 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2693 i); 2694 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2695 rxq->read); 2696 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", 2697 rxq->write); 2698 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2699 rxq->write_actual); 2700 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2701 rxq->need_update); 2702 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2703 rxq->free_count); 2704 if (rxq->rb_stts) { 2705 u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, 2706 rxq)); 2707 pos += scnprintf(buf + pos, bufsz - pos, 2708 "\tclosed_rb_num: %u\n", 2709 r & 0x0FFF); 2710 } else { 2711 pos += scnprintf(buf + pos, bufsz - pos, 2712 "\tclosed_rb_num: Not Allocated\n"); 2713 } 2714 } 2715 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2716 kfree(buf); 2717 2718 return ret; 2719 } 2720 2721 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 2722 char __user *user_buf, 2723 size_t count, loff_t *ppos) 2724 { 2725 struct iwl_trans *trans = file->private_data; 2726 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2727 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2728 2729 int pos = 0; 2730 char *buf; 2731 int bufsz = 24 * 64; /* 24 items * 64 char per item */ 2732 ssize_t ret; 2733 2734 buf = kzalloc(bufsz, GFP_KERNEL); 2735 if (!buf) 2736 return -ENOMEM; 2737 2738 pos += scnprintf(buf + pos, bufsz - pos, 2739 "Interrupt Statistics Report:\n"); 2740 2741 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n", 2742 isr_stats->hw); 2743 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n", 2744 isr_stats->sw); 2745 if (isr_stats->sw || isr_stats->hw) { 2746 pos += scnprintf(buf + pos, bufsz - pos, 2747 "\tLast Restarting Code: 0x%X\n", 2748 isr_stats->err_code); 2749 } 2750 #ifdef CONFIG_IWLWIFI_DEBUG 2751 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n", 2752 isr_stats->sch); 2753 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n", 2754 isr_stats->alive); 2755 #endif 2756 pos += scnprintf(buf + pos, bufsz - pos, 2757 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill); 2758 2759 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n", 2760 isr_stats->ctkill); 2761 2762 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n", 2763 isr_stats->wakeup); 2764 2765 pos += scnprintf(buf + pos, bufsz - pos, 2766 "Rx command responses:\t\t %u\n", isr_stats->rx); 2767 2768 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n", 2769 isr_stats->tx); 2770 2771 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n", 2772 isr_stats->unhandled); 2773 2774 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2775 kfree(buf); 2776 return ret; 2777 } 2778 2779 static ssize_t iwl_dbgfs_interrupt_write(struct file *file, 2780 const char __user 
*user_buf, 2781 size_t count, loff_t *ppos) 2782 { 2783 struct iwl_trans *trans = file->private_data; 2784 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2785 struct isr_statistics *isr_stats = &trans_pcie->isr_stats; 2786 u32 reset_flag; 2787 int ret; 2788 2789 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag); 2790 if (ret) 2791 return ret; 2792 if (reset_flag == 0) 2793 memset(isr_stats, 0, sizeof(*isr_stats)); 2794 2795 return count; 2796 } 2797 2798 static ssize_t iwl_dbgfs_csr_write(struct file *file, 2799 const char __user *user_buf, 2800 size_t count, loff_t *ppos) 2801 { 2802 struct iwl_trans *trans = file->private_data; 2803 2804 iwl_pcie_dump_csr(trans); 2805 2806 return count; 2807 } 2808 2809 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2810 char __user *user_buf, 2811 size_t count, loff_t *ppos) 2812 { 2813 struct iwl_trans *trans = file->private_data; 2814 char *buf = NULL; 2815 ssize_t ret; 2816 2817 ret = iwl_dump_fh(trans, &buf); 2818 if (ret < 0) 2819 return ret; 2820 if (!buf) 2821 return -EINVAL; 2822 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2823 kfree(buf); 2824 return ret; 2825 } 2826 2827 static ssize_t iwl_dbgfs_rfkill_read(struct file *file, 2828 char __user *user_buf, 2829 size_t count, loff_t *ppos) 2830 { 2831 struct iwl_trans *trans = file->private_data; 2832 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2833 char buf[100]; 2834 int pos; 2835 2836 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n", 2837 trans_pcie->debug_rfkill, 2838 !(iwl_read32(trans, CSR_GP_CNTRL) & 2839 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)); 2840 2841 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2842 } 2843 2844 static ssize_t iwl_dbgfs_rfkill_write(struct file *file, 2845 const char __user *user_buf, 2846 size_t count, loff_t *ppos) 2847 { 2848 struct iwl_trans *trans = file->private_data; 2849 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2850 bool new_value; 2851 int ret; 2852 2853 ret = kstrtobool_from_user(user_buf, count, &new_value); 2854 if (ret) 2855 return ret; 2856 if (new_value == trans_pcie->debug_rfkill) 2857 return count; 2858 IWL_WARN(trans, "changing debug rfkill %d->%d\n", 2859 trans_pcie->debug_rfkill, new_value); 2860 trans_pcie->debug_rfkill = new_value; 2861 iwl_pcie_handle_rfkill_irq(trans); 2862 2863 return count; 2864 } 2865 2866 static int iwl_dbgfs_monitor_data_open(struct inode *inode, 2867 struct file *file) 2868 { 2869 struct iwl_trans *trans = inode->i_private; 2870 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2871 2872 if (!trans->dbg.dest_tlv || 2873 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) { 2874 IWL_ERR(trans, "Debug destination is not set to DRAM\n"); 2875 return -ENOENT; 2876 } 2877 2878 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) 2879 return -EBUSY; 2880 2881 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; 2882 return simple_open(inode, file); 2883 } 2884 2885 static int iwl_dbgfs_monitor_data_release(struct inode *inode, 2886 struct file *file) 2887 { 2888 struct iwl_trans_pcie *trans_pcie = 2889 IWL_TRANS_GET_PCIE_TRANS(inode->i_private); 2890 2891 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) 2892 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 2893 return 0; 2894 } 2895 2896 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, 2897 void *buf, ssize_t *size, 2898 ssize_t *bytes_copied) 2899 
{ 2900 int buf_size_left = count - *bytes_copied; 2901 2902 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); 2903 if (*size > buf_size_left) 2904 *size = buf_size_left; 2905 2906 *size -= copy_to_user(user_buf, buf, *size); 2907 *bytes_copied += *size; 2908 2909 if (buf_size_left == *size) 2910 return true; 2911 return false; 2912 } 2913 2914 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, 2915 char __user *user_buf, 2916 size_t count, loff_t *ppos) 2917 { 2918 struct iwl_trans *trans = file->private_data; 2919 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2920 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf; 2921 struct cont_rec *data = &trans_pcie->fw_mon_data; 2922 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; 2923 ssize_t size, bytes_copied = 0; 2924 bool b_full; 2925 2926 if (trans->dbg.dest_tlv) { 2927 write_ptr_addr = 2928 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 2929 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 2930 } else { 2931 write_ptr_addr = MON_BUFF_WRPTR; 2932 wrap_cnt_addr = MON_BUFF_CYCLE_CNT; 2933 } 2934 2935 if (unlikely(!trans->dbg.rec_on)) 2936 return 0; 2937 2938 mutex_lock(&data->mutex); 2939 if (data->state == 2940 IWL_FW_MON_DBGFS_STATE_DISABLED) { 2941 mutex_unlock(&data->mutex); 2942 return 0; 2943 } 2944 2945 /* write_ptr position in bytes rather than DW */ 2946 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); 2947 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); 2948 2949 if (data->prev_wrap_cnt == wrap_cnt) { 2950 size = write_ptr - data->prev_wr_ptr; 2951 curr_buf = cpu_addr + data->prev_wr_ptr; 2952 b_full = iwl_write_to_user_buf(user_buf, count, 2953 curr_buf, &size, 2954 &bytes_copied); 2955 data->prev_wr_ptr += size; 2956 2957 } else if (data->prev_wrap_cnt == wrap_cnt - 1 && 2958 write_ptr < data->prev_wr_ptr) { 2959 size = trans->dbg.fw_mon.size - data->prev_wr_ptr; 2960 curr_buf = cpu_addr + data->prev_wr_ptr; 2961 b_full = iwl_write_to_user_buf(user_buf, count, 2962 curr_buf, &size, 2963 &bytes_copied); 2964 data->prev_wr_ptr += size; 2965 2966 if (!b_full) { 2967 size = write_ptr; 2968 b_full = iwl_write_to_user_buf(user_buf, count, 2969 cpu_addr, &size, 2970 &bytes_copied); 2971 data->prev_wr_ptr = size; 2972 data->prev_wrap_cnt++; 2973 } 2974 } else { 2975 if (data->prev_wrap_cnt == wrap_cnt - 1 && 2976 write_ptr > data->prev_wr_ptr) 2977 IWL_WARN(trans, 2978 "write pointer passed previous write pointer, start copying from the beginning\n"); 2979 else if (!unlikely(data->prev_wrap_cnt == 0 && 2980 data->prev_wr_ptr == 0)) 2981 IWL_WARN(trans, 2982 "monitor data is out of sync, start copying from the beginning\n"); 2983 2984 size = write_ptr; 2985 b_full = iwl_write_to_user_buf(user_buf, count, 2986 cpu_addr, &size, 2987 &bytes_copied); 2988 data->prev_wr_ptr = size; 2989 data->prev_wrap_cnt = wrap_cnt; 2990 } 2991 2992 mutex_unlock(&data->mutex); 2993 2994 return bytes_copied; 2995 } 2996 2997 static ssize_t iwl_dbgfs_rf_read(struct file *file, 2998 char __user *user_buf, 2999 size_t count, loff_t *ppos) 3000 { 3001 struct iwl_trans *trans = file->private_data; 3002 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3003 3004 if (!trans_pcie->rf_name[0]) 3005 return -ENODEV; 3006 3007 return simple_read_from_buffer(user_buf, count, ppos, 3008 trans_pcie->rf_name, 3009 strlen(trans_pcie->rf_name)); 3010 } 3011 3012 DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 3013 DEBUGFS_READ_FILE_OPS(fh_reg); 3014
DEBUGFS_READ_FILE_OPS(rx_queue); 3015 DEBUGFS_WRITE_FILE_OPS(csr); 3016 DEBUGFS_READ_WRITE_FILE_OPS(rfkill); 3017 DEBUGFS_READ_FILE_OPS(rf); 3018 3019 static const struct file_operations iwl_dbgfs_tx_queue_ops = { 3020 .owner = THIS_MODULE, 3021 .open = iwl_dbgfs_tx_queue_open, 3022 .read = seq_read, 3023 .llseek = seq_lseek, 3024 .release = seq_release_private, 3025 }; 3026 3027 static const struct file_operations iwl_dbgfs_monitor_data_ops = { 3028 .read = iwl_dbgfs_monitor_data_read, 3029 .open = iwl_dbgfs_monitor_data_open, 3030 .release = iwl_dbgfs_monitor_data_release, 3031 }; 3032 3033 /* Create the debugfs files and directories */ 3034 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) 3035 { 3036 struct dentry *dir = trans->dbgfs_dir; 3037 3038 DEBUGFS_ADD_FILE(rx_queue, dir, 0400); 3039 DEBUGFS_ADD_FILE(tx_queue, dir, 0400); 3040 DEBUGFS_ADD_FILE(interrupt, dir, 0600); 3041 DEBUGFS_ADD_FILE(csr, dir, 0200); 3042 DEBUGFS_ADD_FILE(fh_reg, dir, 0400); 3043 DEBUGFS_ADD_FILE(rfkill, dir, 0600); 3044 DEBUGFS_ADD_FILE(monitor_data, dir, 0400); 3045 DEBUGFS_ADD_FILE(rf, dir, 0400); 3046 } 3047 3048 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) 3049 { 3050 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3051 struct cont_rec *data = &trans_pcie->fw_mon_data; 3052 3053 mutex_lock(&data->mutex); 3054 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; 3055 mutex_unlock(&data->mutex); 3056 } 3057 #endif /*CONFIG_IWLWIFI_DEBUGFS */ 3058 3059 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) 3060 { 3061 u32 cmdlen = 0; 3062 int i; 3063 3064 for (i = 0; i < trans->txqs.tfd.max_tbs; i++) 3065 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); 3066 3067 return cmdlen; 3068 } 3069 3070 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, 3071 struct iwl_fw_error_dump_data **data, 3072 int allocated_rb_nums) 3073 { 3074 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3075 int max_len = trans_pcie->rx_buf_bytes; 3076 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3077 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3078 u32 i, r, j, rb_len = 0; 3079 3080 spin_lock(&rxq->lock); 3081 3082 r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; 3083 3084 for (i = rxq->read, j = 0; 3085 i != r && j < allocated_rb_nums; 3086 i = (i + 1) & RX_QUEUE_MASK, j++) { 3087 struct iwl_rx_mem_buffer *rxb = rxq->queue[i]; 3088 struct iwl_fw_error_dump_rb *rb; 3089 3090 dma_sync_single_for_cpu(trans->dev, rxb->page_dma, 3091 max_len, DMA_FROM_DEVICE); 3092 3093 rb_len += sizeof(**data) + sizeof(*rb) + max_len; 3094 3095 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB); 3096 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len); 3097 rb = (void *)(*data)->data; 3098 rb->index = cpu_to_le32(i); 3099 memcpy(rb->data, page_address(rxb->page), max_len); 3100 3101 *data = iwl_fw_error_next_data(*data); 3102 } 3103 3104 spin_unlock(&rxq->lock); 3105 3106 return rb_len; 3107 } 3108 #define IWL_CSR_TO_DUMP (0x250) 3109 3110 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans, 3111 struct iwl_fw_error_dump_data **data) 3112 { 3113 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP; 3114 __le32 *val; 3115 int i; 3116 3117 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); 3118 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP); 3119 val = (void *)(*data)->data; 3120 3121 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4) 3122 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3123 3124 *data = 
iwl_fw_error_next_data(*data); 3125 3126 return csr_len; 3127 } 3128 3129 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans, 3130 struct iwl_fw_error_dump_data **data) 3131 { 3132 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND; 3133 __le32 *val; 3134 int i; 3135 3136 if (!iwl_trans_grab_nic_access(trans)) 3137 return 0; 3138 3139 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS); 3140 (*data)->len = cpu_to_le32(fh_regs_len); 3141 val = (void *)(*data)->data; 3142 3143 if (!trans->trans_cfg->gen2) 3144 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; 3145 i += sizeof(u32)) 3146 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i)); 3147 else 3148 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2); 3149 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2); 3150 i += sizeof(u32)) 3151 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans, 3152 i)); 3153 3154 iwl_trans_release_nic_access(trans); 3155 3156 *data = iwl_fw_error_next_data(*data); 3157 3158 return sizeof(**data) + fh_regs_len; 3159 } 3160 3161 static u32 3162 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, 3163 struct iwl_fw_error_dump_fw_mon *fw_mon_data, 3164 u32 monitor_len) 3165 { 3166 u32 buf_size_in_dwords = (monitor_len >> 2); 3167 u32 *buffer = (u32 *)fw_mon_data->data; 3168 u32 i; 3169 3170 if (!iwl_trans_grab_nic_access(trans)) 3171 return 0; 3172 3173 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1); 3174 for (i = 0; i < buf_size_in_dwords; i++) 3175 buffer[i] = iwl_read_umac_prph_no_grab(trans, 3176 MON_DMARB_RD_DATA_ADDR); 3177 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0); 3178 3179 iwl_trans_release_nic_access(trans); 3180 3181 return monitor_len; 3182 } 3183 3184 static void 3185 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, 3186 struct iwl_fw_error_dump_fw_mon *fw_mon_data) 3187 { 3188 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt; 3189 3190 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3191 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB; 3192 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB; 3193 write_ptr = DBGC_CUR_DBGBUF_STATUS; 3194 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND; 3195 } else if (trans->dbg.dest_tlv) { 3196 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg); 3197 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count); 3198 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3199 } else { 3200 base = MON_BUFF_BASE_ADDR; 3201 write_ptr = MON_BUFF_WRPTR; 3202 wrap_cnt = MON_BUFF_CYCLE_CNT; 3203 } 3204 3205 write_ptr_val = iwl_read_prph(trans, write_ptr); 3206 fw_mon_data->fw_mon_cycle_cnt = 3207 cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); 3208 fw_mon_data->fw_mon_base_ptr = 3209 cpu_to_le32(iwl_read_prph(trans, base)); 3210 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { 3211 fw_mon_data->fw_mon_base_high_ptr = 3212 cpu_to_le32(iwl_read_prph(trans, base_high)); 3213 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK; 3214 /* convert wrtPtr to DWs, to align with all HWs */ 3215 write_ptr_val >>= 2; 3216 } 3217 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val); 3218 } 3219 3220 static u32 3221 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, 3222 struct iwl_fw_error_dump_data **data, 3223 u32 monitor_len) 3224 { 3225 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon; 3226 u32 len = 0; 3227 3228 if (trans->dbg.dest_tlv || 3229 (fw_mon->size && 3230 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 || 3231 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) { 3232 
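/* taken when a debug destination TLV is configured, or when a DRAM fw monitor buffer exists (7000 family or AX210 and later) */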
struct iwl_fw_error_dump_fw_mon *fw_mon_data; 3233 3234 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); 3235 fw_mon_data = (void *)(*data)->data; 3236 3237 iwl_trans_pcie_dump_pointers(trans, fw_mon_data); 3238 3239 len += sizeof(**data) + sizeof(*fw_mon_data); 3240 if (fw_mon->size) { 3241 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size); 3242 monitor_len = fw_mon->size; 3243 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) { 3244 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); 3245 /* 3246 * Update pointers to reflect actual values after 3247 * shifting 3248 */ 3249 if (trans->dbg.dest_tlv->version) { 3250 base = (iwl_read_prph(trans, base) & 3251 IWL_LDBG_M2S_BUF_BA_MSK) << 3252 trans->dbg.dest_tlv->base_shift; 3253 base *= IWL_M2S_UNIT_SIZE; 3254 base += trans->cfg->smem_offset; 3255 } else { 3256 base = iwl_read_prph(trans, base) << 3257 trans->dbg.dest_tlv->base_shift; 3258 } 3259 3260 iwl_trans_read_mem(trans, base, fw_mon_data->data, 3261 monitor_len / sizeof(u32)); 3262 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) { 3263 monitor_len = 3264 iwl_trans_pci_dump_marbh_monitor(trans, 3265 fw_mon_data, 3266 monitor_len); 3267 } else { 3268 /* Didn't match anything - output no monitor data */ 3269 monitor_len = 0; 3270 } 3271 3272 len += monitor_len; 3273 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data)); 3274 } 3275 3276 return len; 3277 } 3278 3279 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) 3280 { 3281 if (trans->dbg.fw_mon.size) { 3282 *len += sizeof(struct iwl_fw_error_dump_data) + 3283 sizeof(struct iwl_fw_error_dump_fw_mon) + 3284 trans->dbg.fw_mon.size; 3285 return trans->dbg.fw_mon.size; 3286 } else if (trans->dbg.dest_tlv) { 3287 u32 base, end, cfg_reg, monitor_len; 3288 3289 if (trans->dbg.dest_tlv->version == 1) { 3290 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3291 cfg_reg = iwl_read_prph(trans, cfg_reg); 3292 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) << 3293 trans->dbg.dest_tlv->base_shift; 3294 base *= IWL_M2S_UNIT_SIZE; 3295 base += trans->cfg->smem_offset; 3296 3297 monitor_len = 3298 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 3299 trans->dbg.dest_tlv->end_shift; 3300 monitor_len *= IWL_M2S_UNIT_SIZE; 3301 } else { 3302 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg); 3303 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg); 3304 3305 base = iwl_read_prph(trans, base) << 3306 trans->dbg.dest_tlv->base_shift; 3307 end = iwl_read_prph(trans, end) << 3308 trans->dbg.dest_tlv->end_shift; 3309 3310 /* Make "end" point to the actual end */ 3311 if (trans->trans_cfg->device_family >= 3312 IWL_DEVICE_FAMILY_8000 || 3313 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) 3314 end += (1 << trans->dbg.dest_tlv->end_shift); 3315 monitor_len = end - base; 3316 } 3317 *len += sizeof(struct iwl_fw_error_dump_data) + 3318 sizeof(struct iwl_fw_error_dump_fw_mon) + 3319 monitor_len; 3320 return monitor_len; 3321 } 3322 return 0; 3323 } 3324 3325 static struct iwl_trans_dump_data * 3326 iwl_trans_pcie_dump_data(struct iwl_trans *trans, 3327 u32 dump_mask, 3328 const struct iwl_dump_sanitize_ops *sanitize_ops, 3329 void *sanitize_ctx) 3330 { 3331 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3332 struct iwl_fw_error_dump_data *data; 3333 struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; 3334 struct iwl_fw_error_dump_txcmd *txcmd; 3335 struct iwl_trans_dump_data *dump_data; 3336 u32 len, num_rbs = 0, monitor_len = 0; 3337 int i, ptr; 3338 bool dump_rbs = 
test_bit(STATUS_FW_ERROR, &trans->status) && 3339 !trans->trans_cfg->mq_rx_supported && 3340 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); 3341 3342 if (!dump_mask) 3343 return NULL; 3344 3345 /* transport dump header */ 3346 len = sizeof(*dump_data); 3347 3348 /* host commands */ 3349 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) 3350 len += sizeof(*data) + 3351 cmdq->n_window * (sizeof(*txcmd) + 3352 TFD_MAX_PAYLOAD_SIZE); 3353 3354 /* FW monitor */ 3355 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3356 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); 3357 3358 /* CSR registers */ 3359 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3360 len += sizeof(*data) + IWL_CSR_TO_DUMP; 3361 3362 /* FH registers */ 3363 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { 3364 if (trans->trans_cfg->gen2) 3365 len += sizeof(*data) + 3366 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) - 3367 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2)); 3368 else 3369 len += sizeof(*data) + 3370 (FH_MEM_UPPER_BOUND - 3371 FH_MEM_LOWER_BOUND); 3372 } 3373 3374 if (dump_rbs) { 3375 /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 3376 struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 3377 /* RBs */ 3378 num_rbs = 3379 le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) 3380 & 0x0FFF; 3381 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 3382 len += num_rbs * (sizeof(*data) + 3383 sizeof(struct iwl_fw_error_dump_rb) + 3384 (PAGE_SIZE << trans_pcie->rx_page_order)); 3385 } 3386 3387 /* Paged memory for gen2 HW */ 3388 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) 3389 for (i = 0; i < trans->init_dram.paging_cnt; i++) 3390 len += sizeof(*data) + 3391 sizeof(struct iwl_fw_error_dump_paging) + 3392 trans->init_dram.paging[i].size; 3393 3394 dump_data = vzalloc(len); 3395 if (!dump_data) 3396 return NULL; 3397 3398 len = 0; 3399 data = (void *)dump_data->data; 3400 3401 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { 3402 u16 tfd_size = trans->txqs.tfd.size; 3403 3404 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); 3405 txcmd = (void *)data->data; 3406 spin_lock_bh(&cmdq->lock); 3407 ptr = cmdq->write_ptr; 3408 for (i = 0; i < cmdq->n_window; i++) { 3409 u8 idx = iwl_txq_get_cmd_index(cmdq, ptr); 3410 u8 tfdidx; 3411 u32 caplen, cmdlen; 3412 3413 if (trans->trans_cfg->use_tfh) 3414 tfdidx = idx; 3415 else 3416 tfdidx = ptr; 3417 3418 cmdlen = iwl_trans_pcie_get_cmdlen(trans, 3419 (u8 *)cmdq->tfds + 3420 tfd_size * tfdidx); 3421 caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); 3422 3423 if (cmdlen) { 3424 len += sizeof(*txcmd) + caplen; 3425 txcmd->cmdlen = cpu_to_le32(cmdlen); 3426 txcmd->caplen = cpu_to_le32(caplen); 3427 memcpy(txcmd->data, cmdq->entries[idx].cmd, 3428 caplen); 3429 if (sanitize_ops && sanitize_ops->frob_hcmd) 3430 sanitize_ops->frob_hcmd(sanitize_ctx, 3431 txcmd->data, 3432 caplen); 3433 txcmd = (void *)((u8 *)txcmd->data + caplen); 3434 } 3435 3436 ptr = iwl_txq_dec_wrap(trans, ptr); 3437 } 3438 spin_unlock_bh(&cmdq->lock); 3439 3440 data->len = cpu_to_le32(len); 3441 len += sizeof(*data); 3442 data = iwl_fw_error_next_data(data); 3443 } 3444 3445 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) 3446 len += iwl_trans_pcie_dump_csr(trans, &data); 3447 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) 3448 len += iwl_trans_pcie_fh_regs_dump(trans, &data); 3449 if (dump_rbs) 3450 len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); 3451 3452 /* Paged memory for gen2 HW */ 3453 if (trans->trans_cfg->gen2 && 3454 dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { 
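/* one IWL_FW_ERROR_DUMP_PAGING entry per paging block; the length computed earlier already accounts for these */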
3455 for (i = 0; i < trans->init_dram.paging_cnt; i++) { 3456 struct iwl_fw_error_dump_paging *paging; 3457 u32 page_len = trans->init_dram.paging[i].size; 3458 3459 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); 3460 data->len = cpu_to_le32(sizeof(*paging) + page_len); 3461 paging = (void *)data->data; 3462 paging->index = cpu_to_le32(i); 3463 memcpy(paging->data, 3464 trans->init_dram.paging[i].block, page_len); 3465 data = iwl_fw_error_next_data(data); 3466 3467 len += sizeof(*data) + sizeof(*paging) + page_len; 3468 } 3469 } 3470 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) 3471 len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); 3472 3473 dump_data->len = len; 3474 3475 return dump_data; 3476 } 3477 3478 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) 3479 { 3480 if (enable) 3481 iwl_enable_interrupts(trans); 3482 else 3483 iwl_disable_interrupts(trans); 3484 } 3485 3486 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) 3487 { 3488 u32 inta_addr, sw_err_bit; 3489 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3490 3491 if (trans_pcie->msix_enabled) { 3492 inta_addr = CSR_MSIX_HW_INT_CAUSES_AD; 3493 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 3494 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ; 3495 else 3496 sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR; 3497 } else { 3498 inta_addr = CSR_INT; 3499 sw_err_bit = CSR_INT_BIT_SW_ERR; 3500 } 3501 3502 iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); 3503 } 3504 3505 #define IWL_TRANS_COMMON_OPS \ 3506 .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ 3507 .write8 = iwl_trans_pcie_write8, \ 3508 .write32 = iwl_trans_pcie_write32, \ 3509 .read32 = iwl_trans_pcie_read32, \ 3510 .read_prph = iwl_trans_pcie_read_prph, \ 3511 .write_prph = iwl_trans_pcie_write_prph, \ 3512 .read_mem = iwl_trans_pcie_read_mem, \ 3513 .write_mem = iwl_trans_pcie_write_mem, \ 3514 .read_config32 = iwl_trans_pcie_read_config32, \ 3515 .configure = iwl_trans_pcie_configure, \ 3516 .set_pmi = iwl_trans_pcie_set_pmi, \ 3517 .sw_reset = iwl_trans_pcie_sw_reset, \ 3518 .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ 3519 .release_nic_access = iwl_trans_pcie_release_nic_access, \ 3520 .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ 3521 .dump_data = iwl_trans_pcie_dump_data, \ 3522 .d3_suspend = iwl_trans_pcie_d3_suspend, \ 3523 .d3_resume = iwl_trans_pcie_d3_resume, \ 3524 .interrupts = iwl_trans_pci_interrupts, \ 3525 .sync_nmi = iwl_trans_pcie_sync_nmi, \ 3526 .imr_dma_data = iwl_trans_pcie_copy_imr \ 3527 3528 static const struct iwl_trans_ops trans_ops_pcie = { 3529 IWL_TRANS_COMMON_OPS, 3530 .start_hw = iwl_trans_pcie_start_hw, 3531 .fw_alive = iwl_trans_pcie_fw_alive, 3532 .start_fw = iwl_trans_pcie_start_fw, 3533 .stop_device = iwl_trans_pcie_stop_device, 3534 3535 .send_cmd = iwl_pcie_enqueue_hcmd, 3536 3537 .tx = iwl_trans_pcie_tx, 3538 .reclaim = iwl_txq_reclaim, 3539 3540 .txq_disable = iwl_trans_pcie_txq_disable, 3541 .txq_enable = iwl_trans_pcie_txq_enable, 3542 3543 .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, 3544 3545 .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, 3546 3547 .freeze_txq_timer = iwl_trans_txq_freeze_timer, 3548 .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, 3549 #ifdef CONFIG_IWLWIFI_DEBUGFS 3550 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3551 #endif 3552 }; 3553 3554 static const struct iwl_trans_ops trans_ops_pcie_gen2 = { 3555 IWL_TRANS_COMMON_OPS, 3556 .start_hw = iwl_trans_pcie_start_hw, 3557 
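/* gen2 devices use their own fw alive/start/stop flows: */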
.fw_alive = iwl_trans_pcie_gen2_fw_alive, 3558 .start_fw = iwl_trans_pcie_gen2_start_fw, 3559 .stop_device = iwl_trans_pcie_gen2_stop_device, 3560 3561 .send_cmd = iwl_pcie_gen2_enqueue_hcmd, 3562 3563 .tx = iwl_txq_gen2_tx, 3564 .reclaim = iwl_txq_reclaim, 3565 3566 .set_q_ptrs = iwl_txq_set_q_ptrs, 3567 3568 .txq_alloc = iwl_txq_dyn_alloc, 3569 .txq_free = iwl_txq_dyn_free, 3570 .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, 3571 .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, 3572 .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, 3573 .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, 3574 #ifdef CONFIG_IWLWIFI_DEBUGFS 3575 .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, 3576 #endif 3577 }; 3578 3579 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, 3580 const struct pci_device_id *ent, 3581 const struct iwl_cfg_trans_params *cfg_trans) 3582 { 3583 struct iwl_trans_pcie *trans_pcie; 3584 struct iwl_trans *trans; 3585 int ret, addr_size; 3586 const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; 3587 void __iomem * const *table; 3588 3589 if (!cfg_trans->gen2) 3590 ops = &trans_ops_pcie; 3591 3592 ret = pcim_enable_device(pdev); 3593 if (ret) 3594 return ERR_PTR(ret); 3595 3596 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, 3597 cfg_trans); 3598 if (!trans) 3599 return ERR_PTR(-ENOMEM); 3600 3601 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3602 3603 trans_pcie->trans = trans; 3604 trans_pcie->opmode_down = true; 3605 spin_lock_init(&trans_pcie->irq_lock); 3606 spin_lock_init(&trans_pcie->reg_lock); 3607 spin_lock_init(&trans_pcie->alloc_page_lock); 3608 mutex_init(&trans_pcie->mutex); 3609 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 3610 init_waitqueue_head(&trans_pcie->fw_reset_waitq); 3611 init_waitqueue_head(&trans_pcie->imr_waitq); 3612 3613 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator", 3614 WQ_HIGHPRI | WQ_UNBOUND, 1); 3615 if (!trans_pcie->rba.alloc_wq) { 3616 ret = -ENOMEM; 3617 goto out_free_trans; 3618 } 3619 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work); 3620 3621 trans_pcie->debug_rfkill = -1; 3622 3623 if (!cfg_trans->base_params->pcie_l1_allowed) { 3624 /* 3625 * W/A - seems to solve weird behavior. We need to remove this 3626 * if we don't want to stay in L1 all the time. This wastes a 3627 * lot of power. 
3628 */ 3629 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | 3630 PCIE_LINK_STATE_L1 | 3631 PCIE_LINK_STATE_CLKPM); 3632 } 3633 3634 trans_pcie->def_rx_queue = 0; 3635 3636 pci_set_master(pdev); 3637 3638 addr_size = trans->txqs.tfd.addr_size; 3639 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); 3640 if (ret) { 3641 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 3642 /* both attempts failed: */ 3643 if (ret) { 3644 dev_err(&pdev->dev, "No suitable DMA available\n"); 3645 goto out_no_pci; 3646 } 3647 } 3648 3649 ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME); 3650 if (ret) { 3651 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n"); 3652 goto out_no_pci; 3653 } 3654 3655 #if defined(__FreeBSD__) 3656 linuxkpi_pcim_want_to_use_bus_functions(pdev); 3657 #endif 3658 table = pcim_iomap_table(pdev); 3659 if (!table) { 3660 dev_err(&pdev->dev, "pcim_iomap_table failed\n"); 3661 ret = -ENOMEM; 3662 goto out_no_pci; 3663 } 3664 3665 trans_pcie->hw_base = table[0]; 3666 if (!trans_pcie->hw_base) { 3667 dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n"); 3668 ret = -ENODEV; 3669 goto out_no_pci; 3670 } 3671 3672 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3673 * PCI Tx retries from interfering with C3 CPU state */ 3674 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 3675 3676 trans_pcie->pci_dev = pdev; 3677 iwl_disable_interrupts(trans); 3678 3679 trans->hw_rev = iwl_read32(trans, CSR_HW_REV); 3680 if (trans->hw_rev == 0xffffffff) { 3681 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n"); 3682 ret = -EIO; 3683 goto out_no_pci; 3684 } 3685 3686 /* 3687 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have 3688 * changed, and now the revision step also includes bit 0-1 (no more 3689 * "dash" value). To keep hw_rev backwards compatible - we'll store it 3690 * in the old format. 
3691 */ 3692 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) 3693 trans->hw_rev_step = trans->hw_rev & 0xF; 3694 else 3695 trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2; 3696 3697 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev); 3698 3699 iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans); 3700 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 3701 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), 3702 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); 3703 3704 init_waitqueue_head(&trans_pcie->sx_waitq); 3705 3706 3707 if (trans_pcie->msix_enabled) { 3708 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie); 3709 if (ret) 3710 goto out_no_pci; 3711 } else { 3712 ret = iwl_pcie_alloc_ict(trans); 3713 if (ret) 3714 goto out_no_pci; 3715 3716 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq, 3717 iwl_pcie_isr, 3718 iwl_pcie_irq_handler, 3719 IRQF_SHARED, DRV_NAME, trans); 3720 if (ret) { 3721 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 3722 goto out_free_ict; 3723 } 3724 } 3725 3726 #ifdef CONFIG_IWLWIFI_DEBUGFS 3727 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; 3728 mutex_init(&trans_pcie->fw_mon_data.mutex); 3729 #endif 3730 3731 iwl_dbg_tlv_init(trans); 3732 3733 return trans; 3734 3735 out_free_ict: 3736 iwl_pcie_free_ict(trans); 3737 out_no_pci: 3738 destroy_workqueue(trans_pcie->rba.alloc_wq); 3739 out_free_trans: 3740 iwl_trans_free(trans); 3741 return ERR_PTR(ret); 3742 } 3743 3744 void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, 3745 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3746 { 3747 iwl_write_prph(trans, IMR_UREG_CHICK, 3748 iwl_read_prph(trans, IMR_UREG_CHICK) | 3749 IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK); 3750 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr); 3751 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB, 3752 (u32)(src_addr & 0xFFFFFFFF)); 3753 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB, 3754 iwl_get_dma_hi_addr(src_addr)); 3755 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt); 3756 iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL, 3757 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS | 3758 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS | 3759 IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK); 3760 } 3761 3762 int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, 3763 u32 dst_addr, u64 src_addr, u32 byte_cnt) 3764 { 3765 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 3766 int ret = -1; 3767 3768 trans_pcie->imr_status = IMR_D2S_REQUESTED; 3769 iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt); 3770 ret = wait_event_timeout(trans_pcie->imr_waitq, 3771 trans_pcie->imr_status != 3772 IMR_D2S_REQUESTED, 5 * HZ); 3773 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) { 3774 IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n"); 3775 iwl_trans_pcie_dump_regs(trans); 3776 return -ETIMEDOUT; 3777 } 3778 trans_pcie->imr_status = IMR_D2S_IDLE; 3779 return 0; 3780 } 3781