Lines Matching +full:dual +full:- +full:radio
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
22 #include "iwl-drv.h"
23 #include "iwl-trans.h"
24 #include "iwl-csr.h"
25 #include "iwl-prph.h"
26 #include "iwl-scd.h"
27 #include "iwl-agn-hw.h"
28 #include "fw/error-dump.h"
31 #include "mei/iwl-mei.h"
33 #include "iwl-fh.h"
34 #include "iwl-context-info-gen3.h"
47 struct pci_dev *pdev = trans_pcie->pci_dev;
51 if (trans_pcie->pcie_dbg_dumped_once)
68 prefix = (char *)buf + alloc_size - PREFIX_LEN;
108 if (!pdev->bus->self)
111 pdev = pdev->bus->self;
154 trans_pcie->pcie_dbg_dumped_once = 1;
160 /* Reset entire device - do controller reset (results in SHRD_HW_RST) */
161 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
179 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
181 if (!fw_mon->size)
184 dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
185 fw_mon->physical);
187 fw_mon->block = NULL;
188 fw_mon->physical = 0;
189 fw_mon->size = 0;
195 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
201 if (fw_mon->size) {
202 memset(fw_mon->block, 0, fw_mon->size);
207 for (power = max_power; power >= 11; power--) {
209 block = dma_alloc_coherent(trans->dev, size, &physical,
225 "Sorry - debug buffer is only %luK while you requested %luK\n",
226 (unsigned long)BIT(power - 10),
227 (unsigned long)BIT(max_power - 10));
229 fw_mon->block = block;
230 fw_mon->physical = physical;
231 fw_mon->size = size;
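The loop above (around file lines 207-231) keeps halving the requested power-of-two size until dma_alloc_coherent() succeeds, then warns if it had to settle for less than the caller asked for. A minimal userspace sketch of the same fallback pattern, using plain malloc() and illustrative names rather than the driver's DMA helpers:

#include <stdio.h>
#include <stdlib.h>

/* Fall back to progressively smaller power-of-two buffers, mirroring the
 * driver's walk from max_power down to a floor of 2^11 bytes.
 * Illustrative sketch only; not the iwlwifi implementation. */
static void *alloc_pow2_fallback(unsigned int max_power, size_t *out_size)
{
	unsigned int power;

	for (power = max_power; power >= 11; power--) {
		size_t size = (size_t)1 << power;
		void *block = malloc(size);

		if (!block)
			continue;
		if (power != max_power)
			fprintf(stderr, "only got %zuK, wanted %zuK\n",
				size >> 10, ((size_t)1 << max_power) >> 10);
		*out_size = size;
		return block;
	}

	*out_size = 0;
	return NULL;
}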
267 if (trans->cfg->apmg_not_supported)
270 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
296 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
297 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
299 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
300 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
301 IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
303 trans->ltr_enabled ? "En" : "Dis");
323 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
339 * wake device's PCI Express link L1a -> L0s
346 /* Configure analog phase-lock-loop before activating to D0A */
347 if (trans->trans_cfg->base_params->pll_cfg)
354 if (trans->cfg->host_interrupt_operation_mode) {
356 * This is a bit of an abuse - this is needed for 7260 / 3160
361 * consumes slightly more power (100uA) - but allows us to be sure
383 if (!trans->cfg->apmg_not_supported) {
388 /* Disable L1-Active */
397 set_bit(STATUS_DEVICE_ENABLED, &trans->status);
474 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
499 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
527 if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
531 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
534 else if (trans->trans_cfg->device_family >=
548 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
553 if (trans->cfg->lp_xtal_workaround) {
562 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
573 spin_lock_bh(&trans_pcie->irq_lock);
575 spin_unlock_bh(&trans_pcie->irq_lock);
582 iwl_op_mode_nic_config(trans->op_mode);
592 return -ENOMEM;
595 if (trans->trans_cfg->base_params->shadow_reg_enable) {
627 /* Note: returns standard 0/-ERROR code */
638 trans->csme_own = false;
656 trans->csme_own = false;
663 trans->csme_own = true;
664 if (trans->trans_cfg->device_family !=
669 return -EBUSY;
721 trans_pcie->ucode_write_complete = false;
724 return -EIO;
730 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
731 trans_pcie->ucode_write_complete, 5 * HZ);
735 return -ETIMEDOUT;
746 u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
752 v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
757 v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
760 return -ENOMEM;
763 for (offset = 0; offset < section->len; offset += chunk_sz) {
767 copy_size = min_t(u32, chunk_sz, section->len - offset);
768 dst_addr = section->offset + offset;
778 memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
794 dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
815 for (i = *first_ucode_section; i < image->num_sec; i++) {
819 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
821 * PAGING_SEPARATOR_SECTION delimiter - separate between
824 if (!image->sec[i].data ||
825 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
826 image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
833 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
849 if (trans->trans_cfg->gen2) {
881 for (i = *first_ucode_section; i < image->num_sec; i++) {
885 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
887 * PAGING_SEPARATOR_SECTION delimiter - separate between
890 if (!image->sec[i].data ||
891 image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
892 image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
899 ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
913 &trans->dbg.fw_mon_cfg[alloc_id];
919 if (le32_to_cpu(fw_mon_cfg->buf_location) ==
929 if (le32_to_cpu(fw_mon_cfg->buf_location) !=
931 !trans->dbg.fw_mon_ini[alloc_id].num_frags)
934 frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
940 frag->physical >> MON_BUFF_SHIFT_VER2);
942 (frag->physical + frag->size - 256) >>
948 const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
949 const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
958 get_fw_dbg_mode_string(dest->monitor_mode));
960 if (dest->monitor_mode == EXTERNAL_MODE)
961 iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
965 for (i = 0; i < trans->dbg.n_dest_reg; i++) {
966 u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
967 u32 val = le32_to_cpu(dest->reg_ops[i].val);
969 switch (dest->reg_ops[i].op) {
997 IWL_ERR(trans, "FW debug - unknown OP %d\n",
998 dest->reg_ops[i].op);
1004 if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
1005 iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
1006 fw_mon->physical >> dest->base_shift);
1007 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1008 iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
1009 (fw_mon->physical + fw_mon->size -
1010 256) >> dest->end_shift);
1012 iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
1013 (fw_mon->physical + fw_mon->size) >>
1014 dest->end_shift);
1025 image->is_dual_cpus ? "Dual" : "Single");
1032 if (image->is_dual_cpus) {
1063 image->is_dual_cpus ? "Dual" : "Single");
1097 bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1101 set_bit(STATUS_RFKILL_HW, &trans->status);
1102 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1104 clear_bit(STATUS_RFKILL_HW, &trans->status);
1105 if (trans_pcie->opmode_down)
1106 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1109 report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1128 ((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 : \
1175 int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1179 * the first interrupt vector will serve non-RX and FBQ causes.
1183 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1195 trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1199 * The first RX queue - fallback queue, which is designated for
1202 * the other (N - 2) interrupt vectors.
1205 for (idx = 1; idx < trans->num_rx_queues; idx++) {
1207 MSIX_FH_INT_CAUSES_Q(idx - offset));
1213 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
1217 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
1223 struct iwl_trans *trans = trans_pcie->trans;
1225 if (!trans_pcie->msix_enabled) {
1226 if (trans->trans_cfg->mq_rx_supported &&
1227 test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1237 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1254 struct iwl_trans *trans = trans_pcie->trans;
1258 if (!trans_pcie->msix_enabled)
1261 trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1262 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1263 trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1264 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1271 lockdep_assert_held(&trans_pcie->mutex);
1273 if (trans_pcie->is_down)
1276 trans_pcie->is_down = true;
1291 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1300 /* Power-down device's busmaster DMA clocks */
1301 if (!trans->cfg->apmg_not_supported) {
1309 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1319 /* re-take ownership to prevent other users from stealing the device */
1323 * Upon stop, the IVAR table gets erased, so msi-x won't
1324 * work. This causes a bug in RF-KILL flows, since the interrupt
1325 * that enables radio won't fire on the correct irq, and the
1336 * should be masked. Re-ACK all the interrupts here.
1341 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1342 clear_bit(STATUS_INT_ENABLED, &trans->status);
1343 clear_bit(STATUS_TPOWER_PMI, &trans->status);
1356 if (trans_pcie->msix_enabled) {
1359 for (i = 0; i < trans_pcie->alloc_vecs; i++)
1360 synchronize_irq(trans_pcie->msix_entries[i].vector);
1362 synchronize_irq(trans_pcie->pci_dev->irq);
1376 return -EIO;
1384 * We enabled the RF-Kill interrupt and the handler may very
1393 mutex_lock(&trans_pcie->mutex);
1398 ret = -ERFKILL;
1403 if (trans_pcie->is_down) {
1406 ret = -EIO;
1426 * by the RF-Kill interrupt (hence mask all the interrupts besides the
1428 * RF-Kill switch is toggled, we will find out after having loaded
1438 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1443 /* re-check RF-Kill state since we may have missed the interrupt */
1446 ret = -ERFKILL;
1449 mutex_unlock(&trans_pcie->mutex);
1478 set_bit(STATUS_RFKILL_HW, &trans->status);
1479 set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1481 clear_bit(STATUS_RFKILL_HW, &trans->status);
1482 clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1493 iwl_op_mode_time_point(trans->op_mode,
1497 mutex_lock(&trans_pcie->mutex);
1498 trans_pcie->opmode_down = true;
1499 was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1502 mutex_unlock(&trans_pcie->mutex);
1510 lockdep_assert_held(&trans_pcie->mutex);
1512 IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1514 if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
1515 !WARN_ON(trans->trans_cfg->gen2))
1535 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1549 * reset TX queues -- some of their registers reset during S3
1564 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
1568 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1575 ret = wait_event_timeout(trans_pcie->sx_waitq,
1576 trans_pcie->sx_complete, 2 * HZ);
1579 trans_pcie->sx_complete = false;
1584 return -ETIMEDOUT;
1623 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
1637 * Also enables interrupts - none will happen as
1642 if (!trans_pcie->msix_enabled)
1688 if (!cfg_trans->mq_rx_supported)
1691 if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
1696 trans_pcie->msix_entries[i].entry = i;
1698 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1703 "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
1707 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1710 "MSI-X enabled. %d interrupt vectors were allocated\n",
1720 if (num_irqs <= max_irqs - 2) {
1721 trans_pcie->trans->num_rx_queues = num_irqs + 1;
1722 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1724 } else if (num_irqs == max_irqs - 1) {
1725 trans_pcie->trans->num_rx_queues = num_irqs;
1726 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1728 trans_pcie->trans->num_rx_queues = num_irqs - 1;
1732 "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
1733 trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
1735 WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1737 trans_pcie->alloc_vecs = num_irqs;
1738 trans_pcie->msix_enabled = true;
1744 dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
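The branch at file lines 1720-1728 turns the number of MSI-X vectors the platform actually granted into an RX-queue count plus a mask of causes that have to share the first vector. A standalone sketch of that decision, with illustrative names standing in for IWL_SHARED_IRQ_NON_RX / IWL_SHARED_IRQ_FIRST_RSS:

/* Fewer granted vectors means more causes share vector 0.
 * Constants and names below are illustrative, not the driver's. */
#define SHARE_NON_RX	0x1	/* vector 0 also serves non-RX/FBQ causes */
#define SHARE_FIRST_RSS	0x2	/* vector 0 also serves the first RSS queue */

static void plan_msix_vectors(int num_irqs, int max_irqs,
			      int *num_rx_queues, unsigned int *shared_mask)
{
	if (num_irqs <= max_irqs - 2) {
		/* short two or more vectors: share both causes on vector 0 */
		*num_rx_queues = num_irqs + 1;
		*shared_mask = SHARE_NON_RX | SHARE_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		/* short one vector: only the non-RX causes share vector 0 */
		*num_rx_queues = num_irqs;
		*shared_mask = SHARE_NON_RX;
	} else {
		/* got everything requested: one dedicated non-RX vector */
		*num_rx_queues = num_irqs - 1;
		*shared_mask = 0;
	}
}

In the full driver, max_irqs is derived from the online CPU count; that derivation lies outside the lines matched here, so running short of vectors only costs sharing, not functionality.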
1760 i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1761 iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1766 * (i.e. return will be > i - 1).
1768 cpu = cpumask_next(i - offset, cpu_online_mask);
1769 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1770 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1771 &trans_pcie->affinity_mask[i]);
1773 IWL_ERR(trans_pcie->trans,
1775 trans_pcie->msix_entries[i].vector);
1785 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1788 const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1791 return -ENOMEM;
1793 msix_entry = &trans_pcie->msix_entries[i];
1794 ret = devm_request_threaded_irq(&pdev->dev,
1795 msix_entry->vector,
1797 (i == trans_pcie->def_irq) ?
1804 IWL_ERR(trans_pcie->trans,
1810 iwl_pcie_irq_set_affinity(trans_pcie->trans);
1819 switch (trans->trans_cfg->device_family) {
1837 return -EPERM;
1872 lockdep_assert_held(&trans_pcie->mutex);
1888 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1889 trans->trans_cfg->integrated) {
1904 trans_pcie->opmode_down = false;
1907 trans_pcie->is_down = false;
1920 mutex_lock(&trans_pcie->mutex);
1922 mutex_unlock(&trans_pcie->mutex);
1931 mutex_lock(&trans_pcie->mutex);
1933 /* disable interrupts - don't enable HW RF kill interrupt */
1942 mutex_unlock(&trans_pcie->mutex);
1950 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1955 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1960 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1967 bus_write_1((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1974 bus_write_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs, val);
1981 v = bus_read_4((struct resource *)IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base, ofs);
1989 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
2018 /* free all first - we might be reconfigured for a different size */
2021 trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
2022 trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
2023 trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
2024 trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
2025 trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
2026 trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
2028 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
2029 trans_pcie->n_no_reclaim_cmds = 0;
2031 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
2032 if (trans_pcie->n_no_reclaim_cmds)
2033 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
2034 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
2036 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
2037 trans_pcie->rx_page_order =
2038 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
2039 trans_pcie->rx_buf_bytes =
2040 iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
2041 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
2042 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
2043 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
2045 trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
2046 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
2048 trans->command_groups = trans_cfg->command_groups;
2049 trans->command_groups_size = trans_cfg->command_groups_size;
2052 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
2059 struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
2062 for (i = 0; i < dram_regions->n_regions; i++) {
2063 dma_free_coherent(dev, dram_regions->drams[i].size,
2064 dram_regions->drams[i].block,
2065 dram_regions->drams[i].physical);
2067 dram_regions->n_regions = 0;
2070 if (desc_dram->block) {
2071 dma_free_coherent(dev, desc_dram->size,
2072 desc_dram->block,
2073 desc_dram->physical);
2080 iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
2094 ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
2098 memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
2109 if (trans->trans_cfg->gen2)
2115 if (trans_pcie->rba.alloc_wq) {
2116 destroy_workqueue(trans_pcie->rba.alloc_wq);
2117 trans_pcie->rba.alloc_wq = NULL;
2120 if (trans_pcie->msix_enabled) {
2121 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
2123 trans_pcie->msix_entries[i].vector,
2127 trans_pcie->msix_enabled = false;
2132 free_netdev(trans_pcie->napi_dev);
2138 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
2139 trans->dev);
2140 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
2141 trans->dev);
2143 mutex_destroy(&trans_pcie->mutex);
2146 if (trans_pcie->txqs.tso_hdr_page) {
2149 per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
2151 if (p && p->page)
2152 __free_page(p->page);
2155 free_percpu(trans_pcie->txqs.tso_hdr_page);
2172 struct pci_dev *pdev = removal->pdev;
2178 bus = pdev->bus;
2183 dev_err(&pdev->dev, "Device gone - attempting removal\n");
2185 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
2190 if (removal->rescan) {
2192 if (bus->parent)
2193 bus = bus->parent;
2209 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2212 IWL_ERR(trans, "Device gone - scheduling removal!\n");
2223 "Module is being unloaded - abort\n");
2236 set_bit(STATUS_TRANS_DEAD, &trans->status);
2238 removal->pdev = to_pci_dev(trans->dev);
2239 removal->rescan = rescan;
2240 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2241 pci_dev_get(removal->pdev);
2242 schedule_work(&removal->work);
2259 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2262 spin_lock(&trans_pcie->reg_lock);
2264 if (trans_pcie->cmd_hold_nic_awake)
2267 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2275 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2283 * host DRAM when sleeping/waking for power-saving.
2295 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2314 spin_unlock(&trans_pcie->reg_lock);
2320 * Fool sparse by faking that we release the lock - sparse will
2323 __release(&trans_pcie->reg_lock);
2345 lockdep_assert_held(&trans_pcie->reg_lock);
2348 * Fool sparse by faking that we acquire the lock - sparse will
2351 __acquire(&trans_pcie->reg_lock);
2353 if (trans_pcie->cmd_hold_nic_awake)
2355 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2368 spin_unlock_bh(&trans_pcie->reg_lock);
2399 return -EIO;
2414 return -EBUSY;
2434 ret = -EBUSY;
2442 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2453 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2454 return -EINVAL;
2456 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2457 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2458 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2459 data->fr_bd_wid = 0;
2473 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2474 return -ENODEV;
2476 if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
2477 return -EINVAL;
2480 txq = trans_pcie->txqs.txq[txq_idx];
2482 spin_lock_bh(&txq->lock);
2483 overflow_tx = txq->overflow_tx ||
2484 !skb_queue_empty(&txq->overflow_q);
2485 spin_unlock_bh(&txq->lock);
2487 wr_ptr = READ_ONCE(txq->write_ptr);
2489 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2493 u8 write_ptr = READ_ONCE(txq->write_ptr);
2501 "WR pointer moved while flushing %d -> %d\n",
2503 return -ETIMEDOUT;
2508 spin_lock_bh(&txq->lock);
2509 overflow_tx = txq->overflow_tx ||
2510 !skb_queue_empty(&txq->overflow_q);
2511 spin_unlock_bh(&txq->lock);
2514 if (txq->read_ptr != txq->write_ptr) {
2518 return -ETIMEDOUT;
2534 cnt < trans->trans_cfg->base_params->num_of_queues;
2537 if (cnt == trans_pcie->txqs.cmd.q_id)
2539 if (!test_bit(cnt, trans_pcie->txqs.queue_used))
2557 spin_lock_bh(&trans_pcie->reg_lock);
2559 spin_unlock_bh(&trans_pcie->reg_lock);
2675 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2678 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2684 state->pos = *pos;
2691 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2694 *pos = ++state->pos;
2696 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2709 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2711 struct iwl_trans *trans = priv->trans;
2713 struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
2716 (unsigned int)state->pos,
2717 !!test_bit(state->pos, trans_pcie->txqs.queue_used),
2718 !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
2722 txq->read_ptr, txq->write_ptr,
2723 txq->need_update, txq->frozen,
2724 txq->n_window, txq->ampdu);
2728 if (state->pos == trans_pcie->txqs.cmd.q_id)
2750 return -ENOMEM;
2752 priv->trans = inode->i_private;
2760 struct iwl_trans *trans = file->private_data;
2766 bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2768 if (!trans_pcie->rxq)
2769 return -EAGAIN;
2773 return -ENOMEM;
2775 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2776 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2778 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2780 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2781 rxq->read);
2782 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2783 rxq->write);
2784 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2785 rxq->write_actual);
2786 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2787 rxq->need_update);
2788 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2789 rxq->free_count);
2790 if (rxq->rb_stts) {
2792 pos += scnprintf(buf + pos, bufsz - pos,
2795 pos += scnprintf(buf + pos, bufsz - pos,
2809 struct iwl_trans *trans = file->private_data;
2811 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2820 return -ENOMEM;
2822 pos += scnprintf(buf + pos, bufsz - pos,
2825 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2826 isr_stats->hw);
2827 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2828 isr_stats->sw);
2829 if (isr_stats->sw || isr_stats->hw) {
2830 pos += scnprintf(buf + pos, bufsz - pos,
2832 isr_stats->err_code);
2835 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2836 isr_stats->sch);
2837 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2838 isr_stats->alive);
2840 pos += scnprintf(buf + pos, bufsz - pos,
2841 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2843 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2844 isr_stats->ctkill);
2846 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2847 isr_stats->wakeup);
2849 pos += scnprintf(buf + pos, bufsz - pos,
2850 "Rx command responses:\t\t %u\n", isr_stats->rx);
2852 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2853 isr_stats->tx);
2855 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2856 isr_stats->unhandled);
2867 struct iwl_trans *trans = file->private_data;
2869 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2886 struct iwl_trans *trans = file->private_data;
2897 struct iwl_trans *trans = file->private_data;
2905 return -EINVAL;
2915 struct iwl_trans *trans = file->private_data;
2921 trans_pcie->debug_rfkill,
2932 struct iwl_trans *trans = file->private_data;
2940 if (new_value == trans_pcie->debug_rfkill)
2942 IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2943 trans_pcie->debug_rfkill, new_value);
2944 trans_pcie->debug_rfkill = new_value;
2953 struct iwl_trans *trans = inode->i_private;
2956 if (!trans->dbg.dest_tlv ||
2957 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2959 return -ENOENT;
2962 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2963 return -EBUSY;
2965 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2973 IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2975 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2976 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2984 ssize_t buf_size_left = count - *bytes_copied;
2986 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2990 *size -= copy_to_user(user_buf, buf, *size);
3002 struct iwl_trans *trans = file->private_data;
3004 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
3005 struct cont_rec *data = &trans_pcie->fw_mon_data;
3010 if (trans->dbg.dest_tlv) {
3012 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3013 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3019 if (unlikely(!trans->dbg.rec_on))
3022 mutex_lock(&data->mutex);
3023 if (data->state ==
3025 mutex_unlock(&data->mutex);
3033 if (data->prev_wrap_cnt == wrap_cnt) {
3034 size = write_ptr - data->prev_wr_ptr;
3035 curr_buf = cpu_addr + data->prev_wr_ptr;
3039 data->prev_wr_ptr += size;
3041 } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3042 write_ptr < data->prev_wr_ptr) {
3043 size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
3044 curr_buf = cpu_addr + data->prev_wr_ptr;
3048 data->prev_wr_ptr += size;
3055 data->prev_wr_ptr = size;
3056 data->prev_wrap_cnt++;
3059 if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3060 write_ptr > data->prev_wr_ptr)
3063 else if (!unlikely(data->prev_wrap_cnt == 0 &&
3064 data->prev_wr_ptr == 0))
3072 data->prev_wr_ptr = size;
3073 data->prev_wrap_cnt = wrap_cnt;
3076 mutex_unlock(&data->mutex);
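The wrap-count bookkeeping at file lines 3033-3073 decides how much of the circular firmware monitor buffer can be handed to userspace: an unchanged wrap count means read from the previously saved pointer up to the hardware write pointer, one wrap ahead means drain the tail of the buffer first, anything else means the data was overrun. A simplified sketch of just that size calculation (names are illustrative; no copying, locking, or special cases):

#include <stddef.h>

struct mon_pos {
	size_t wr_ptr;		/* byte offset of the next write */
	unsigned int wrap_cnt;	/* how many times the buffer has wrapped */
};

/* Return how many bytes can be copied out in one step, starting at
 * prev->wr_ptr. Simplified from the driver's accounting. */
static size_t mon_readable(const struct mon_pos *prev,
			   const struct mon_pos *hw, size_t buf_size)
{
	if (hw->wrap_cnt == prev->wrap_cnt)
		return hw->wr_ptr - prev->wr_ptr;

	if (hw->wrap_cnt == prev->wrap_cnt + 1 && hw->wr_ptr < prev->wr_ptr)
		return buf_size - prev->wr_ptr;	/* tail first, wrap on next call */

	return 0;	/* overrun (or unknown state): nothing safely readable */
}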
3085 struct iwl_trans *trans = file->private_data;
3088 if (!trans_pcie->rf_name[0])
3089 return -ENODEV;
3092 trans_pcie->rf_name,
3093 strlen(trans_pcie->rf_name));
3120 struct dentry *dir = trans->dbgfs_dir;
3135 struct cont_rec *data = &trans_pcie->fw_mon_data;
3137 mutex_lock(&data->mutex);
3138 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
3139 mutex_unlock(&data->mutex);
3149 for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
3160 int max_len = trans_pcie->rx_buf_bytes;
3161 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3162 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3165 spin_lock_bh(&rxq->lock);
3169 for (i = rxq->read, j = 0;
3172 struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
3175 dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
3180 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
3181 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
3182 rb = (void *)(*data)->data;
3183 rb->index = cpu_to_le32(i);
3184 memcpy(rb->data, page_address(rxb->page), max_len);
3189 spin_unlock_bh(&rxq->lock);
3202 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
3203 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3204 val = (void *)(*data)->data;
3217 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3224 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3225 (*data)->len = cpu_to_le32(fh_regs_len);
3226 val = (void *)(*data)->data;
3228 if (!trans->trans_cfg->gen2)
3252 u32 *buffer = (u32 *)fw_mon_data->data;
3275 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3280 } else if (trans->dbg.dest_tlv) {
3281 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3282 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3283 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3291 fw_mon_data->fw_mon_cycle_cnt =
3293 fw_mon_data->fw_mon_base_ptr =
3295 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3296 fw_mon_data->fw_mon_base_high_ptr =
3302 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3310 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3313 if (trans->dbg.dest_tlv ||
3314 (fw_mon->size &&
3315 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3316 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3319 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3320 fw_mon_data = (void *)(*data)->data;
3325 if (fw_mon->size) {
3326 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3327 monitor_len = fw_mon->size;
3328 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3329 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3334 if (trans->dbg.dest_tlv->version) {
3337 trans->dbg.dest_tlv->base_shift;
3339 base += trans->cfg->smem_offset;
3342 trans->dbg.dest_tlv->base_shift;
3345 iwl_trans_read_mem(trans, base, fw_mon_data->data,
3347 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3353 /* Didn't match anything - output no monitor data */
3358 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3366 if (trans->dbg.fw_mon.size) {
3369 trans->dbg.fw_mon.size;
3370 return trans->dbg.fw_mon.size;
3371 } else if (trans->dbg.dest_tlv) {
3374 if (trans->dbg.dest_tlv->version == 1) {
3375 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3378 trans->dbg.dest_tlv->base_shift;
3380 base += trans->cfg->smem_offset;
3384 trans->dbg.dest_tlv->end_shift;
3387 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3388 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3391 trans->dbg.dest_tlv->base_shift;
3393 trans->dbg.dest_tlv->end_shift;
3396 if (trans->trans_cfg->device_family >=
3398 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3399 end += (1 << trans->dbg.dest_tlv->end_shift);
3400 monitor_len = end - base;
3417 struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
3422 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3423 !trans->trans_cfg->mq_rx_supported &&
3435 cmdq->n_window * (sizeof(*txcmd) +
3448 if (trans->trans_cfg->gen2)
3450 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3454 (FH_MEM_UPPER_BOUND -
3459 /* Dump RBs is supported only for pre-9000 devices (1 queue) */
3460 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3463 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3466 (PAGE_SIZE << trans_pcie->rx_page_order));
3470 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3471 for (i = 0; i < trans->init_dram.paging_cnt; i++)
3474 trans->init_dram.paging[i].size;
3481 data = (void *)dump_data->data;
3484 u16 tfd_size = trans_pcie->txqs.tfd.size;
3486 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3487 txcmd = (void *)data->data;
3488 spin_lock_bh(&cmdq->lock);
3489 ptr = cmdq->write_ptr;
3490 for (i = 0; i < cmdq->n_window; i++) {
3495 if (trans->trans_cfg->gen2)
3501 (u8 *)cmdq->tfds +
3507 txcmd->cmdlen = cpu_to_le32(cmdlen);
3508 txcmd->caplen = cpu_to_le32(caplen);
3509 memcpy(txcmd->data, cmdq->entries[idx].cmd,
3511 if (sanitize_ops && sanitize_ops->frob_hcmd)
3512 sanitize_ops->frob_hcmd(sanitize_ctx,
3513 txcmd->data,
3515 txcmd = (void *)((u8 *)txcmd->data + caplen);
3520 spin_unlock_bh(&cmdq->lock);
3522 data->len = cpu_to_le32(len);
3535 if (trans->trans_cfg->gen2 &&
3537 for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3539 u32 page_len = trans->init_dram.paging[i].size;
3541 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3542 data->len = cpu_to_le32(sizeof(*paging) + page_len);
3543 paging = (void *)data->data;
3544 paging->index = cpu_to_le32(i);
3545 memcpy(paging->data,
3546 trans->init_dram.paging[i].block, page_len);
3555 dump_data->len = len;
3573 if (trans_pcie->msix_enabled) {
3575 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
3609 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
3612 return ERR_PTR(-ENOMEM);
3616 if (trans->trans_cfg->gen2) {
3617 trans_pcie->txqs.tfd.addr_size = 64;
3618 trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
3619 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
3621 trans_pcie->txqs.tfd.addr_size = 36;
3622 trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
3623 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
3625 trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
3628 trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
3629 if (!trans_pcie->txqs.tso_hdr_page) {
3630 ret = -ENOMEM;
3635 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
3636 trans_pcie->txqs.bc_tbl_size =
3638 else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
3639 trans_pcie->txqs.bc_tbl_size =
3642 trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
3644 * For gen2 devices, we use a single allocation for each byte-count
3648 if (trans->trans_cfg->gen2) {
3649 trans_pcie->txqs.bc_pool =
3650 dmam_pool_create("iwlwifi:bc", trans->dev,
3651 trans_pcie->txqs.bc_tbl_size,
3653 if (!trans_pcie->txqs.bc_pool) {
3654 ret = -ENOMEM;
3660 WARN_ON(trans_pcie->txqs.tfd.addr_size !=
3661 (trans->trans_cfg->gen2 ? 64 : 36));
3663 /* Initialize NAPI here - it should be before registering to mac80211
3666 trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
3667 if (!trans_pcie->napi_dev) {
3668 ret = -ENOMEM;
3672 priv = netdev_priv(trans_pcie->napi_dev);
3675 trans_pcie->trans = trans;
3676 trans_pcie->opmode_down = true;
3677 spin_lock_init(&trans_pcie->irq_lock);
3678 spin_lock_init(&trans_pcie->reg_lock);
3679 spin_lock_init(&trans_pcie->alloc_page_lock);
3680 mutex_init(&trans_pcie->mutex);
3681 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3682 init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3683 init_waitqueue_head(&trans_pcie->imr_waitq);
3685 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3687 if (!trans_pcie->rba.alloc_wq) {
3688 ret = -ENOMEM;
3691 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3693 trans_pcie->debug_rfkill = -1;
3695 if (!cfg_trans->base_params->pcie_l1_allowed) {
3697 * W/A - seems to solve weird behavior. We need to remove this
3708 addr_size = trans_pcie->txqs.tfd.addr_size;
3709 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
3711 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3714 dev_err(&pdev->dev, "No suitable DMA available\n");
3721 dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3730 dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3731 ret = -ENOMEM;
3735 trans_pcie->hw_base = table[0];
3736 if (!trans_pcie->hw_base) {
3737 dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
3738 ret = -ENODEV;
3746 trans_pcie->pci_dev = pdev;
3749 trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3750 if (trans->hw_rev == 0xffffffff) {
3751 dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3752 ret = -EIO;
3758 * changed, and now the revision step also includes bit 0-1 (no more
3759 * "dash" value). To keep hw_rev backwards compatible - we'll store it
3762 if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3763 trans->hw_rev_step = trans->hw_rev & 0xF;
3765 trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;
3767 IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
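The comment at file lines 3758-3759 and the branch below it cover the changed encoding of the revision step: on family 8000 and later the step occupies bits 0-3 of CSR_HW_REV, while older devices kept it in bits 2-3 only. A tiny worked example of the two extractions (the hw_rev value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int hw_rev = 0x354;	/* example value only */

	/* family >= 8000: the step is the low nibble (bits 0-3) */
	printf("new-style step: 0x%x\n", hw_rev & 0xF);
	/* older families: only bits 2-3 carried the step */
	printf("old-style step: 0x%x\n", (hw_rev & 0xC) >> 2);
	return 0;
}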
3770 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3771 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3772 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3774 init_waitqueue_head(&trans_pcie->sx_waitq);
3780 if (trans_pcie->msix_enabled) {
3789 ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3794 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3800 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3801 mutex_init(&trans_pcie->fw_mon_data.mutex);
3811 destroy_workqueue(trans_pcie->rba.alloc_wq);
3813 free_netdev(trans_pcie->napi_dev);
3816 free_percpu(trans_pcie->txqs.tso_hdr_page);
3845 int ret = -1;
3847 trans_pcie->imr_status = IMR_D2S_REQUESTED;
3849 ret = wait_event_timeout(trans_pcie->imr_waitq,
3850 trans_pcie->imr_status !=
3852 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
3855 return -ETIMEDOUT;
3857 trans_pcie->imr_status = IMR_D2S_IDLE;