Lines Matching defs:trans_pcie
46 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
47 struct pci_dev *pdev = trans_pcie->pci_dev;
51 if (trans_pcie->pcie_dbg_dumped_once)
154 trans_pcie->pcie_dbg_dumped_once = 1;
285 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
296 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
299 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
569 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
573 spin_lock_bh(&trans_pcie->irq_lock);
575 spin_unlock_bh(&trans_pcie->irq_lock);
718 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
721 trans_pcie->ucode_write_complete = false;
730 ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
731 trans_pcie->ucode_write_complete, 5 * HZ);
1095 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1105 if (trans_pcie->opmode_down)
1174 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1175 int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1193 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1195 trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1213 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
1217 if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
1221 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
1223 struct iwl_trans *trans = trans_pcie->trans;
1225 if (!trans_pcie->msix_enabled) {
1252 static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1254 struct iwl_trans *trans = trans_pcie->trans;
1256 iwl_pcie_conf_msix_hw(trans_pcie);
1258 if (!trans_pcie->msix_enabled)
1261 trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1262 trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1263 trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1264 trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1269 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1271 lockdep_assert_held(&trans_pcie->mutex);
1273 if (trans_pcie->is_down)
1276 trans_pcie->is_down = true;
1329 iwl_pcie_conf_msix_hw(trans_pcie);
1354 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1356 if (trans_pcie->msix_enabled) {
1359 for (i = 0; i < trans_pcie->alloc_vecs; i++)
1360 synchronize_irq(trans_pcie->msix_entries[i].vector);
1362 synchronize_irq(trans_pcie->pci_dev->irq);
1369 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1393 mutex_lock(&trans_pcie->mutex);
1403 if (trans_pcie->is_down) {
1449 mutex_unlock(&trans_pcie->mutex);
1490 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1497 mutex_lock(&trans_pcie->mutex);
1498 trans_pcie->opmode_down = true;
1502 mutex_unlock(&trans_pcie->mutex);
1507 struct iwl_trans_pcie __maybe_unused *trans_pcie =
1510 lockdep_assert_held(&trans_pcie->mutex);
1561 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1575 ret = wait_event_timeout(trans_pcie->sx_waitq,
1576 trans_pcie->sx_complete, 2 * HZ);
1579 trans_pcie->sx_complete = false;
1612 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1641 iwl_pcie_conf_msix_hw(trans_pcie);
1642 if (!trans_pcie->msix_enabled)
1683 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1696 trans_pcie->msix_entries[i].entry = i;
1698 num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1707 trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1721 trans_pcie->trans->num_rx_queues = num_irqs + 1;
1722 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1725 trans_pcie->trans->num_rx_queues = num_irqs;
1726 trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1728 trans_pcie->trans->num_rx_queues = num_irqs - 1;
1733 trans_pcie->trans->num_rx_queues, trans_pcie->shared_vec_mask);
1735 WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1737 trans_pcie->alloc_vecs = num_irqs;
1738 trans_pcie->msix_enabled = true;
1758 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1760 i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1761 iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1769 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1770 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1771 &trans_pcie->affinity_mask[i]);
1773 IWL_ERR(trans_pcie->trans,
1775 trans_pcie->msix_entries[i].vector);
1781 struct iwl_trans_pcie *trans_pcie)
1785 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1788 const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1793 msix_entry = &trans_pcie->msix_entries[i];
1797 (i == trans_pcie->def_irq) ?
1804 IWL_ERR(trans_pcie->trans,
1810 iwl_pcie_irq_set_affinity(trans_pcie->trans);
1869 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1872 lockdep_assert_held(&trans_pcie->mutex);
1899 iwl_pcie_init_msix(trans_pcie);
1904 trans_pcie->opmode_down = false;
1907 trans_pcie->is_down = false;
1917 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1920 mutex_lock(&trans_pcie->mutex);
1922 mutex_unlock(&trans_pcie->mutex);
1929 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1931 mutex_lock(&trans_pcie->mutex);
1942 mutex_unlock(&trans_pcie->mutex);
2016 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2021 trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
2022 trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
2023 trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
2024 trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
2025 trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
2026 trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
2029 trans_pcie->n_no_reclaim_cmds = 0;
2031 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
2032 if (trans_pcie->n_no_reclaim_cmds)
2033 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
2034 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
2036 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
2037 trans_pcie->rx_page_order =
2038 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
2039 trans_pcie->rx_buf_bytes =
2040 iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
2041 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
2043 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
2045 trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
2046 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
2052 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
2104 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2115 if (trans_pcie->rba.alloc_wq) {
2116 destroy_workqueue(trans_pcie->rba.alloc_wq);
2117 trans_pcie->rba.alloc_wq = NULL;
2120 if (trans_pcie->msix_enabled) {
2121 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
2123 trans_pcie->msix_entries[i].vector,
2127 trans_pcie->msix_enabled = false;
2132 free_netdev(trans_pcie->napi_dev);
2138 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
2140 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
2143 mutex_destroy(&trans_pcie->mutex);
2146 if (trans_pcie->txqs.tso_hdr_page) {
2149 per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
2155 free_percpu(trans_pcie->txqs.tso_hdr_page);
2253 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2262 spin_lock(&trans_pcie->reg_lock);
2264 if (trans_pcie->cmd_hold_nic_awake)
2314 spin_unlock(&trans_pcie->reg_lock);
2323 __release(&trans_pcie->reg_lock);
2343 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2345 lockdep_assert_held(&trans_pcie->reg_lock);
2351 __acquire(&trans_pcie->reg_lock);
2353 if (trans_pcie->cmd_hold_nic_awake)
2368 spin_unlock_bh(&trans_pcie->reg_lock);
2451 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2453 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2456 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2457 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2458 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2466 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2476 if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
2480 txq = trans_pcie->txqs.txq[txq_idx];
2528 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2537 if (cnt == trans_pcie->txqs.cmd.q_id)
2539 if (!test_bit(cnt, trans_pcie->txqs.queue_used))
2555 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2557 spin_lock_bh(&trans_pcie->reg_lock);
2559 spin_unlock_bh(&trans_pcie->reg_lock);
2712 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2713 struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
2717 !!test_bit(state->pos, trans_pcie->txqs.queue_used),
2718 !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
2728 if (state->pos == trans_pcie->txqs.cmd.q_id)
2761 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2768 if (!trans_pcie->rxq)
2776 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2811 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2868 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2869 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2916 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2921 trans_pcie->debug_rfkill,
2933 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2940 if (new_value == trans_pcie->debug_rfkill)
2943 trans_pcie->debug_rfkill, new_value);
2944 trans_pcie->debug_rfkill = new_value;
2954 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2962 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2965 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2972 struct iwl_trans_pcie *trans_pcie =
2975 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2976 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3003 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3005 struct cont_rec *data = &trans_pcie->fw_mon_data;
3086 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3088 if (!trans_pcie->rf_name[0])
3092 trans_pcie->rf_name,
3093 strlen(trans_pcie->rf_name));
3134 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3135 struct cont_rec *data = &trans_pcie->fw_mon_data;
3145 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3149 for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
3159 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3160 int max_len = trans_pcie->rx_buf_bytes;
3162 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3415 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3417 struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
3460 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3466 (PAGE_SIZE << trans_pcie->rx_page_order));
3484 u16 tfd_size = trans_pcie->txqs.tfd.size;
3571 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3573 if (trans_pcie->msix_enabled) {
3591 struct iwl_trans_pcie *trans_pcie, **priv;
3614 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3617 trans_pcie->txqs.tfd.addr_size = 64;
3618 trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
3619 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
3621 trans_pcie->txqs.tfd.addr_size = 36;
3622 trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
3623 trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
3625 trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
3628 trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
3629 if (!trans_pcie->txqs.tso_hdr_page) {
3636 trans_pcie->txqs.bc_tbl_size =
3639 trans_pcie->txqs.bc_tbl_size =
3642 trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
3649 trans_pcie->txqs.bc_pool =
3651 trans_pcie->txqs.bc_tbl_size,
3653 if (!trans_pcie->txqs.bc_pool) {
3660 WARN_ON(trans_pcie->txqs.tfd.addr_size !=
3666 trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
3667 if (!trans_pcie->napi_dev) {
3672 priv = netdev_priv(trans_pcie->napi_dev);
3673 *priv = trans_pcie;
3675 trans_pcie->trans = trans;
3676 trans_pcie->opmode_down = true;
3677 spin_lock_init(&trans_pcie->irq_lock);
3678 spin_lock_init(&trans_pcie->reg_lock);
3679 spin_lock_init(&trans_pcie->alloc_page_lock);
3680 mutex_init(&trans_pcie->mutex);
3681 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3682 init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3683 init_waitqueue_head(&trans_pcie->imr_waitq);
3685 trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3687 if (!trans_pcie->rba.alloc_wq) {
3691 INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3693 trans_pcie->debug_rfkill = -1;
3708 addr_size = trans_pcie->txqs.tfd.addr_size;
3735 trans_pcie->hw_base = table[0];
3736 if (!trans_pcie->hw_base) {
3746 trans_pcie->pci_dev = pdev;
3774 init_waitqueue_head(&trans_pcie->sx_waitq);
3780 if (trans_pcie->msix_enabled) {
3781 ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3800 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3801 mutex_init(&trans_pcie->fw_mon_data.mutex);
3811 destroy_workqueue(trans_pcie->rba.alloc_wq);
3813 free_netdev(trans_pcie->napi_dev);
3816 free_percpu(trans_pcie->txqs.tso_hdr_page);
3844 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3847 trans_pcie->imr_status = IMR_D2S_REQUESTED;
3849 ret = wait_event_timeout(trans_pcie->imr_waitq,
3850 trans_pcie->imr_status !=
3852 if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
3857 trans_pcie->imr_status = IMR_D2S_IDLE;
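
Two idioms dominate the matches above: nearly every entry point recovers its bus-private state with IWL_TRANS_GET_PCIE_TRANS(trans), and several paths block on a wait queue until another context flips a completion flag (ucode_write_waitq/ucode_write_complete, sx_waitq/sx_complete, imr_waitq/imr_status). The sketch below is a minimal userspace analogy of those two idioms only, not the driver code itself; the struct layout, the demo_* names, and the pthread-based stand-in for the kernel wait queue are illustrative assumptions.

/*
 * Minimal userspace analogy of two idioms visible in the listing:
 *  1) a generic "trans" object carrying bus-specific private state that is
 *     recovered through an accessor macro (cf. IWL_TRANS_GET_PCIE_TRANS),
 *  2) a sleeper that waits for a completion flag set from another context
 *     (cf. ucode_write_waitq / ucode_write_complete).
 * All names and the layout here are illustrative, not the iwlwifi code.
 * Build with: cc -pthread demo.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct demo_trans {
	const char *name;
	/* bus-specific state lives right behind the generic part */
	char trans_specific[];
};

struct demo_trans_pcie {
	struct demo_trans *trans;	/* back-pointer, as in trans_pcie->trans */
	pthread_mutex_t lock;		/* stands in for the wait queue's lock */
	pthread_cond_t waitq;		/* stands in for ucode_write_waitq */
	bool ucode_write_complete;	/* flag the "IRQ" side sets */
};

/* cf. IWL_TRANS_GET_PCIE_TRANS(): cast the trailing private area */
#define DEMO_GET_PCIE(_t) ((struct demo_trans_pcie *)((_t)->trans_specific))

static struct demo_trans *demo_trans_alloc(const char *name)
{
	struct demo_trans *trans =
		calloc(1, sizeof(*trans) + sizeof(struct demo_trans_pcie));
	struct demo_trans_pcie *pcie;

	if (!trans)
		return NULL;
	trans->name = name;
	pcie = DEMO_GET_PCIE(trans);
	pcie->trans = trans;
	pthread_mutex_init(&pcie->lock, NULL);
	pthread_cond_init(&pcie->waitq, NULL);
	return trans;
}

/* "interrupt handler": completes the pending firmware-load chunk */
static void *demo_irq_thread(void *arg)
{
	struct demo_trans_pcie *pcie = arg;

	usleep(100 * 1000);	/* pretend the DMA took a while */
	pthread_mutex_lock(&pcie->lock);
	pcie->ucode_write_complete = true;
	pthread_cond_signal(&pcie->waitq);	/* cf. wake_up(&...ucode_write_waitq) */
	pthread_mutex_unlock(&pcie->lock);
	return NULL;
}

/* caller side: clear the flag, kick the work, then sleep until it is set */
static int demo_load_chunk(struct demo_trans *trans)
{
	struct demo_trans_pcie *pcie = DEMO_GET_PCIE(trans);
	pthread_t irq;

	pcie->ucode_write_complete = false;
	pthread_create(&irq, NULL, demo_irq_thread, pcie);

	pthread_mutex_lock(&pcie->lock);
	while (!pcie->ucode_write_complete)	/* cf. wait_event_timeout(...) */
		pthread_cond_wait(&pcie->waitq, &pcie->lock);
	pthread_mutex_unlock(&pcie->lock);

	pthread_join(irq, NULL);
	printf("%s: chunk written\n", trans->name);
	return 0;
}

int main(void)
{
	struct demo_trans *trans = demo_trans_alloc("demo-pcie");

	if (!trans)
		return 1;
	demo_load_chunk(trans);
	free(trans);
	return 0;
}

The accessor-macro half mirrors how the private struct is carved out of the same allocation as the generic transport, which is why every function in the listing can start from a plain struct iwl_trans pointer; the condition-variable half is only an analogy for the kernel's wait_event_timeout()/wake_up() pairing seen around the ucode, sx, and imr wait queues.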