
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "iwl-context-info.h"
 * @invalid: rxb is in driver ownership - not owned by HW

 * struct iwl_rx_transfer_desc - transfer descriptor

 * struct iwl_rx_completion_desc - completion descriptor
 * @flags: flags (bit 0: fragmented, all other bits: reserved)

 * struct iwl_rx_completion_desc_bz - Bz completion descriptor
 * @flags: flags (bit 0: fragmented, all other bits: reserved)

 * struct iwl_rxq - Rx queue
 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 * @free_count: Number of pre-allocated buffers in rx_free
 * @lock: per-queue lock
 * @queue: actual rx queue. Not used when multiple RX queues are in use.

 * struct iwl_rb_allocator - Rx allocator
/*
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 */
static inline u16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					 struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(*rb_stts));
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return le16_to_cpu(READ_ONCE(rb_stts->closed_rb_num)) & 0xFFF;
	}
}
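/*
 * Added illustration (not from this header): a minimal sketch of how the
 * RX path might use this accessor, assuming 'rxq->read' is the driver's
 * read pointer; the helper name iwl_rxq_has_work() is hypothetical.
 */
static inline bool iwl_rxq_has_work(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	/* HW has closed more RBs than the driver has consumed? */
	return rxq->read != iwl_get_closed_rb_stts(trans, rxq);
}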
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * enum iwl_shared_irq_flags - level of sharing for irq
 * enum iwl_image_response_code - image response values
 * enum iwl_pcie_imr_status - imr dma transfer state

 * struct iwl_pcie_txqs - TX queues data
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
 * struct iwl_trans_pcie - PCIe transport specific data
 * @scd_base_addr: scheduler base address in SRAM
 * @pci_dev: PCI device pointer
 * @cmd_queue: command queue number
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if MSI-X was successfully enabled
 * @inta_mask: interrupt (INT-A) mask
 * @debug_rfkill: RF-kill debugging state, -1 for unset, 0/1 for radio
	return (void *)trans->trans_specific;

 * re-enabled by clearing this bit. This register is defined as

 * ICT - interrupt handling

#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)

 * that no TB referencing this page can trigger the 32-bit boundary hardware
#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
	res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
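	/*
	 * Hedged sketch of how this helper plausibly completes: the page
	 * info stores the page's DMA base, so add the offset of 'addr'
	 * within the page (assumption, mirroring the upstream pattern).
	 */
	res += (unsigned long)addr & (PAGE_SIZE - 1);

	return res;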
static inline dma_addr_t
iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
{
	return txq->first_tb_dma +
	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
}

static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
{
	return index & (q->n_window - 1);
}
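/*
 * Added worked example: the '&' is a cheap modulo that is only correct
 * because n_window is a power of two, e.g. with n_window == 32:
 *
 *	35 & (32 - 1) == 3	(same as 35 % 32)
 *	31 & (32 - 1) == 31
 */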
static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
				    struct iwl_txq *txq, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->gen2)
		idx = iwl_txq_get_cmd_index(txq, idx);

	return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
}

/*
 * We need this inline in case dma_addr_t is only 32-bits - since the
 * hardware is always 64-bit, the issue can still occur in that case,
 * so use u64 for 'phys' here to force the addition in 64-bit.
 */
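/*
 * The function this comment introduces is elided above; a hedged
 * reconstruction that matches the comment's reasoning (upper_32_bits()
 * differs iff the buffer crosses a 4 GiB boundary):
 */
static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
{
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}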
	if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
	} else {
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->id);
	}

/*
 * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
 */
static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
{
	return ++index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}

/*
 * iwl_txq_dec_wrap - decrement queue index, wrap back to end
 */
static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
{
	return --index &
		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
}
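/*
 * Added worked example: with max_tfd_queue_size == 256 the mask is 0xff,
 * so iwl_txq_inc_wrap(trans, 255) yields 0 and iwl_txq_dec_wrap(trans, 0)
 * yields 255 - which is why the queue size must be a power of two.
 */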
	if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
	}

	tfd->num_tbs = 0;

	iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
			    trans->invalid_tx_cmd.size);
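/*
 * Added note (hedged): pointing the unused TFD's first TB at
 * trans->invalid_tx_cmd gives the DMA engine a valid, harmless buffer
 * to fetch instead of stale or unmapped memory.
 */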
	if (trans->trans_cfg->gen2) {
		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];

		return le16_to_cpu(tfh_tb->tb_len);
	}

	tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
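/*
 * Added note: in the pre-gen2 TFD layout, hi_n_len packs the upper 4 bits
 * of the 36-bit DMA address in bits 0..3 and the 12-bit length in bits
 * 4..15, hence the '>> 4' above. The write side plausibly mirrors this
 * (hedged sketch, not verbatim driver code):
 *
 *	u16 hi_n_len = len << 4;
 *
 *	put_unaligned_le32(addr, &tb->lo);
 *	hi_n_len |= iwl_get_dma_hi_addr(addr);
 *	tb->hi_n_len = cpu_to_le16(hi_n_len);
 */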
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* non-MSI-X masking elided in this excerpt */
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
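		/*
		 * Elided loop body, reconstructed on the assumption that
		 * this helper only counts sections up to a separator marker:
		 */
		start++;
		i++;
	}

	return i;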
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
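/*
 * Added note (hedged): the iwl_{dis,en}able_interrupts() wrappers above
 * take irq_lock around the _iwl_*() workers so that mask updates cannot
 * race with the interrupt handlers that consult inta_mask/fh_mask/hw_mask.
 */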
	trans_pcie->hw_mask = msk;

	trans_pcie->fh_mask = msk;

	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}

	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	}
	iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);

	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &

	if (i == trans_p->alloc_vecs - 1)

	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
	}
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default,
		 * so when we power down the device we need to set the bit
		 * to allow it to wake up the PCI-E bus for RF-kill
		 * interrupts.
		 */
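		/*
		 * Hedged sketch of the elided write: the RF-kill wake bit in
		 * CSR_GP_CNTRL is set explicitly here; the exact flag name is
		 * an assumption based on the upstream driver.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}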
	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)

	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));