tx.c: 213cc929cbfd7962164420b300f9a6c60aaff189 -> 72bc934cb393d9aa0a3a73026c020da36e817fa1
('-' lines were removed in the new revision, '+' lines were added; unmarked lines are unchanged)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>

--- 167 unchanged lines hidden ---

	return num_tbs;
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

-	lockdep_assert_held(&trans_pcie->reg_lock);
-
	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
-	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+
+	spin_lock(&trans_pcie->reg_lock);
+
+	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
+		spin_unlock(&trans_pcie->reg_lock);
		return;
+	}

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	spin_unlock(&trans_pcie->reg_lock);
}
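The shape of this hunk: instead of every caller bracketing the helper with reg_lock (and asserting it via lockdep), the helper now takes and drops the lock itself, including on the WARN_ON early return. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for reg_lock and an invented nic_awake flag (none of these names are from the driver):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static bool nic_awake;

/* Toy model, not driver code: the helper owns the lock, so call
 * sites need no locking of their own; note the unlock on the
 * early-return path as well. */
static void clear_in_flight(void)
{
	pthread_mutex_lock(&reg_lock);
	if (!nic_awake) {			/* the WARN_ON case */
		pthread_mutex_unlock(&reg_lock);
		return;
	}
	nic_awake = false;
	pthread_mutex_unlock(&reg_lock);
}

int main(void)
{
	nic_awake = true;
	clear_in_flight();	/* no lock needed at the call site */
	printf("awake=%d\n", nic_awake);
	return 0;
}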

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	if (!txq) {
		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
		return;
	}

	spin_lock_bh(&txq->lock);

--- 7 unchanged lines hidden ---

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

-		if (txq->read_ptr == txq->write_ptr) {
-			spin_lock(&trans_pcie->reg_lock);
-			if (txq_id == trans->txqs.cmd.q_id)
-				iwl_pcie_clear_cmd_in_flight(trans);
-			spin_unlock(&trans_pcie->reg_lock);
-		}
+		if (txq->read_ptr == txq->write_ptr &&
+		    txq_id == trans->txqs.cmd.q_id)
+			iwl_pcie_clear_cmd_in_flight(trans);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

--- 385 unchanged lines hidden ---

	iwl_pcie_tx_free(trans);
	return ret;
}

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int ret;
+	int ret = 0;

-	lockdep_assert_held(&trans_pcie->reg_lock);
-
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

+	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
+		return 0;
+
+	spin_lock(&trans_pcie->reg_lock);
	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
-	 * apmg_wake_up_wa set.
+	 * apmg_wake_up_wa set (see above.)
	 */
-	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
-	    !trans_pcie->cmd_hold_nic_awake) {
+	if (!trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
-			return -EIO;
+			ret = -EIO;
+		} else {
+			trans_pcie->cmd_hold_nic_awake = true;
		}
-		trans_pcie->cmd_hold_nic_awake = true;
	}
+	spin_unlock(&trans_pcie->reg_lock);

-	return 0;
+	return ret;
}
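For reference, iwl_poll_bit() above busy-waits until the masked register value matches the requested bits or the timeout (15000 microseconds here) expires, returning a negative value on timeout. A rough user-space model of that contract, under the assumption that this is indeed its semantics; read_reg(), poll_bit() and fake_reg are invented stand-ins for the CSR accessors:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_reg = 0x08;	/* stand-in for a CSR register */

static uint32_t read_reg(void)
{
	return fake_reg;
}

/* Poll until (reg & mask) == (bits & mask), or give up after
 * timeout_us microseconds; a negative return means timeout. */
static int poll_bit(uint32_t bits, uint32_t mask, int timeout_us)
{
	int elapsed = 0;

	do {
		if ((read_reg() & mask) == (bits & mask))
			return elapsed;
		usleep(10);
		elapsed += 10;
	} while (elapsed < timeout_us);

	return -1;
}

int main(void)
{
	/* bit 3 is already set, so this succeeds immediately */
	printf("%d\n", poll_bit(0x08, 0x08, 15000));
	/* bit 0 never comes up, so this times out */
	printf("%d\n", poll_bit(0x01, 0x01, 1000));
	return 0;
}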

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

--- 14 unchanged lines hidden ---

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

-	if (txq->read_ptr == txq->write_ptr) {
-		/* BHs are also disabled due to txq->lock */
-		spin_lock(&trans_pcie->reg_lock);
+	if (txq->read_ptr == txq->write_ptr)
		iwl_pcie_clear_cmd_in_flight(trans);
-		spin_unlock(&trans_pcie->reg_lock);
-	}

	iwl_txq_progress(txq);
}
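The 'R' index model described in the function comment above is plain ring-buffer arithmetic: every slot from the driver's old read pointer up to the firmware's new read index, modulo the queue size, becomes free again. A self-contained toy model of that arithmetic; RING_SIZE and reclaim() are illustrative only, the driver's real wrap handling lives in iwl_txq_get_cmd_index()/iwl_txq_inc_wrap():

#include <stdio.h>

#define RING_SIZE 8	/* illustrative; real queues are larger */

/* Free every entry from *read_ptr up to (but not including) r,
 * wrapping around the ring; returns the number of freed slots. */
static int reclaim(int *read_ptr, int r)
{
	int nfreed = 0;

	while (*read_ptr != r) {
		*read_ptr = (*read_ptr + 1) % RING_SIZE;
		nfreed++;
	}
	return nfreed;
}

int main(void)
{
	int read_ptr = 6;
	int freed = reclaim(&read_ptr, 2);	/* wraps: frees 6, 7, 0, 1 */

	printf("freed %d, read_ptr now %d\n", freed, read_ptr);
	return 0;
}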

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

--- 187 unchanged lines hidden ---

 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of command in the
 * command queue.
 */
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
			  struct iwl_host_cmd *cmd)
{
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;

--- 230 unchanged lines hidden ---

	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

-	spin_lock(&trans_pcie->reg_lock);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
-		goto unlock_reg;
+		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

-unlock_reg:
-	spin_unlock(&trans_pcie->reg_lock);
out:
	spin_unlock_irqrestore(&txq->lock, flags);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
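The label change above is the standard C unwind idiom: one label per acquired resource, jumped to in reverse order of acquisition, so dropping reg_lock from this function simply removes the unlock_reg: rung. A generic sketch of the idiom; all names here are invented, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int fail)
{
	int ret = 0;
	char *buf = malloc(64);

	if (!buf)
		return -1;

	pthread_mutex_lock(&lock);

	if (fail) {
		ret = -1;
		goto out;	/* unwind in reverse order of acquisition */
	}

	/* ... work under the lock ... */

out:
	pthread_mutex_unlock(&lock);
	free(buf);
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_work(0), do_work(1));
	return 0;
}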

--- 468 unchanged lines hidden ---