mac80211.c: diff between 698478c49294eb3d4d64f7a3b61d460495826078 (before) and fba8248e7e67b7e1098e69284aeccbcb2110fa86 (after)
1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.

--- 864 unchanged lines hidden ---

873 }
874
875 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
876 {
877 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
878 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
879 	struct sk_buff *skb = NULL;
880
--- before (698478c49294), lines 881-899 ---
881 	spin_lock(&mvmtxq->tx_path_lock);
882
883 	rcu_read_lock();
884 	while (likely(!mvmtxq->stopped &&
885 		      (mvm->trans->system_pm_mode ==
886 		       IWL_PLAT_PM_MODE_DISABLED))) {
887 		skb = ieee80211_tx_dequeue(hw, txq);
888
889 		if (!skb)
890 			break;
891
892 		if (!txq->sta)
893 			iwl_mvm_tx_skb_non_sta(mvm, skb);
894 		else
895 			iwl_mvm_tx_skb(mvm, skb, txq->sta);
896 	}
897 	rcu_read_unlock();
898
899 	spin_unlock(&mvmtxq->tx_path_lock);

--- after (fba8248e7e67), lines 881-919 ---
881 	/*
882 	 * No need for threads to be pending here, they can leave the first
883 	 * taker all the work.
884 	 *
885 	 * mvmtxq->tx_request logic:
886 	 *
887 	 * If 0, no one is currently TXing, set to 1 to indicate current thread
888 	 * will now start TX and other threads should quit.
889 	 *
890 	 * If 1, another thread is currently TXing, set to 2 to indicate to
891 	 * that thread that there was another request. Since that request may
892 	 * have raced with the check whether the queue is empty, the TXing
893 	 * thread should check the queue's status one more time before leaving.
894 	 * This check is done in order to not leave any TX hanging in the queue
895 	 * until the next TX invocation (which may not even happen).
896 	 *
897 	 * If 2, another thread is currently TXing, and it will already double
898 	 * check the queue, so do nothing.
899 	 */
900 	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
901 		return;
902
903 	rcu_read_lock();
904 	do {
905 		while (likely(!mvmtxq->stopped &&
906 			      (mvm->trans->system_pm_mode ==
907 			       IWL_PLAT_PM_MODE_DISABLED))) {
908 			skb = ieee80211_tx_dequeue(hw, txq);
909
910 			if (!skb)
911 				break;
912
913 			if (!txq->sta)
914 				iwl_mvm_tx_skb_non_sta(mvm, skb);
915 			else
916 				iwl_mvm_tx_skb(mvm, skb, txq->sta);
917 		}
918 	} while (atomic_dec_return(&mvmtxq->tx_request));
919 	rcu_read_unlock();

--- unchanged in both versions (before 900-907 / after 920-927) ---
900 }
901
902 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
903 				      struct ieee80211_txq *txq)
904 {
905 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
906 	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
907

--- 3927 unchanged lines hidden ---
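
The change above drops mvmtxq->tx_path_lock and instead lets concurrent callers of iwl_mvm_mac_itxq_xmit() collapse onto a single draining thread, using the 0/1/2 tx_request state machine described in the comment. Below is a minimal userspace sketch of that single-drainer pattern, assuming C11 <stdatomic.h> in place of the kernel's atomic_t helpers; fetch_add_unless(), queue_pop() and the fake frame queue are illustrative stand-ins, not part of the driver.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tx_request;	/* 0: idle, 1: draining, 2: drain again */

/* Userspace stand-in for atomic_fetch_add_unless(): add @a to @v unless it
 * already holds @u; return the old value. */
static int fetch_add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	while (old != u) {
		if (atomic_compare_exchange_weak(v, &old, old + a))
			break;
	}
	return old;
}

/* Illustrative stand-in for ieee80211_tx_dequeue(): a tiny fake queue. */
static bool queue_pop(int *frame)
{
	static int pending = 3;

	if (pending <= 0)
		return false;
	*frame = pending--;
	return true;
}

static void itxq_xmit(void)
{
	int frame;

	/* 0 -> 1: become the drainer; 1 -> 2: ask the drainer to re-check
	 * the queue before it leaves; 2: a re-check is already pending. */
	if (fetch_add_unless(&tx_request, 1, 2))
		return;

	do {
		while (queue_pop(&frame))
			printf("tx frame %d\n", frame);
		/* atomic_dec_return() equivalent: 2 -> 1 means a request
		 * raced in, so loop and re-check; 1 -> 0 means we are done. */
	} while (atomic_fetch_sub(&tx_request, 1) - 1);
}

int main(void)
{
	itxq_xmit();	/* prints tx frame 3, 2, 1 */
	return 0;
}

The key property is that a caller never blocks: if it loses the race it only bumps the counter to 2 and returns, and the thread already draining re-checks the queue one more time before dropping the counter back to 0, so no frame is left stranded until the next wake.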