xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/tx.h (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1*d1e879ecSMiri Korenblit /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2*d1e879ecSMiri Korenblit /*
3*d1e879ecSMiri Korenblit  * Copyright (C) 2024 Intel Corporation
4*d1e879ecSMiri Korenblit  */
5*d1e879ecSMiri Korenblit #ifndef __iwl_mld_tx_h__
6*d1e879ecSMiri Korenblit #define __iwl_mld_tx_h__
7*d1e879ecSMiri Korenblit 
8*d1e879ecSMiri Korenblit #include "mld.h"
9*d1e879ecSMiri Korenblit 
10*d1e879ecSMiri Korenblit #define IWL_MLD_INVALID_QUEUE		0xFFFF
11*d1e879ecSMiri Korenblit #define IWL_MLD_INVALID_DROP_TX		0xFFFE
12*d1e879ecSMiri Korenblit 
13*d1e879ecSMiri Korenblit /**
14*d1e879ecSMiri Korenblit  * struct iwl_mld_txq - TX Queue data
15*d1e879ecSMiri Korenblit  *
16*d1e879ecSMiri Korenblit  * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
17*d1e879ecSMiri Korenblit  * @status: bitmap of the txq status
18*d1e879ecSMiri Korenblit  * @status.allocated: Indicates that the queue was allocated.
19*d1e879ecSMiri Korenblit  * @status.stop_full: Indicates that the queue is full and should stop TXing.
20*d1e879ecSMiri Korenblit  * @list: list pointer, for &mld::txqs_to_add
21*d1e879ecSMiri Korenblit  * @tx_request: makes sure that if there are multiple threads that want to tx
22*d1e879ecSMiri Korenblit  *	from this txq, only one of them will do all the TXing.
23*d1e879ecSMiri Korenblit  *	This is needed to avoid spinning the trans txq lock, which is expensive
24*d1e879ecSMiri Korenblit  */
25*d1e879ecSMiri Korenblit struct iwl_mld_txq {
26*d1e879ecSMiri Korenblit 	/* Add here fields that need clean up on restart */
27*d1e879ecSMiri Korenblit 	struct_group(zeroed_on_hw_restart,
28*d1e879ecSMiri Korenblit 		u16 fw_id;
29*d1e879ecSMiri Korenblit 		struct {
30*d1e879ecSMiri Korenblit 			u8 allocated:1;
31*d1e879ecSMiri Korenblit 			u8 stop_full:1;
32*d1e879ecSMiri Korenblit 		} status;
33*d1e879ecSMiri Korenblit 	);
34*d1e879ecSMiri Korenblit 	struct list_head list;
35*d1e879ecSMiri Korenblit 	atomic_t tx_request;
36*d1e879ecSMiri Korenblit 	/* And here fields that survive a fw restart */
37*d1e879ecSMiri Korenblit };
38*d1e879ecSMiri Korenblit 
iwl_mld_init_txq(struct iwl_mld_txq * mld_txq)39*d1e879ecSMiri Korenblit static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
40*d1e879ecSMiri Korenblit {
41*d1e879ecSMiri Korenblit 	INIT_LIST_HEAD(&mld_txq->list);
42*d1e879ecSMiri Korenblit 	atomic_set(&mld_txq->tx_request, 0);
43*d1e879ecSMiri Korenblit }
44*d1e879ecSMiri Korenblit 
45*d1e879ecSMiri Korenblit static inline struct iwl_mld_txq *
iwl_mld_txq_from_mac80211(struct ieee80211_txq * txq)46*d1e879ecSMiri Korenblit iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
47*d1e879ecSMiri Korenblit {
48*d1e879ecSMiri Korenblit 	return (void *)txq->drv_priv;
49*d1e879ecSMiri Korenblit }
50*d1e879ecSMiri Korenblit 
51*d1e879ecSMiri Korenblit void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
52*d1e879ecSMiri Korenblit void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
53*d1e879ecSMiri Korenblit void iwl_mld_add_txq_list(struct iwl_mld *mld);
54*d1e879ecSMiri Korenblit void
55*d1e879ecSMiri Korenblit iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
56*d1e879ecSMiri Korenblit void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
57*d1e879ecSMiri Korenblit void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
58*d1e879ecSMiri Korenblit 				  struct iwl_rx_packet *pkt);
59*d1e879ecSMiri Korenblit int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
60*d1e879ecSMiri Korenblit int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);
61*d1e879ecSMiri Korenblit 
62*d1e879ecSMiri Korenblit int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
63*d1e879ecSMiri Korenblit 			    struct ieee80211_sta *sta,
64*d1e879ecSMiri Korenblit 			    u32 old_sta_mask, u32 new_sta_mask);
65*d1e879ecSMiri Korenblit 
66*d1e879ecSMiri Korenblit void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
67*d1e879ecSMiri Korenblit 					struct iwl_rx_packet *pkt);
68*d1e879ecSMiri Korenblit void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);
69*d1e879ecSMiri Korenblit 
70*d1e879ecSMiri Korenblit u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
71*d1e879ecSMiri Korenblit 			   struct ieee80211_tx_info *info,
72*d1e879ecSMiri Korenblit 			   struct ieee80211_vif *vif);
73*d1e879ecSMiri Korenblit 
74*d1e879ecSMiri Korenblit void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
75*d1e879ecSMiri Korenblit 		    struct ieee80211_txq *txq);
76*d1e879ecSMiri Korenblit 
77*d1e879ecSMiri Korenblit #endif /* __iwl_mld_tx_h__ */
78