1*6b627f88SBjoern A. Zeeb /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2*6b627f88SBjoern A. Zeeb /*
3*6b627f88SBjoern A. Zeeb * Copyright (C) 2024 Intel Corporation
4*6b627f88SBjoern A. Zeeb */
5*6b627f88SBjoern A. Zeeb #ifndef __iwl_mld_tx_h__
6*6b627f88SBjoern A. Zeeb #define __iwl_mld_tx_h__
7*6b627f88SBjoern A. Zeeb
8*6b627f88SBjoern A. Zeeb #include "mld.h"
9*6b627f88SBjoern A. Zeeb
/*
 * Sentinel queue-id values; both fit in the u16 fw_id of struct iwl_mld_txq.
 * NOTE(review): 0xFFFE presumably marks a queue being drained/dropped rather
 * than a plain invalid one — confirm against the users of this constant.
 */
#define IWL_MLD_INVALID_QUEUE 0xFFFF
#define IWL_MLD_INVALID_DROP_TX 0xFFFE
12*6b627f88SBjoern A. Zeeb
/**
 * struct iwl_mld_txq - TX Queue data
 *
 * @fw_id: the fw id of this txq. Only valid when &status.allocated is true.
 * @status: bitmap of the txq status
 * @status.allocated: Indicates that the queue was allocated.
 * @status.stop_full: Indicates that the queue is full and should stop TXing.
 * @list: list pointer, for &mld::txqs_to_add
 * @tx_request: makes sure that if there are multiple threads that want to tx
 *	from this txq, only one of them will do all the TXing.
 *	This is needed to avoid spinning the trans txq lock, which is expensive
 */
struct iwl_mld_txq {
	/* Add here fields that need clean up on restart */
	struct_group(zeroed_on_hw_restart,
		u16 fw_id;
		struct {
			u8 allocated:1;
			u8 stop_full:1;
		} status;
	);
	struct list_head list;
	atomic_t tx_request;
	/* And here fields that survive a fw restart */
};
38*6b627f88SBjoern A. Zeeb
iwl_mld_init_txq(struct iwl_mld_txq * mld_txq)39*6b627f88SBjoern A. Zeeb static inline void iwl_mld_init_txq(struct iwl_mld_txq *mld_txq)
40*6b627f88SBjoern A. Zeeb {
41*6b627f88SBjoern A. Zeeb INIT_LIST_HEAD(&mld_txq->list);
42*6b627f88SBjoern A. Zeeb atomic_set(&mld_txq->tx_request, 0);
43*6b627f88SBjoern A. Zeeb }
44*6b627f88SBjoern A. Zeeb
45*6b627f88SBjoern A. Zeeb static inline struct iwl_mld_txq *
iwl_mld_txq_from_mac80211(struct ieee80211_txq * txq)46*6b627f88SBjoern A. Zeeb iwl_mld_txq_from_mac80211(struct ieee80211_txq *txq)
47*6b627f88SBjoern A. Zeeb {
48*6b627f88SBjoern A. Zeeb return (void *)txq->drv_priv;
49*6b627f88SBjoern A. Zeeb }
50*6b627f88SBjoern A. Zeeb
/* txq add/remove bookkeeping (uses &iwl_mld_txq.list / &mld::txqs_to_add) */
void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk);
void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
void iwl_mld_add_txq_list(struct iwl_mld *mld);
void
iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id);
void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq);
/* fw notification handlers — NOTE(review): presumably called from the RX
 * path with the relevant locks held; confirm in the .c implementation.
 */
void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
				  struct iwl_rx_packet *pkt);
int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id);
int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq);

/* re-point a station's queues when its link sta mask changes */
int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
			    struct ieee80211_sta *sta,
			    u32 old_sta_mask, u32 new_sta_mask);

void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
					struct iwl_rx_packet *pkt);
void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant);

u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_vif *vif);

/* main TX entry point for an skb (optionally from a txq) */
void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
		    struct ieee80211_txq *txq);
76*6b627f88SBjoern A. Zeeb
77*6b627f88SBjoern A. Zeeb #endif /* __iwl_mld_tx_h__ */
78