xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/agg.h (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
2 /*
3  * Copyright (C) 2024 Intel Corporation
4  */
5 #ifndef __iwl_agg_h__
6 #define __iwl_agg_h__
7 
8 #include "mld.h"
9 #include "fw/api/rx.h"
10 
/**
 * struct iwl_mld_reorder_buffer - per ra/tid/queue reorder buffer
 * @head_sn: reorder window head sequence number
 * @num_stored: number of MPDUs stored in the buffer
 * @queue: queue of this reorder buffer
 * @valid: true if reordering is valid for this queue
 *
 * One instance exists per RX queue (see iwl_mld_baid_data.reorder_buf);
 * the struct is cacheline-aligned so that concurrently-running RX queues
 * don't false-share each other's reorder state.
 */
struct iwl_mld_reorder_buffer {
	u16 head_sn;
	u16 num_stored;
	int queue;
	bool valid;
} ____cacheline_aligned_in_smp;
24 
/**
 * struct iwl_mld_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
 * @frames: list of skbs stored. a list is necessary because in an A-MSDU,
 *	all sub-frames share the same sequence number, so they are stored
 *	together in the same list.
 *
 * The entry is aligned up to the next power of two of its size so that
 * an array of entries (iwl_mld_baid_data.entries) can be carved into
 * per-queue slices on clean boundaries — presumably to avoid cache line
 * sharing between queues (see @entries_per_queue); confirm against the
 * allocation code.
 */
struct iwl_mld_reorder_buf_entry {
	struct sk_buff_head frames;
}
#ifndef __CHECKER__
/* sparse doesn't like this construct: "bad integer constant expression" */
__aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
#endif
;
39 
/**
 * struct iwl_mld_baid_data - Block Ack session data
 * @rcu_head: RCU head for freeing this data
 * @sta_mask: station mask for the BAID
 * @tid: tid of the session
 * @baid: baid of the session
 * @buf_size: the reorder buffer size as set by the last ADDBA request
 * @entries_per_queue: number of buffers per queue, this actually gets
 *	aligned up to avoid cache line sharing between queues
 * @timeout: the timeout value specified in the ADDBA request.
 * @session_timer: timer is set to expire after 2 * @timeout (since we want
 *	to minimize the cache bouncing by updating @last_rx_timestamp only once
 *	after @timeout has passed). If no packets are received within this
 *	period, it informs mac80211 to initiate delBA flow, terminating the
 *	BA session.
 * @last_rx_timestamp: timestamp of the last received packet (in jiffies). This
 *	value is updated only when the configured @timeout has passed since
 *	the last update to minimize cache bouncing between RX queues.
 * @rcu_ptr: BA data RCU protected access
 * @mld: mld pointer, needed for timer context
 * @reorder_buf: reorder buffer, allocated per queue
 * @entries: data, @entries_per_queue entries for each of the RX queues
 *	(flexible array member, allocated together with the struct)
 */
struct iwl_mld_baid_data {
	struct rcu_head rcu_head;
	u32 sta_mask;
	u8 tid;
	u8 baid;
	u16 buf_size;
	u16 entries_per_queue;
	u16 timeout;
	struct timer_list session_timer;
	unsigned long last_rx_timestamp;
	struct iwl_mld_baid_data __rcu **rcu_ptr;
	struct iwl_mld *mld;
	struct iwl_mld_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
	struct iwl_mld_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
};
78 
/**
 * struct iwl_mld_delba_data - RX queue sync data for %IWL_MLD_RXQ_NOTIF_DEL_BA
 *
 * @baid: Block Ack id, used to identify the BA session to be removed
 *
 * Packed because it is carried as the payload of an RX-queue sync
 * notification (see iwl_mld_del_ba()).
 */
struct iwl_mld_delba_data {
	u32 baid;
} __packed;
87 
/**
 * enum iwl_mld_reorder_result - Possible return values for iwl_mld_reorder()
 * indicating how the caller should handle the skb based on the result.
 *
 * @IWL_MLD_PASS_SKB: skb should be passed to upper layer.
 * @IWL_MLD_BUFFERED_SKB: skb has been buffered, don't pass it to upper layer.
 * @IWL_MLD_DROP_SKB: skb should be dropped and freed by the caller.
 */
enum iwl_mld_reorder_result {
	IWL_MLD_PASS_SKB,
	IWL_MLD_BUFFERED_SKB,
	IWL_MLD_DROP_SKB
};
101 
/* RX BA session setup/teardown (ADDBA/delBA); @ssn, @buf_size and @timeout
 * come from the ADDBA request (see struct iwl_mld_baid_data).
 */
int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout);
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid);

/* Run @skb through the reorder buffer for @queue; the return value tells
 * the caller whether to pass, hold or drop the skb (see
 * enum iwl_mld_reorder_result for the exact ownership rules).
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc);

/* Handlers for firmware (BAR) frame-release notifications on an RX queue */
void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue);
void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue);

/* Per-queue handler for the %IWL_MLD_RXQ_NOTIF_DEL_BA RX-queue sync
 * message (payload is struct iwl_mld_delba_data).
 */
void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data);

/* Re-point existing BAID sessions from @old_sta_mask to @new_sta_mask —
 * NOTE(review): purpose inferred from naming (likely MLO link-set
 * changes); confirm against callers.
 */
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask);
126 
127 #endif /* __iwl_agg_h__ */
128