xref: /linux/drivers/net/wireless/mediatek/mt76/agg-rx.c (revision 55f3538c4923e9dfca132e99ebec370e8094afda)
/*
 * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include "mt76.h"

/* Release buffered frames once they have waited longer than this (HZ / 10 = 100ms) */
#define REORDER_TIMEOUT (HZ / 10)

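/*
 * Move the frame buffered in slot @idx (if any) out of the reorder ring and
 * onto @frames, advancing the sequence head by one either way. Called with
 * tid->lock held.
 */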
static void
mt76_aggr_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames, int idx)
{
	struct sk_buff *skb;

	tid->head = ieee80211_sn_inc(tid->head);

	skb = tid->reorder_buf[idx];
	if (!skb)
		return;

	tid->reorder_buf[idx] = NULL;
	tid->nframes--;
	__skb_queue_tail(frames, skb);
}

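/*
 * Release all frames up to (but not including) sequence number @head,
 * skipping over any holes left by frames that never arrived.
 */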
static void
mt76_rx_aggr_release_frames(struct mt76_rx_tid *tid, struct sk_buff_head *frames,
			    u16 head)
{
	int idx;

	while (ieee80211_sn_less(tid->head, head)) {
		idx = tid->head % tid->size;
		mt76_aggr_release(tid, frames, idx);
	}
}

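/*
 * Release the contiguous run of buffered frames starting at the current
 * head, stopping at the first missing sequence number.
 */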
static void
mt76_rx_aggr_release_head(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	int idx = tid->head % tid->size;

	while (tid->reorder_buf[idx]) {
		mt76_aggr_release(tid, frames, idx);
		idx = tid->head % tid->size;
	}
}

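/*
 * Timeout-driven flush: release what is ready at the head, then scan the
 * ring for frames buffered longer than REORDER_TIMEOUT and force the window
 * forward past them, giving up on the holes in between.
 */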
static void
mt76_rx_aggr_check_release(struct mt76_rx_tid *tid, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status;
	struct sk_buff *skb;
	int start, idx, nframes;

	if (!tid->nframes)
		return;

	mt76_rx_aggr_release_head(tid, frames);

	start = tid->head % tid->size;
	nframes = tid->nframes;

	/* Walk the ring once, starting just past the head */
	for (idx = (tid->head + 1) % tid->size;
	     idx != start && nframes;
	     idx = (idx + 1) % tid->size) {

		skb = tid->reorder_buf[idx];
		if (!skb)
			continue;

		nframes--;
		status = (struct mt76_rx_status *) skb->cb;
		if (!time_after(jiffies, status->reorder_time +
					 REORDER_TIMEOUT))
			continue;

		/* Frame timed out: release everything up to it */
		mt76_rx_aggr_release_frames(tid, frames, status->seqno);
	}

	mt76_rx_aggr_release_head(tid, frames);
}

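/*
 * Periodic worker that performs the timeout check above, re-arms itself and
 * hands any released frames to mt76_rx_complete(). BHs are disabled so the
 * plain spin_lock() pairs safely with the spin_lock_bh() users of tid->lock.
 */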
static void
mt76_rx_aggr_reorder_work(struct work_struct *work)
{
	struct mt76_rx_tid *tid = container_of(work, struct mt76_rx_tid,
					       reorder_work.work);
	struct mt76_dev *dev = tid->dev;
	struct sk_buff_head frames;

	__skb_queue_head_init(&frames);

	local_bh_disable();

	spin_lock(&tid->lock);
	mt76_rx_aggr_check_release(tid, &frames);
	spin_unlock(&tid->lock);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);
	mt76_rx_complete(dev, &frames, -1);

	local_bh_enable();
}

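/*
 * RX entry point for A-MPDU reordering. The frame is queued to @frames
 * first; if it does not belong to an active aggregation session it is passed
 * through unchanged, otherwise it is released in order, buffered out of
 * order, or dropped as a duplicate below the window.
 */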
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *) skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_sta *sta;
	struct mt76_rx_tid *tid;
	bool sn_less;
	u16 seqno, head, size;
	u8 idx;

	__skb_queue_tail(frames, skb);

	sta = wcid_to_sta(wcid);
	if (!sta || !status->aggr)
		return;

	tid = rcu_dereference(wcid->aggr[status->tid]);
	if (!tid)
		return;

	spin_lock_bh(&tid->lock);

	if (tid->stopped)
		goto out;

	head = tid->head;
	seqno = status->seqno;
	size = tid->size;
	sn_less = ieee80211_sn_less(seqno, head);

	if (!tid->started) {
		/* Frames preceding the first in-window frame pass through */
		if (sn_less)
			goto out;

		tid->started = true;
	}

	/* Below the window of a started session: duplicate or stale
	 * retransmission, drop it */
	if (sn_less) {
		__skb_unlink(skb, frames);
		dev_kfree_skb(skb);
		goto out;
	}

	/* In-order frame: pass it through and flush the run it completes */
	if (seqno == head) {
		tid->head = ieee80211_sn_inc(head);
		if (tid->nframes)
			mt76_rx_aggr_release_head(tid, frames);
		goto out;
	}

	__skb_unlink(skb, frames);

	/*
	 * Frame sequence number exceeds buffering window, free up some space
	 * by releasing previous frames
	 */
	if (!ieee80211_sn_less(seqno, head + size)) {
		head = ieee80211_sn_inc(ieee80211_sn_sub(seqno, size));
		mt76_rx_aggr_release_frames(tid, frames, head);
	}

	idx = seqno % size;

	/* Discard if the current slot is already in use */
	if (tid->reorder_buf[idx]) {
		dev_kfree_skb(skb);
		goto out;
	}

	status->reorder_time = jiffies;
	tid->reorder_buf[idx] = skb;
	tid->nframes++;
	mt76_rx_aggr_release_head(tid, frames);

	ieee80211_queue_delayed_work(tid->dev->hw, &tid->reorder_work,
				     REORDER_TIMEOUT);

out:
	spin_unlock_bh(&tid->lock);
}

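/*
 * Set up reordering for a new RX aggregation session: @ssn is the starting
 * sequence number, @size the negotiated block-ack window size. Any previous
 * session on this TID is torn down first.
 */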
int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno,
		       u16 ssn, u8 size)
{
	struct mt76_rx_tid *tid;

	mt76_rx_aggr_stop(dev, wcid, tidno);

	tid = kzalloc(sizeof(*tid) + size * sizeof(tid->reorder_buf[0]),
		      GFP_KERNEL);
	if (!tid)
		return -ENOMEM;

	tid->dev = dev;
	tid->head = ssn;
	tid->size = size;
	INIT_DELAYED_WORK(&tid->reorder_work, mt76_rx_aggr_reorder_work);
	spin_lock_init(&tid->lock);

	rcu_assign_pointer(wcid->aggr[tidno], tid);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_start);

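/*
 * Mark the session stopped and free all buffered frames. Setting
 * tid->stopped under the lock keeps the RX path from re-arming the reorder
 * work before it is cancelled.
 */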
static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
{
	u8 size = tid->size;
	int i;

	spin_lock_bh(&tid->lock);

	tid->stopped = true;
	for (i = 0; tid->nframes && i < size; i++) {
		struct sk_buff *skb = tid->reorder_buf[i];

		if (!skb)
			continue;

		tid->nframes--;
		dev_kfree_skb(skb);
	}

	spin_unlock_bh(&tid->lock);

	cancel_delayed_work_sync(&tid->reorder_work);
}

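/*
 * Tear down the RX aggregation session on @tidno. The context is unpublished
 * via RCU first, then freed with kfree_rcu() once all readers are done.
 */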
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tidno)
{
	struct mt76_rx_tid *tid;

	rcu_read_lock();

	tid = rcu_dereference(wcid->aggr[tidno]);
	if (tid) {
		rcu_assign_pointer(wcid->aggr[tidno], NULL);
		mt76_rx_aggr_shutdown(dev, tid);
		kfree_rcu(tid, rcu_head);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_rx_aggr_stop);
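
/*
 * Usage sketch (illustrative, not part of this file): mt76 drivers call the
 * two exported helpers above from their mac80211 ampdu_action callback,
 * where @dev and @wcid come from the driver's private station data, e.g.:
 *
 *	switch (params->action) {
 *	case IEEE80211_AMPDU_RX_START:
 *		mt76_rx_aggr_start(dev, wcid, params->tid, params->ssn,
 *				   params->buf_size);
 *		break;
 *	case IEEE80211_AMPDU_RX_STOP:
 *		mt76_rx_aggr_stop(dev, wcid, params->tid);
 *		break;
 *	...
 *	}
 */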