/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_wlan.h"

#ifdef IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
/* fast frame header is 32-bits */
#define ATH_FF_PROTO    0x0000003f      /* protocol */
#define ATH_FF_PROTO_S  0
#define ATH_FF_FTYPE    0x000000c0      /* frame type */
#define ATH_FF_FTYPE_S  6
#define ATH_FF_HLEN32   0x00000300      /* optional hdr length */
#define ATH_FF_HLEN32_S 8
#define ATH_FF_SEQNUM   0x001ffc00      /* sequence number */
#define ATH_FF_SEQNUM_S 10
#define ATH_FF_OFFSET   0xffe00000      /* offset to 2nd payload */
#define ATH_FF_OFFSET_S 21
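/*
 * Illustrative decode of the 32-bit header word (a sketch only; the
 * authoritative use is in ieee80211_ff_decap() below):
 *
 *      proto  = _IEEE80211_MASKSHIFT(ath, ATH_FF_PROTO);
 *      seqnum = _IEEE80211_MASKSHIFT(ath, ATH_FF_SEQNUM);
 *      offset = _IEEE80211_MASKSHIFT(ath, ATH_FF_OFFSET);
 *
 * proto is expected to be ATH_FF_PROTO_L2TUNNEL for frames we accept,
 * seqnum is the 11-bit sequence number, and offset is the 11-bit
 * offset to the second payload.
 */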

#define ATH_FF_MAX_HDR_PAD      4
#define ATH_FF_MAX_SEP_PAD      6
#define ATH_FF_MAX_HDR          30

#define ATH_FF_PROTO_L2TUNNEL   0       /* L2 tunnel protocol */
#define ATH_FF_ETH_TYPE         0x88bd  /* Ether type for encapsulated frames */
#define ATH_FF_SNAP_ORGCODE_0   0x00
#define ATH_FF_SNAP_ORGCODE_1   0x03
#define ATH_FF_SNAP_ORGCODE_2   0x7f

#define ATH_FF_TXQMIN   2       /* min txq depth for staging */
#define ATH_FF_TXQMAX   50      /* maximum # of queued frames allowed */
#define ATH_FF_STAGEMAX 5       /* max waiting period for staged frame */

#define ETHER_HEADER_COPY(dst, src) \
        memcpy(dst, src, sizeof(struct ether_header))

static int ieee80211_ffppsmin = 2;      /* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
    &ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static int ieee80211_ffagemax = -1;     /* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    &ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
    "max hold time for fast-frame staging (ms)");

static void
ff_age_all(void *arg, int npending)
{
        struct ieee80211com *ic = arg;

        /* XXX cache timer value somewhere (racy) */
        ieee80211_ff_age_all(ic, ieee80211_ffagemax + 1);
}

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
        struct ieee80211_superg *sg;

        IEEE80211_FF_LOCK_INIT(ic, ic->ic_name);

        sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
            sizeof(struct ieee80211_superg), M_80211_VAP,
            IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
        if (sg == NULL) {
                printf("%s: cannot allocate SuperG state block\n",
                    __func__);
                return;
        }
        TIMEOUT_TASK_INIT(ic->ic_tq, &sg->ff_qtimer, 0, ff_age_all, ic);
        ic->ic_superg = sg;

        /*
         * Default to not being so aggressive for FF/AMSDU
         * aging, otherwise we may hold a frame around
         * for way too long before we expire it out.
         */
        ieee80211_ffagemax = msecs_to_ticks(2);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{

        if (ic->ic_superg != NULL) {
                struct timeout_task *qtask = &ic->ic_superg->ff_qtimer;

                while (taskqueue_cancel_timeout(ic->ic_tq, qtask, NULL) != 0)
                        taskqueue_drain_timeout(ic->ic_tq, qtask);
                IEEE80211_FREE(ic->ic_superg, M_80211_VAP);
                ic->ic_superg = NULL;
        }
        IEEE80211_FF_LOCK_DESTROY(ic);
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
        struct ieee80211com *ic = vap->iv_ic;

        if (ic->ic_superg == NULL)      /* NB: can't do fast-frames w/o state */
                vap->iv_caps &= ~IEEE80211_C_FF;
        if (vap->iv_caps & IEEE80211_C_FF)
                vap->iv_flags |= IEEE80211_F_FF;
        /* NB: we only implement sta mode */
        if (vap->iv_opmode == IEEE80211_M_STA &&
            (vap->iv_caps & IEEE80211_C_TURBOP))
                vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define ATH_OUI_BYTES   0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
        static const struct ieee80211_ath_ie info = {
                .ath_id         = IEEE80211_ELEMID_VENDOR,
                .ath_len        = sizeof(struct ieee80211_ath_ie) - 2,
                .ath_oui        = { ATH_OUI_BYTES },
                .ath_oui_type   = ATH_OUI_TYPE,
                .ath_oui_subtype= ATH_OUI_SUBTYPE,
                .ath_version    = ATH_OUI_VERSION,
        };
        struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

        memcpy(frm, &info, sizeof(info));
        ath->ath_capability = caps;
        if (defkeyix != IEEE80211_KEYIX_NONE) {
                ath->ath_defkeyix[0] = (defkeyix & 0xff);
                ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
        } else {
                ath->ath_defkeyix[0] = 0xff;
                ath->ath_defkeyix[1] = 0x7f;
        }
        return frm + sizeof(info);
}
#undef ATH_OUI_BYTES
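/*
 * For reference, the element laid down above looks roughly like this
 * on the wire (field widths follow struct ieee80211_ath_ie; the OUI
 * type/subtype/version values come from ieee80211_superg.h):
 *
 *      [id: IEEE80211_ELEMID_VENDOR][len][OUI 00:03:7f]
 *      [oui_type][oui_subtype][version][capability][defkeyix (LE16)]
 *
 * with defkeyix set to 0x7fff when no default transmit key applies.
 */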

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
        const struct ieee80211vap *vap = bss->ni_vap;

        return ieee80211_add_ath(frm,
            vap->iv_flags & IEEE80211_F_ATHEROS,
            ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
             bss->ni_authmode != IEEE80211_AUTH_8021X) ?
            vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
        const struct ieee80211_ath_ie *ath =
            (const struct ieee80211_ath_ie *) ie;

        ni->ni_ath_flags = ath->ath_capability;
        ni->ni_ath_defkeyix = le16dec(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
    const struct ieee80211_frame *wh)
{
        struct ieee80211vap *vap = ni->ni_vap;
        const struct ieee80211_ath_ie *ath;
        u_int len = frm[1];
        int capschanged;
        uint16_t defkeyix;

        if (len < sizeof(struct ieee80211_ath_ie)-2) {
                IEEE80211_DISCARD_IE(vap,
                    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
                    wh, "Atheros", "too short, len %u", len);
                return -1;
        }
        ath = (const struct ieee80211_ath_ie *)frm;
        capschanged = (ni->ni_ath_flags != ath->ath_capability);
        defkeyix = le16dec(ath->ath_defkeyix);
        if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
                ni->ni_ath_flags = ath->ath_capability;
                ni->ni_ath_defkeyix = defkeyix;
                IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                    "ath ie change: new caps 0x%x defkeyix 0x%x",
                    ni->ni_ath_flags, ni->ni_ath_defkeyix);
        }
        if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
                uint16_t curflags, newflags;

                /*
                 * Check for turbo mode switch.  Calculate flags
                 * for the new mode and effect the switch.
                 */
                newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
                /* NB: BOOST is not in ic_flags, so get it from the ie */
                if (ath->ath_capability & ATHEROS_CAP_BOOST)
                        newflags |= IEEE80211_CHAN_TURBO;
                else
                        newflags &= ~IEEE80211_CHAN_TURBO;
                if (newflags != curflags)
                        ieee80211_dturbo_switch(vap, newflags);
        }
        return capschanged;
}

/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
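/*
 * A sketch of the tunnel payload this routine expects (after 802.11
 * header and crypto decap by the caller); sizes follow the format
 * notes at the top of this file:
 *
 *      [802.3 header][LLC/SNAP, ether type ATH_FF_ETH_TYPE]
 *      [32-bit fast-frame header][2 bytes pad]
 *      [frame 1: 802.3 header + LLC/SNAP + payload]
 *      [pad to a 4-byte boundary]
 *      [frame 2: 802.3 header + LLC/SNAP + payload]
 */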
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define FF_LLC_SIZE     (sizeof(struct ether_header) + sizeof(struct llc))
        struct ieee80211vap *vap = ni->ni_vap;
        struct llc *llc;
        uint32_t ath;
        struct mbuf *n;
        int framelen;

        /* NB: we assume caller does this check for us */
        KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
            ("ff not negotiated"));
        /*
         * Check for fast-frame tunnel encapsulation.
         */
        if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
                return m;
        if (m->m_len < FF_LLC_SIZE &&
            (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
                IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
                    ni->ni_macaddr, "fast-frame",
                    "%s", "m_pullup(llc) failed");
                vap->iv_stats.is_rx_tooshort++;
                return NULL;
        }
        llc = (struct llc *)(mtod(m, uint8_t *) +
            sizeof(struct ether_header));
        if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
                return m;
        m_adj(m, FF_LLC_SIZE);
        m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
        if (_IEEE80211_MASKSHIFT(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
                IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
                    ni->ni_macaddr, "fast-frame",
                    "unsupported tunnel protocol, header 0x%x", ath);
                vap->iv_stats.is_ff_badhdr++;
                m_freem(m);
                return NULL;
        }
        /* NB: skip header and alignment padding */
        m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

        vap->iv_stats.is_ff_decap++;

        /*
         * Decap the first frame, bust it apart from the
         * second and deliver; then decap the second frame
         * and return it to the caller for normal delivery.
         */
        m = ieee80211_decap1(m, &framelen);
        if (m == NULL) {
                IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
                    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
                vap->iv_stats.is_ff_tooshort++;
                return NULL;
        }
        n = m_split(m, framelen, IEEE80211_M_NOWAIT);
        if (n == NULL) {
                IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
                    ni->ni_macaddr, "fast-frame",
                    "%s", "unable to split encapsulated frames");
                vap->iv_stats.is_ff_split++;
                m_freem(m);                     /* NB: must reclaim */
                return NULL;
        }
        /* XXX not right for WDS */
        vap->iv_deliver_data(vap, ni, m);       /* 1st of pair */

        /*
         * Decap second frame.
         */
        m_adj(n, roundup2(framelen, 4) - framelen);     /* padding */
        n = ieee80211_decap1(n, &framelen);
        if (n == NULL) {
                IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
                    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
                vap->iv_stats.is_ff_tooshort++;
        }
        /* XXX verify framelen against mbuf contents */
        return n;                               /* 2nd delivered by caller */
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
    struct ieee80211_key *key)
{
        struct mbuf *m2;
        struct ether_header eh1, eh2;
        struct llc *llc;
        struct mbuf *m;
        int pad;

        m2 = m1->m_nextpkt;
        if (m2 == NULL) {
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: only one frame\n", __func__);
                goto bad;
        }
        m1->m_nextpkt = NULL;

        /*
         * Adjust to include 802.11 header requirement.
         */
        KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
        ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
        m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1);
        if (m1 == NULL) {
                printf("%s: failed initial mbuf_adjust\n", __func__);
                /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
                m_freem(m2);
                goto bad;
        }

        /*
         * Copy second frame's Ethernet header out of line
         * and adjust for possible padding in case there isn't room
         * at the end of first frame.
         */
        KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
        ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
        m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
        if (m2 == NULL) {
                /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
                printf("%s: failed second mbuf_adjust\n", __func__);
                goto bad;
        }

        /*
         * Now do tunnel encapsulation.  First, each
         * frame gets a standard encapsulation.
         */
        m1 = ieee80211_ff_encap1(vap, m1, &eh1);
        if (m1 == NULL)
                goto bad;
        m2 = ieee80211_ff_encap1(vap, m2, &eh2);
        if (m2 == NULL)
                goto bad;

        /*
         * Pad leading frame to a 4-byte boundary.  If there
         * is space at the end of the first frame, put it
         * there; otherwise prepend to the front of the second
         * frame.  We know doing the second will always work
         * because we reserve space above.  We prefer appending
         * as this typically has better DMA alignment properties.
         */
        for (m = m1; m->m_next != NULL; m = m->m_next)
                ;
        pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
        if (pad) {
                if (M_TRAILINGSPACE(m) < pad) {         /* prepend to second */
                        m2->m_data -= pad;
                        m2->m_len += pad;
                        m2->m_pkthdr.len += pad;
                } else {                                /* append to first */
                        m->m_len += pad;
                        m1->m_pkthdr.len += pad;
                }
        }

        /*
         * Now stick 'em together and prepend the tunnel headers:
         * first the Atheros fast-frame header (all zero for now)
         * plus two bytes of padding, then the LLC/SNAP header
         * carrying the Atheros ether type.
         */
        m->m_next = m2;                 /* NB: last mbuf from above */
        m1->m_pkthdr.len += m2->m_pkthdr.len;
        M_PREPEND(m1, sizeof(uint32_t)+2, IEEE80211_M_NOWAIT);
        if (m1 == NULL) {               /* XXX cannot happen */
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: no space for tunnel header\n", __func__);
                vap->iv_stats.is_tx_nobuf++;
                return NULL;
        }
        memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

        M_PREPEND(m1, sizeof(struct llc), IEEE80211_M_NOWAIT);
        if (m1 == NULL) {               /* XXX cannot happen */
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: no space for llc header\n", __func__);
                vap->iv_stats.is_tx_nobuf++;
                return NULL;
        }
        llc = mtod(m1, struct llc *);
        llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
        llc->llc_control = LLC_UI;
        llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
        llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
        llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
        llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

        vap->iv_stats.is_ff_encap++;

        return m1;
bad:
        vap->iv_stats.is_ff_encapfail++;
        if (m1 != NULL)
                m_freem(m1);
        if (m2 != NULL)
                m_freem(m2);
        return NULL;
}

/*
 * A-MSDU encapsulation.
 *
 * This assumes just two frames for now, since we're borrowing the
 * same queuing code and infrastructure as fast-frames.
 *
 * There must be two packets chained with m_nextpkt.
 * We do header adjustment for each, and then concatenate the mbuf chains
 * to form a single frame for transmission.
 */
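/*
 * Unlike fast-frames, no LLC/tunnel header is prepended here; the two
 * subframes are simply concatenated, each re-encapsulated by
 * ieee80211_ff_encap1() with the leading subframe padded to a 4-byte
 * boundary, and the "I'm an A-MSDU" indication is carried in the QoS
 * header set elsewhere in the transmit path.  Rough layout of the
 * result:
 *
 *      [subframe 1: 802.3 header + LLC/SNAP + payload][pad to 4]
 *      [subframe 2: 802.3 header + LLC/SNAP + payload]
 */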
struct mbuf *
ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
    struct ieee80211_key *key)
{
        struct mbuf *m2;
        struct ether_header eh1, eh2;
        struct mbuf *m;
        int pad;

        m2 = m1->m_nextpkt;
        if (m2 == NULL) {
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: only one frame\n", __func__);
                goto bad;
        }
        m1->m_nextpkt = NULL;

        /*
         * Include A-MSDU header in adjusting header layout.
         */
        KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
        ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
        m1 = ieee80211_mbuf_adjust(vap,
            hdrspace + sizeof(struct llc) + sizeof(uint32_t) +
            sizeof(struct ether_header),
            key, m1);
        if (m1 == NULL) {
                /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
                m_freem(m2);
                goto bad;
        }

        /*
         * Copy second frame's Ethernet header out of line
         * and adjust for encapsulation headers.  Note that
         * we make room for padding in case there isn't room
         * at the end of first frame.
         */
        KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
        ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
        m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
        if (m2 == NULL) {
                /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
                goto bad;
        }

        /*
         * Now do tunnel encapsulation.  First, each
         * frame gets a standard encapsulation.
         */
        m1 = ieee80211_ff_encap1(vap, m1, &eh1);
        if (m1 == NULL)
                goto bad;
        m2 = ieee80211_ff_encap1(vap, m2, &eh2);
        if (m2 == NULL)
                goto bad;

        /*
         * Pad leading frame to a 4-byte boundary.  If there
         * is space at the end of the first frame, put it
         * there; otherwise prepend to the front of the second
         * frame.  We know doing the second will always work
         * because we reserve space above.  We prefer appending
         * as this typically has better DMA alignment properties.
         */
        for (m = m1; m->m_next != NULL; m = m->m_next)
                ;
        pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
        if (pad) {
                if (M_TRAILINGSPACE(m) < pad) {         /* prepend to second */
                        m2->m_data -= pad;
                        m2->m_len += pad;
                        m2->m_pkthdr.len += pad;
                } else {                                /* append to first */
                        m->m_len += pad;
                        m1->m_pkthdr.len += pad;
                }
        }

        /*
         * Now, stick 'em together.
         */
        m->m_next = m2;                 /* NB: last mbuf from above */
        m1->m_pkthdr.len += m2->m_pkthdr.len;

        vap->iv_stats.is_amsdu_encap++;

        return m1;
bad:
        vap->iv_stats.is_amsdu_encapfail++;
        if (m1 != NULL)
                m_freem(m1);
        if (m2 != NULL)
                m_freem(m2);
        return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
        struct ieee80211vap *vap = ni->ni_vap;
        struct ieee80211com *ic = ni->ni_ic;

        IEEE80211_TX_LOCK_ASSERT(ic);

        /* encap and xmit */
        m = ieee80211_encap(vap, ni, m);
        if (m != NULL)
                (void) ieee80211_parent_xmitpkt(ic, m);
        else
                ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
        struct mbuf *m, *next;
        struct ieee80211_node *ni;
        struct ieee80211vap *vap;

        for (m = head; m != last; m = next) {
                next = m->m_nextpkt;
                m->m_nextpkt = NULL;

                ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
                vap = ni->ni_vap;

                IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
                vap->iv_stats.is_ff_flush++;

                ff_transmit(ni, m);
        }
}

/*
 * Age frames on the staging queue.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
        struct mbuf *m, *head;
        struct ieee80211_node *ni;

        IEEE80211_FF_LOCK(ic);
        if (sq->depth == 0) {
                IEEE80211_FF_UNLOCK(ic);
                return;         /* nothing to do */
        }

        KASSERT(sq->head != NULL, ("stageq empty"));

        head = sq->head;
        while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
                int tid = WME_AC_TO_TID(M_WME_GETAC(m));

                /* clear staging ref to frame */
                ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
                KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty"));
                ni->ni_tx_superg[tid] = NULL;

                sq->head = m->m_nextpkt;
                sq->depth--;
        }
        if (m == NULL)
                sq->tail = NULL;
        else
                M_AGE_SUB(m, quanta);
        IEEE80211_FF_UNLOCK(ic);

        IEEE80211_TX_LOCK(ic);
        ff_flush(head, m);
        IEEE80211_TX_UNLOCK(ic);
}

static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
        int age = ieee80211_ffagemax;

        IEEE80211_FF_LOCK_ASSERT(ic);

        if (sq->tail != NULL) {
                sq->tail->m_nextpkt = m;
                age -= M_AGE_GET(sq->head);
        } else {
                sq->head = m;

                struct timeout_task *qtask = &ic->ic_superg->ff_qtimer;
                taskqueue_enqueue_timeout(ic->ic_tq, qtask, age);
        }
        KASSERT(age >= 0, ("age %d", age));
        M_AGE_SET(m, age);
        m->m_nextpkt = NULL;
        sq->tail = m;
        sq->depth++;
}
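/*
 * A rough worked example of the age bookkeeping above (assuming
 * ieee80211_ffagemax is 8 ticks): the first frame staged is tagged
 * with the full budget, 8.  If ieee80211_ff_age() then runs with
 * quanta 3, nothing has expired yet and the head's tag drops to 5.
 * A second frame staged now is tagged 8 - 5 = 3, i.e. relative to the
 * same countdown as the head.  A later ieee80211_ff_age() with quanta
 * 6 finds both tags (5 and 3) below the quanta and flushes both.
 */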

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
        struct mbuf *m, *mprev;

        IEEE80211_FF_LOCK_ASSERT(ic);

        mprev = NULL;
        for (m = sq->head; m != NULL; m = m->m_nextpkt) {
                if (m == mstaged) {
                        if (mprev == NULL)
                                sq->head = m->m_nextpkt;
                        else
                                mprev->m_nextpkt = m->m_nextpkt;
                        if (sq->tail == m)
                                sq->tail = mprev;
                        sq->depth--;
                        return;
                }
                mprev = m;
        }
        printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
    const struct mbuf *m1, const struct mbuf *m2)
{
        struct ieee80211com *ic = ni->ni_ic;
        struct ieee80211vap *vap = ni->ni_vap;
        uint32_t framelen;
        uint32_t frame_time;

        /*
         * Approximate the frame length to be transmitted.  A swag to add
         * the following maximal values to the skb payload:
         *   - 32: 802.11 encap + CRC
         *   - 24: encryption overhead (if wep bit)
         *   - 4 + 6: fast-frame header and padding
         *   - 16: 2 LLC FF tunnel headers
         *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
         */
        framelen = m1->m_pkthdr.len + 32 +
            ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
        if (vap->iv_flags & IEEE80211_F_PRIVACY)
                framelen += 24;
        if (m2 != NULL)
                framelen += m2->m_pkthdr.len;

        /*
         * For now, we assume non-shortgi, 20MHz, just because I want to
         * at least test 802.11n.
         */
        if (ni->ni_txrate & IEEE80211_RATE_MCS)
                frame_time = ieee80211_compute_duration_ht(framelen,
                    ni->ni_txrate,
                    IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate),
                    0, /* isht40 */
                    0); /* isshortgi */
        else
                frame_time = ieee80211_compute_duration(ic->ic_rt, framelen,
                    ni->ni_txrate, 0);
        return (frame_time);
}
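/*
 * For example, aggregating two 1500-byte mbuf chains on a vap with
 * privacy enabled yields an estimate of
 *      1500 + 32 + 4 + 6 + 30 + 24 + 1500 = 3096 bytes
 * (ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR = 4 + 6 + 30),
 * which is then converted to an airtime estimate at the node's
 * current tx rate.
 */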

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
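/*
 * In rough outline (the code below is authoritative): multicast
 * frames outside sta mode are returned untouched; frames with no
 * staged partner on flows below the ffppsmin packet rate are likewise
 * returned untouched; if the combined estimated airtime would exceed
 * the AC's txop limit, any staged frame is flushed and the new frame
 * is returned for direct transmission; otherwise the frame is either
 * paired with the staged partner (chained via m_nextpkt and marked
 * M_FF) or staged to wait for one.
 */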
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
        struct ieee80211vap *vap = ni->ni_vap;
        struct ieee80211com *ic = ni->ni_ic;
        struct ieee80211_superg *sg = ic->ic_superg;
        const int pri = M_WME_GETAC(m);
        struct ieee80211_stageq *sq;
        struct ieee80211_tx_ampdu *tap;
        struct mbuf *mstaged;
        uint32_t txtime, limit;

        IEEE80211_TX_UNLOCK_ASSERT(ic);

        IEEE80211_LOCK(ic);
        limit = IEEE80211_TXOP_TO_US(
            ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
        IEEE80211_UNLOCK(ic);

        /*
         * Check if the supplied frame can be aggregated.
         *
         * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
         *     Do 802.1x EAPOL frames proceed in the clear?  Then they couldn't
         *     be aggregated with other types of frames when encryption is on?
         */
        IEEE80211_FF_LOCK(ic);
        tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
        mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)];
        /* XXX NOTE: reusing packet counter state from A-MPDU */
        /*
         * XXX NOTE: this means we're double-counting; it should just
         * be done in ieee80211_output.c once for both superg and A-MPDU.
         */
        ieee80211_txampdu_count_packet(tap);

        /*
         * When not in station mode never aggregate a multicast
         * frame; this insures, for example, that a combined frame
         * does not require multiple encryption keys.
         */
        if (vap->iv_opmode != IEEE80211_M_STA &&
            ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
                /* XXX flush staged frame? */
                IEEE80211_FF_UNLOCK(ic);
                return m;
        }
        /*
         * If there is no frame to combine with and the pps is
         * too low, then do not attempt to aggregate this frame.
         */
        if (mstaged == NULL &&
            ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
                IEEE80211_FF_UNLOCK(ic);
                return m;
        }
        sq = &sg->ff_stageq[pri];
        /*
         * Check the txop limit to insure the aggregate fits.
         */
        if (limit != 0 &&
            (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
                /*
                 * Aggregate too long, return to the caller for direct
                 * transmission.  In addition, flush any pending frame
                 * before sending this one.
                 */
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: txtime %u exceeds txop limit %u\n",
                    __func__, txtime, limit);

                ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
                if (mstaged != NULL)
                        stageq_remove(ic, sq, mstaged);
                IEEE80211_FF_UNLOCK(ic);

                if (mstaged != NULL) {
                        IEEE80211_TX_LOCK(ic);
                        IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                            "%s: flush staged frame", __func__);
                        /* encap and xmit */
                        ff_transmit(ni, mstaged);
                        IEEE80211_TX_UNLOCK(ic);
                }
                return m;               /* NB: original frame */
        }
        /*
         * An aggregation candidate.  If there's a frame to partner
         * with then combine and return for processing.  Otherwise
         * save this frame and wait for a partner to show up (or
         * the frame to be flushed).  Note that staged frames also
         * hold their node reference.
         */
        if (mstaged != NULL) {
                ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
                stageq_remove(ic, sq, mstaged);
                IEEE80211_FF_UNLOCK(ic);

                IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                    "%s: aggregate fast-frame", __func__);
                /*
                 * Release the node reference; we only need
                 * the one already in mstaged.
                 */
                KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
                    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
                ieee80211_free_node(ni);

                m->m_nextpkt = NULL;
                mstaged->m_nextpkt = m;
                mstaged->m_flags |= M_FF; /* NB: mark for encap work */
        } else {
                KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)] == NULL,
                    ("ni_tx_superg[]: %p",
                    ni->ni_tx_superg[WME_AC_TO_TID(pri)]));
                ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m;

                stageq_add(ic, sq, m);
                IEEE80211_FF_UNLOCK(ic);

                IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                    "%s: stage frame, %u queued", __func__, sq->depth);
                /* NB: mstaged is NULL */
        }
        return mstaged;
}

struct mbuf *
ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m)
{
        /*
         * XXX TODO: actually enforce the node support
         * and HTCAP requirements for the maximum A-MSDU
         * size.
         */

        /* First: software A-MSDU transmit? */
        if (! ieee80211_amsdu_tx_ok(ni))
                return (m);

        /* Next - EAPOL?  Nope, don't aggregate; we don't QoS encap them */
        if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST))
                return (m);

        /* Next - needs to be a data frame, non-broadcast, etc */
        if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
                return (m);

        return (ieee80211_ff_check(ni, m));
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
        /*
         * Clean FF state on re-associate.  This handles the case
         * where a station leaves w/o notifying us and then returns
         * before node is reaped for inactivity.
         */
        ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
        struct ieee80211com *ic = ni->ni_ic;
        struct ieee80211_superg *sg = ic->ic_superg;
        struct mbuf *m, *next_m, *head;
        int tid;

        IEEE80211_FF_LOCK(ic);
        head = NULL;
        for (tid = 0; tid < WME_NUM_TID; tid++) {
                int ac = TID_TO_WME_AC(tid);
                /*
                 * XXX Initialise the packet counter.
                 *
                 * This may be double-work for 11n stations;
                 * but without it we never setup things.
                 */
                ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]);
                m = ni->ni_tx_superg[tid];
                if (m != NULL) {
                        ni->ni_tx_superg[tid] = NULL;
                        stageq_remove(ic, &sg->ff_stageq[ac], m);
                        m->m_nextpkt = head;
                        head = m;
                }
        }
        IEEE80211_FF_UNLOCK(ic);

        /*
         * Free mbufs, taking care to not dereference the mbuf after
         * we free it (hence grabbing m_nextpkt before we free it.)
         */
        m = head;
        while (m != NULL) {
                next_m = m->m_nextpkt;
                m_freem(m);
                ieee80211_free_node(ni);
                m = next_m;
        }
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
        struct ieee80211com *ic = vap->iv_ic;
        struct ieee80211_channel *chan;

        chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
        if (chan == NULL) {             /* XXX should not happen */
                IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
                    "%s: no channel with freq %u flags 0x%x\n",
                    __func__, ic->ic_bsschan->ic_freq, newflags);
                return;
        }

        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
            ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
            ieee80211_phymode_name[ieee80211_chan2mode(chan)],
            chan->ic_freq, chan->ic_flags);

        ic->ic_bsschan = chan;
        ic->ic_prevchan = ic->ic_curchan;
        ic->ic_curchan = chan;
        ic->ic_rt = ieee80211_get_ratetable(chan);
        ic->ic_set_channel(ic);
        ieee80211_radiotap_chan_change(ic);
        /* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting; otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
        if (vap->iv_opmode == IEEE80211_M_STA &&
            vap->iv_state == IEEE80211_S_RUN)
                return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
        else
                return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
        switch (ireq->i_type) {
        case IEEE80211_IOC_FF:
                ireq->i_val = getathcap(vap, IEEE80211_F_FF);
                break;
        case IEEE80211_IOC_TURBOP:
                ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
                break;
        default:
                return ENOSYS;
        }
        return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
        switch (ireq->i_type) {
        case IEEE80211_IOC_FF:
                if (ireq->i_val) {
                        if ((vap->iv_caps & IEEE80211_C_FF) == 0)
                                return EOPNOTSUPP;
                        vap->iv_flags |= IEEE80211_F_FF;
                } else
                        vap->iv_flags &= ~IEEE80211_F_FF;
                return ENETRESET;
        case IEEE80211_IOC_TURBOP:
                if (ireq->i_val) {
                        if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
                                return EOPNOTSUPP;
                        vap->iv_flags |= IEEE80211_F_TURBOP;
                } else
                        vap->iv_flags &= ~IEEE80211_F_TURBOP;
                return ENETRESET;
        default:
                return ENOSYS;
        }
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);
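/*
 * These handlers are reached through the generic net80211 ioctl path;
 * roughly, a userland tool fills in a struct ieee80211req and issues
 * SIOCG80211/SIOCS80211, e.g. (a sketch only, error handling omitted,
 * s being an open socket descriptor):
 *
 *      struct ieee80211req ireq;
 *
 *      memset(&ireq, 0, sizeof(ireq));
 *      strlcpy(ireq.i_name, "wlan0", sizeof(ireq.i_name));
 *      ireq.i_type = IEEE80211_IOC_FF;
 *      ireq.i_val = 1;
 *      ioctl(s, SIOCS80211, &ireq);
 *
 * The ENETRESET return above tells the net80211 ioctl layer that the
 * running state must be reset for the change to take effect.
 */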

#endif /* IEEE80211_SUPPORT_SUPERG */