xref: /freebsd/sys/net80211/ieee80211_superg.c (revision 955c8cbb4960e6cf3602de144b1b9154a5092968)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  *
14  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24  */
25 
26 #include <sys/cdefs.h>
27 __FBSDID("$FreeBSD$");
28 
29 #include "opt_wlan.h"
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/mbuf.h>
34 #include <sys/kernel.h>
35 #include <sys/endian.h>
36 
37 #include <sys/socket.h>
38 
39 #include <net/bpf.h>
40 #include <net/ethernet.h>
41 #include <net/if.h>
42 #include <net/if_llc.h>
43 #include <net/if_media.h>
44 
45 #include <net80211/ieee80211_var.h>
46 #include <net80211/ieee80211_input.h>
47 #include <net80211/ieee80211_phy.h>
48 #include <net80211/ieee80211_superg.h>
49 
50 /*
51  * Atheros fast-frame encapsulation format.
52  * FF max payload:
53  * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
54  *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
55  * = 3066
56  */
57 /* fast frame header is 32 bits */
58 #define	ATH_FF_PROTO	0x0000003f	/* protocol */
59 #define	ATH_FF_PROTO_S	0
60 #define	ATH_FF_FTYPE	0x000000c0	/* frame type */
61 #define	ATH_FF_FTYPE_S	6
62 #define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
63 #define	ATH_FF_HLEN32_S	8
64 #define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
65 #define	ATH_FF_SEQNUM_S	10
66 #define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
67 #define	ATH_FF_OFFSET_S	21
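/*
 * Illustrative layout of the 32-bit header defined above (msb first):
 *
 *	OFFSET[31:21] | SEQNUM[20:10] | HLEN32[9:8] | FTYPE[7:6] | PROTO[5:0]
 */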
68 
69 #define	ATH_FF_MAX_HDR_PAD	4
70 #define	ATH_FF_MAX_SEP_PAD	6
71 #define	ATH_FF_MAX_HDR		30
72 
73 #define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
74 #define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
75 #define	ATH_FF_SNAP_ORGCODE_0	0x00
76 #define	ATH_FF_SNAP_ORGCODE_1	0x03
77 #define	ATH_FF_SNAP_ORGCODE_2	0x7f
78 
79 #define	ATH_FF_TXQMIN	2		/* min txq depth for staging */
80 #define	ATH_FF_TXQMAX	50		/* maximum # of queued frames allowed */
81 #define	ATH_FF_STAGEMAX	5		/* max waiting period for staged frame */
82 
83 #define	ETHER_HEADER_COPY(dst, src) \
84 	memcpy(dst, src, sizeof(struct ether_header))
85 
86 static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
87 SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLTYPE_INT | CTLFLAG_RW,
88 	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
89 static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
90 SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
91 	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
92 	"max hold time for fast-frame staging (ms)");
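/*
 * Both knobs are exposed under net.wlan; e.g. "sysctl net.wlan.ffppsmin=4"
 * raises the packet-rate threshold, while "sysctl net.wlan.ffagemax=100"
 * caps the staging delay at roughly 100 ms (the proc handler converts
 * between milliseconds and ticks).
 */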
93 
94 void
95 ieee80211_superg_attach(struct ieee80211com *ic)
96 {
97 	struct ieee80211_superg *sg;
98 
99 	if (ic->ic_caps & IEEE80211_C_FF) {
100 		sg = (struct ieee80211_superg *) malloc(
101 		     sizeof(struct ieee80211_superg), M_80211_VAP,
102 		     M_NOWAIT | M_ZERO);
103 		if (sg == NULL) {
104 			printf("%s: cannot allocate SuperG state block\n",
105 			    __func__);
106 			return;
107 		}
108 		ic->ic_superg = sg;
109 	}
110 	ieee80211_ffagemax = msecs_to_ticks(150);
111 }
112 
113 void
114 ieee80211_superg_detach(struct ieee80211com *ic)
115 {
116 	if (ic->ic_superg != NULL) {
117 		free(ic->ic_superg, M_80211_VAP);
118 		ic->ic_superg = NULL;
119 	}
120 }
121 
122 void
123 ieee80211_superg_vattach(struct ieee80211vap *vap)
124 {
125 	struct ieee80211com *ic = vap->iv_ic;
126 
127 	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
128 		vap->iv_caps &= ~IEEE80211_C_FF;
129 	if (vap->iv_caps & IEEE80211_C_FF)
130 		vap->iv_flags |= IEEE80211_F_FF;
131 	/* NB: we only implement sta mode */
132 	if (vap->iv_opmode == IEEE80211_M_STA &&
133 	    (vap->iv_caps & IEEE80211_C_TURBOP))
134 		vap->iv_flags |= IEEE80211_F_TURBOP;
135 }
136 
137 void
138 ieee80211_superg_vdetach(struct ieee80211vap *vap)
139 {
140 }
141 
142 #define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
143 /*
144  * Add an Atheros vendor-specific information element to a frame.
145  */
146 uint8_t *
147 ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
148 {
149 	static const struct ieee80211_ath_ie info = {
150 		.ath_id		= IEEE80211_ELEMID_VENDOR,
151 		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
152 		.ath_oui	= { ATH_OUI_BYTES },
153 		.ath_oui_type	= ATH_OUI_TYPE,
154 		.ath_oui_subtype= ATH_OUI_SUBTYPE,
155 		.ath_version	= ATH_OUI_VERSION,
156 	};
157 	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;
158 
159 	memcpy(frm, &info, sizeof(info));
160 	ath->ath_capability = caps;
161 	if (defkeyix != IEEE80211_KEYIX_NONE) {
162 		ath->ath_defkeyix[0] = (defkeyix & 0xff);
163 		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
164 	} else {
165 		ath->ath_defkeyix[0] = 0xff;
166 		ath->ath_defkeyix[1] = 0x7f;
167 	}
168 	return frm + sizeof(info);
169 }
170 #undef ATH_OUI_BYTES
171 
172 uint8_t *
173 ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
174 {
175 	const struct ieee80211vap *vap = bss->ni_vap;
176 
177 	return ieee80211_add_ath(frm,
178 	    vap->iv_flags & IEEE80211_F_ATHEROS,
179 	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
180 	    bss->ni_authmode != IEEE80211_AUTH_8021X) ?
181 	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
182 }
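/*
 * Typical use (a sketch, not taken from a specific caller): when
 * building a management frame body, append the Atheros IE only if
 * Atheros features are enabled, with `frm' pointing at the next free
 * byte:
 *
 *	if (vap->iv_flags & IEEE80211_F_ATHEROS)
 *		frm = ieee80211_add_athcaps(frm, ni);
 */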
183 
184 void
185 ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
186 {
187 	const struct ieee80211_ath_ie *ath =
188 		(const struct ieee80211_ath_ie *) ie;
189 
190 	ni->ni_ath_flags = ath->ath_capability;
191 	ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
192 }
193 
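/*
 * Parse an Atheros IE from a received frame and update the node state.
 * Returns -1 if the IE is malformed (too short), otherwise non-zero if
 * the advertised capabilities changed and 0 if they did not.
 */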
194 int
195 ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
196 	const struct ieee80211_frame *wh)
197 {
198 	struct ieee80211vap *vap = ni->ni_vap;
199 	const struct ieee80211_ath_ie *ath;
200 	u_int len = frm[1];
201 	int capschanged;
202 	uint16_t defkeyix;
203 
204 	if (len < sizeof(struct ieee80211_ath_ie)-2) {
205 		IEEE80211_DISCARD_IE(vap,
206 		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
207 		    wh, "Atheros", "too short, len %u", len);
208 		return -1;
209 	}
210 	ath = (const struct ieee80211_ath_ie *)frm;
211 	capschanged = (ni->ni_ath_flags != ath->ath_capability);
212 	defkeyix = LE_READ_2(ath->ath_defkeyix);
213 	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
214 		ni->ni_ath_flags = ath->ath_capability;
215 		ni->ni_ath_defkeyix = defkeyix;
216 		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
217 		    "ath ie change: new caps 0x%x defkeyix 0x%x",
218 		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
219 	}
220 	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
221 		uint16_t curflags, newflags;
222 
223 		/*
224 		 * Check for turbo mode switch.  Calculate flags
225 		 * for the new mode and effect the switch.
226 		 */
227 		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
228 		/* NB: BOOST is not in ic_flags, so get it from the ie */
229 		if (ath->ath_capability & ATHEROS_CAP_BOOST)
230 			newflags |= IEEE80211_CHAN_TURBO;
231 		else
232 			newflags &= ~IEEE80211_CHAN_TURBO;
233 		if (newflags != curflags)
234 			ieee80211_dturbo_switch(vap, newflags);
235 	}
236 	return capschanged;
237 }
238 
239 /*
240  * Decap the encapsulated frame pair and dispatch the first
241  * for delivery.  The second frame is returned for delivery
242  * via the normal path.
243  */
244 struct mbuf *
245 ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
246 {
247 #define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
248 #define	MS(x,f)	(((x) & f) >> f##_S)
249 	struct ieee80211vap *vap = ni->ni_vap;
250 	struct llc *llc;
251 	uint32_t ath;
252 	struct mbuf *n;
253 	int framelen;
254 
255 	/* NB: we assume caller does this check for us */
256 	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
257 	    ("ff not negotiated"));
258 	/*
259 	 * Check for fast-frame tunnel encapsulation.
260 	 */
261 	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
262 		return m;
263 	if (m->m_len < FF_LLC_SIZE &&
264 	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
265 		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
266 		    ni->ni_macaddr, "fast-frame",
267 		    "%s", "m_pullup(llc) failed");
268 		vap->iv_stats.is_rx_tooshort++;
269 		return NULL;
270 	}
271 	llc = (struct llc *)(mtod(m, uint8_t *) +
272 	    sizeof(struct ether_header));
273 	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
274 		return m;
275 	m_adj(m, FF_LLC_SIZE);
276 	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
277 	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
278 		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
279 		    ni->ni_macaddr, "fast-frame",
280 		    "unsupported tunnel protocol, header 0x%x", ath);
281 		vap->iv_stats.is_ff_badhdr++;
282 		m_freem(m);
283 		return NULL;
284 	}
285 	/* NB: skip header and alignment padding */
286 	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);
287 
288 	vap->iv_stats.is_ff_decap++;
289 
290 	/*
291 	 * Decap the first frame, bust it apart from the
292 	 * second and deliver; then decap the second frame
293 	 * and return it to the caller for normal delivery.
294 	 */
295 	m = ieee80211_decap1(m, &framelen);
296 	if (m == NULL) {
297 		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
298 		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
299 		vap->iv_stats.is_ff_tooshort++;
300 		return NULL;
301 	}
302 	n = m_split(m, framelen, M_NOWAIT);
303 	if (n == NULL) {
304 		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
305 		    ni->ni_macaddr, "fast-frame",
306 		    "%s", "unable to split encapsulated frames");
307 		vap->iv_stats.is_ff_split++;
308 		m_freem(m);			/* NB: must reclaim */
309 		return NULL;
310 	}
311 	/* XXX not right for WDS */
312 	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */
313 
314 	/*
315 	 * Decap second frame.
316 	 */
317 	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
318 	n = ieee80211_decap1(n, &framelen);
319 	if (n == NULL) {
320 		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
321 		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
322 		vap->iv_stats.is_ff_tooshort++;
323 	}
324 	/* XXX verify framelen against mbuf contents */
325 	return n;				/* 2nd delivered by caller */
326 #undef MS
327 #undef FF_LLC_SIZE
328 }
329 
330 /*
331  * Do Ethernet-LLC encapsulation for each payload in a fast frame
332  * tunnel encapsulation.  Each frame is assumed to have an Ethernet
333  * header at the front; that header is stripped, an LLC/SNAP header is
334  * prepended in its place, and then the Ethernet header passed in is
335  * prepended with its type field rewritten to the payload length.
336  */
337 static struct mbuf *
338 ff_encap1(struct ieee80211vap *vap, struct mbuf *m,
339 	const struct ether_header *eh)
340 {
341 	struct llc *llc;
342 	uint16_t payload;
343 
344 	/* XXX optimize by combining m_adj+M_PREPEND */
345 	m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
346 	llc = mtod(m, struct llc *);
347 	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
348 	llc->llc_control = LLC_UI;
349 	llc->llc_snap.org_code[0] = 0;
350 	llc->llc_snap.org_code[1] = 0;
351 	llc->llc_snap.org_code[2] = 0;
352 	llc->llc_snap.ether_type = eh->ether_type;
353 	payload = m->m_pkthdr.len;		/* NB: w/o Ethernet header */
354 
355 	M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
356 	if (m == NULL) {		/* XXX cannot happen */
357 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
358 			"%s: no space for ether_header\n", __func__);
359 		vap->iv_stats.is_tx_nobuf++;
360 		return NULL;
361 	}
362 	ETHER_HEADER_COPY(mtod(m, void *), eh);
363 	mtod(m, struct ether_header *)->ether_type = htons(payload);
364 	return m;
365 }
366 
367 /*
368  * Fast frame encapsulation.  There must be two packets
369  * chained with m_nextpkt.  We do header adjustment for
370  * each, add the tunnel encapsulation, and then concatenate
371  * the mbuf chains to form a single frame for transmission.
372  */
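/*
 * For reference, the frame built below is laid out as described by the
 * "FF max payload" comment at the top of this file: the fast-frame
 * LLC/SNAP (ether type ATH_FF_ETH_TYPE), the 32-bit Atheros tunnel
 * header plus two pad bytes (all zero here), the first LLC-encapsulated
 * payload padded to a 4-byte boundary, then the second payload.
 */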
373 struct mbuf *
374 ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
375 	struct ieee80211_key *key)
376 {
377 	struct mbuf *m2;
378 	struct ether_header eh1, eh2;
379 	struct llc *llc;
380 	struct mbuf *m;
381 	int pad;
382 
383 	m2 = m1->m_nextpkt;
384 	if (m2 == NULL) {
385 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
386 		    "%s: only one frame\n", __func__);
387 		goto bad;
388 	}
389 	m1->m_nextpkt = NULL;
390 	/*
391 	 * Include fast frame headers in adjusting header layout.
392 	 */
393 	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
394 	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
395 	m1 = ieee80211_mbuf_adjust(vap,
396 		hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
397 		    sizeof(struct ether_header),
398 		key, m1);
399 	if (m1 == NULL) {
400 		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
401 		m_freem(m2);
402 		goto bad;
403 	}
404 
405 	/*
406 	 * Copy second frame's Ethernet header out of line
407 	 * and adjust for encapsulation headers.  Note that
408 	 * we make room for padding in case there isn't room
409  * at the end of the first frame.
410 	 */
411 	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
412 	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
413 	m2 = ieee80211_mbuf_adjust(vap,
414 		ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
415 		NULL, m2);
416 	if (m2 == NULL) {
417 		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
418 		goto bad;
419 	}
420 
421 	/*
422 	 * Now do tunnel encapsulation.  First, each
423 	 * frame gets a standard encapsulation.
424 	 */
425 	m1 = ff_encap1(vap, m1, &eh1);
426 	if (m1 == NULL)
427 		goto bad;
428 	m2 = ff_encap1(vap, m2, &eh2);
429 	if (m2 == NULL)
430 		goto bad;
431 
432 	/*
433 	 * Pad leading frame to a 4-byte boundary.  If there
434 	 * is space at the end of the first frame, put it
435 	 * there; otherwise prepend to the front of the second
436 	 * frame.  We know doing the second will always work
437 	 * because we reserve space above.  We prefer appending
438 	 * as this typically has better DMA alignment properties.
439 	 */
440 	for (m = m1; m->m_next != NULL; m = m->m_next)
441 		;
442 	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
443 	if (pad) {
444 		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
445 			m2->m_data -= pad;
446 			m2->m_len += pad;
447 			m2->m_pkthdr.len += pad;
448 		} else {				/* append to first */
449 			m->m_len += pad;
450 			m1->m_pkthdr.len += pad;
451 		}
452 	}
453 
454 	/*
455 	 * Now, stick 'em together and prepend the tunnel headers;
456 	 * first the Atheros tunnel header (all zero for now) and
457 	 * then a special fast frame LLC.
458 	 *
459 	 * XXX optimize by prepending together
460 	 */
461 	m->m_next = m2;			/* NB: last mbuf from above */
462 	m1->m_pkthdr.len += m2->m_pkthdr.len;
463 	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
464 	if (m1 == NULL) {		/* XXX cannot happen */
465 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
466 		    "%s: no space for tunnel header\n", __func__);
467 		vap->iv_stats.is_tx_nobuf++;
468 		return NULL;
469 	}
470 	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);
471 
472 	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
473 	if (m1 == NULL) {		/* XXX cannot happen */
474 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
475 		    "%s: no space for llc header\n", __func__);
476 		vap->iv_stats.is_tx_nobuf++;
477 		return NULL;
478 	}
479 	llc = mtod(m1, struct llc *);
480 	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
481 	llc->llc_control = LLC_UI;
482 	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
483 	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
484 	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
485 	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);
486 
487 	vap->iv_stats.is_ff_encap++;
488 
489 	return m1;
490 bad:
491 	if (m1 != NULL)
492 		m_freem(m1);
493 	if (m2 != NULL)
494 		m_freem(m2);
495 	return NULL;
496 }
497 
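/*
 * Encapsulate a (possibly aggregated) frame and hand it to the parent
 * interface.  NB: the node reference stashed in m_pkthdr.rcvif by the
 * staging code travels with the mbuf; it is released here on failure
 * and, by the usual net80211 convention, by the driver on transmit
 * completion otherwise.
 */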
498 static void
499 ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
500 {
501 	struct ieee80211vap *vap = ni->ni_vap;
502 	int error;
503 
504 	/* encap and xmit */
505 	m = ieee80211_encap(vap, ni, m);
506 	if (m != NULL) {
507 		struct ifnet *ifp = vap->iv_ifp;
508 		struct ifnet *parent = ni->ni_ic->ic_ifp;
509 
510 		error = parent->if_transmit(parent, m);
511 		if (error != 0) {
512 			/* NB: if_transmit reclaims the mbuf on error */
513 			ieee80211_free_node(ni);
514 		} else {
515 			ifp->if_opackets++;
516 		}
517 	} else
518 		ieee80211_free_node(ni);
519 }
520 
521 /*
522  * Flush frames to the device; note we reuse the linked list the
523  * frames were stored on and stop at the sentinel (unchanged),
524  * which may be non-NULL.
525  */
526 static void
527 ff_flush(struct mbuf *head, struct mbuf *last)
528 {
529 	struct mbuf *m, *next;
530 	struct ieee80211_node *ni;
531 	struct ieee80211vap *vap;
532 
533 	for (m = head; m != last; m = next) {
534 		next = m->m_nextpkt;
535 		m->m_nextpkt = NULL;
536 
537 		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
538 		vap = ni->ni_vap;
539 
540 		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
541 		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
542 		vap->iv_stats.is_ff_flush++;
543 
544 		ff_transmit(ni, m);
545 	}
546 }
547 
548 /*
549  * Age frames on the staging queue.
550  *
551  * This is called without the comlock held, but it does all its work
552  * behind the comlock.  Because of this, it's possible that the
553  * staging queue will be serviced between the function which called
554  * it and now; thus simply checking that the queue has work in it
555  * may fail.
556  *
557  * See PR kern/174283 for more details.
558  */
559 void
560 ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
561     int quanta)
562 {
563 	struct mbuf *m, *head;
564 	struct ieee80211_node *ni;
565 	struct ieee80211_tx_ampdu *tap;
566 
567 #if 0
568 	KASSERT(sq->head != NULL, ("stageq empty"));
569 #endif
570 
571 	IEEE80211_LOCK(ic);
572 	head = sq->head;
573 	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
574 		int tid = WME_AC_TO_TID(M_WME_GETAC(m));
575 
576 		/* clear tap ref to frame */
577 		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
578 		tap = &ni->ni_tx_ampdu[tid];
579 		KASSERT(tap->txa_private == m, ("staging queue empty"));
580 		tap->txa_private = NULL;
581 
582 		sq->head = m->m_nextpkt;
583 		sq->depth--;
584 	}
585 	if (m == NULL)
586 		sq->tail = NULL;
587 	else
588 		M_AGE_SUB(m, quanta);
589 	IEEE80211_UNLOCK(ic);
590 
591 	ff_flush(head, m);
592 }
593 
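/*
 * Append a frame to the staging queue.  NB: ages are stored as deltas
 * relative to the frame(s) ahead in the queue, so only the head needs
 * to be decremented as time passes (see ieee80211_ff_age above).
 */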
594 static void
595 stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
596 {
597 	int age = ieee80211_ffagemax;
598 
599 	IEEE80211_LOCK_ASSERT(ic);
600 
601 	if (sq->tail != NULL) {
602 		sq->tail->m_nextpkt = m;
603 		age -= M_AGE_GET(sq->head);
604 	} else
605 		sq->head = m;
606 	KASSERT(age >= 0, ("age %d", age));
607 	M_AGE_SET(m, age);
608 	m->m_nextpkt = NULL;
609 	sq->tail = m;
610 	sq->depth++;
611 }
612 
613 static void
614 stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
615 {
616 	struct mbuf *m, *mprev;
617 
618 	IEEE80211_LOCK_ASSERT(ic);
619 
620 	mprev = NULL;
621 	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
622 		if (m == mstaged) {
623 			if (mprev == NULL)
624 				sq->head = m->m_nextpkt;
625 			else
626 				mprev->m_nextpkt = m->m_nextpkt;
627 			if (sq->tail == m)
628 				sq->tail = mprev;
629 			sq->depth--;
630 			return;
631 		}
632 		mprev = m;
633 	}
634 	printf("%s: packet not found\n", __func__);
635 }
636 
637 static uint32_t
638 ff_approx_txtime(struct ieee80211_node *ni,
639 	const struct mbuf *m1, const struct mbuf *m2)
640 {
641 	struct ieee80211com *ic = ni->ni_ic;
642 	struct ieee80211vap *vap = ni->ni_vap;
643 	uint32_t framelen;
644 
645 	/*
646 	 * Approximate the frame length to be transmitted: a rough estimate
647 	 * that adds the following maximal values to the mbuf payload:
648 	 *   - 32: 802.11 encap + CRC
649 	 *   - 24: encryption overhead (if wep bit)
650 	 *   - 4 + 6: fast-frame header and padding
651 	 *   - 16: 2 LLC FF tunnel headers
652 	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
653 	 */
654 	framelen = m1->m_pkthdr.len + 32 +
655 	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
656 	if (vap->iv_flags & IEEE80211_F_PRIVACY)
657 		framelen += 24;
658 	if (m2 != NULL)
659 		framelen += m2->m_pkthdr.len;
660 	return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
661 }
662 
663 /*
664  * Check if the supplied frame can be partnered with an existing
665  * or pending frame.  Return any frame that should be sent now (a staged
666  * pair comes chained via m_nextpkt and marked M_FF); otherwise return NULL.
667  */
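/*
 * Usage sketch (hypothetical caller, simplified): a transmit path that
 * supports fast frames might run each outbound frame through the
 * staging check (a NULL result means the frame was staged) and later
 * encapsulate marked pairs:
 *
 *	if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
 *		m = ieee80211_ff_check(ni, m);
 *		if (m == NULL)
 *			return;
 *	}
 *	...
 *	if (m->m_flags & M_FF)
 *		m = ieee80211_ff_encap(vap, m, hdrspace, key);
 */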
668 struct mbuf *
669 ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
670 {
671 	struct ieee80211vap *vap = ni->ni_vap;
672 	struct ieee80211com *ic = ni->ni_ic;
673 	struct ieee80211_superg *sg = ic->ic_superg;
674 	const int pri = M_WME_GETAC(m);
675 	struct ieee80211_stageq *sq;
676 	struct ieee80211_tx_ampdu *tap;
677 	struct mbuf *mstaged;
678 	uint32_t txtime, limit;
679 
680 	/*
681 	 * Check if the supplied frame can be aggregated.
682 	 *
683 	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
684 	 *     Do 802.1x EAPOL frames go out in the clear?  If so, could they
685 	 *     be aggregated with other frame types when encryption is on?
686 	 */
687 	IEEE80211_LOCK(ic);
688 	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
689 	mstaged = tap->txa_private;		/* NB: we reuse AMPDU state */
690 	ieee80211_txampdu_count_packet(tap);
691 
692 	/*
693 	 * When not in station mode, never aggregate a multicast
694 	 * frame; this ensures, for example, that a combined frame
695 	 * does not require multiple encryption keys.
696 	 */
697 	if (vap->iv_opmode != IEEE80211_M_STA &&
698 	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
699 		/* XXX flush staged frame? */
700 		IEEE80211_UNLOCK(ic);
701 		return m;
702 	}
703 	/*
704 	 * If there is no frame to combine with and the pps is
705 	 * too low, do not attempt to aggregate this frame.
706 	 */
707 	if (mstaged == NULL &&
708 	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
709 		IEEE80211_UNLOCK(ic);
710 		return m;
711 	}
712 	sq = &sg->ff_stageq[pri];
713 	/*
714 	 * Check the txop limit to ensure the aggregate fits.
715 	 */
716 	limit = IEEE80211_TXOP_TO_US(
717 		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
718 	if (limit != 0 &&
719 	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
720 		/*
721 		 * Aggregate too long, return to the caller for direct
722 		 * transmission.  In addition, flush any pending frame
723 		 * before sending this one.
724 		 */
725 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
726 		    "%s: txtime %u exceeds txop limit %u\n",
727 		    __func__, txtime, limit);
728 
729 		tap->txa_private = NULL;
730 		if (mstaged != NULL)
731 			stageq_remove(ic, sq, mstaged);
732 		IEEE80211_UNLOCK(ic);
733 
734 		if (mstaged != NULL) {
735 			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
736 			    "%s: flush staged frame", __func__);
737 			/* encap and xmit */
738 			ff_transmit(ni, mstaged);
739 		}
740 		return m;		/* NB: original frame */
741 	}
742 	/*
743 	 * An aggregation candidate.  If there's a frame to partner
744 	 * with, combine and return for processing.  Otherwise
745 	 * save this frame and wait for a partner to show up (or
746 	 * the frame to be flushed).  Note that staged frames also
747 	 * hold their node reference.
748 	 */
749 	if (mstaged != NULL) {
750 		tap->txa_private = NULL;
751 		stageq_remove(ic, sq, mstaged);
752 		IEEE80211_UNLOCK(ic);
753 
754 		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
755 		    "%s: aggregate fast-frame", __func__);
756 		/*
757 		 * Release the node reference; we only need
758 		 * the one already in mstaged.
759 		 */
760 		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
761 		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
762 		ieee80211_free_node(ni);
763 
764 		m->m_nextpkt = NULL;
765 		mstaged->m_nextpkt = m;
766 		mstaged->m_flags |= M_FF; /* NB: mark for encap work */
767 	} else {
768 		KASSERT(tap->txa_private == NULL,
769 		    ("txa_private %p", tap->txa_private));
770 		tap->txa_private = m;
771 
772 		stageq_add(ic, sq, m);
773 		IEEE80211_UNLOCK(ic);
774 
775 		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
776 		    "%s: stage frame, %u queued", __func__, sq->depth);
777 		/* NB: mstaged is NULL */
778 	}
779 	return mstaged;
780 }
781 
782 void
783 ieee80211_ff_node_init(struct ieee80211_node *ni)
784 {
785 	/*
786 	 * Clean FF state on re-associate.  This handles the case
787 	 * where a station leaves w/o notifying us and then returns
788 	 * before the node is reaped for inactivity.
789 	 */
790 	ieee80211_ff_node_cleanup(ni);
791 }
792 
793 void
794 ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
795 {
796 	struct ieee80211com *ic = ni->ni_ic;
797 	struct ieee80211_superg *sg = ic->ic_superg;
798 	struct ieee80211_tx_ampdu *tap;
799 	struct mbuf *m, *next_m, *head;
800 	int tid;
801 
802 	IEEE80211_LOCK(ic);
803 	head = NULL;
804 	for (tid = 0; tid < WME_NUM_TID; tid++) {
805 		int ac = TID_TO_WME_AC(tid);
806 
807 		tap = &ni->ni_tx_ampdu[tid];
808 		m = tap->txa_private;
809 		if (m != NULL) {
810 			tap->txa_private = NULL;
811 			stageq_remove(ic, &sg->ff_stageq[ac], m);
812 			m->m_nextpkt = head;
813 			head = m;
814 		}
815 	}
816 	IEEE80211_UNLOCK(ic);
817 
818 	/*
819 	 * Free the mbufs, taking care not to dereference an mbuf after
820 	 * freeing it (hence m_nextpkt is grabbed before the free).
821 	 */
822 	m = head;
823 	while (m != NULL) {
824 		next_m = m->m_nextpkt;
825 		m_freem(m);
826 		ieee80211_free_node(ni);
827 		m = next_m;
828 	}
829 }
830 
831 /*
832  * Switch between turbo and non-turbo operating modes.
833  * Use the specified channel flags to locate the new
834  * channel, update 802.11 state, and then call back into
835  * the driver to effect the change.
836  */
837 void
838 ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
839 {
840 	struct ieee80211com *ic = vap->iv_ic;
841 	struct ieee80211_channel *chan;
842 
843 	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
844 	if (chan == NULL) {		/* XXX should not happen */
845 		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
846 		    "%s: no channel with freq %u flags 0x%x\n",
847 		    __func__, ic->ic_bsschan->ic_freq, newflags);
848 		return;
849 	}
850 
851 	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
852 	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
853 	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
854 	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
855 	    chan->ic_freq, chan->ic_flags);
856 
857 	ic->ic_bsschan = chan;
858 	ic->ic_prevchan = ic->ic_curchan;
859 	ic->ic_curchan = chan;
860 	ic->ic_rt = ieee80211_get_ratetable(chan);
861 	ic->ic_set_channel(ic);
862 	ieee80211_radiotap_chan_change(ic);
863 	/* NB: no need to reset ERP state because we're in sta mode */
864 }
865 
866 /*
867  * Return the current ``state'' of an Atheros capability.
868  * If associated in station mode, report the negotiated
869  * setting. Otherwise report the current setting.
870  */
871 static int
872 getathcap(struct ieee80211vap *vap, int cap)
873 {
874 	if (vap->iv_opmode == IEEE80211_M_STA &&
875 	    vap->iv_state == IEEE80211_S_RUN)
876 		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
877 	else
878 		return (vap->iv_flags & cap) != 0;
879 }
880 
881 static int
882 superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
883 {
884 	switch (ireq->i_type) {
885 	case IEEE80211_IOC_FF:
886 		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
887 		break;
888 	case IEEE80211_IOC_TURBOP:
889 		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
890 		break;
891 	default:
892 		return ENOSYS;
893 	}
894 	return 0;
895 }
896 IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);
897 
898 static int
899 superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
900 {
901 	switch (ireq->i_type) {
902 	case IEEE80211_IOC_FF:
903 		if (ireq->i_val) {
904 			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
905 				return EOPNOTSUPP;
906 			vap->iv_flags |= IEEE80211_F_FF;
907 		} else
908 			vap->iv_flags &= ~IEEE80211_F_FF;
909 		return ENETRESET;
910 	case IEEE80211_IOC_TURBOP:
911 		if (ireq->i_val) {
912 			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
913 				return EOPNOTSUPP;
914 			vap->iv_flags |= IEEE80211_F_TURBOP;
915 		} else
916 			vap->iv_flags &= ~IEEE80211_F_TURBOP;
917 		return ENETRESET;
918 	default:
919 		return ENOSYS;
920 	}
921 	return 0;
922 }
923 IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);
924