/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_llc.h>
#include <net/if_media.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

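/*
 * Atheros SuperG support: fast-frame aggregation (pairing two frames
 * into a single tunnel-encapsulated 802.11 frame), dynamic turbo
 * channel switching, and the associated vendor IE and ioctl handling.
 */
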
/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +  14   +   8   + 1500 +  6   +  14   +   8  + 1500
 * = 3066
 */
/* fast frame header is 32-bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21

#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

#define	ATH_FF_TXQMIN	2	/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50	/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5	/* max waiting period for staged frame */

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
	"max hold time for fast-frame staging (ms)");

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

	if (ic->ic_caps & IEEE80211_C_FF) {
		sg = (struct ieee80211_superg *) malloc(
		     sizeof(struct ieee80211_superg), M_80211_VAP,
		     M_NOWAIT | M_ZERO);
		if (sg == NULL) {
			printf("%s: cannot allocate SuperG state block\n",
			    __func__);
			return;
		}
		ic->ic_superg = sg;
	}
	ieee80211_ffagemax = msecs_to_ticks(150);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		free(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

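/*
 * Atheros vendor information element handling.  The IE carries a
 * capability byte and a 16-bit default transmit key index; it is
 * written by ieee80211_add_ath()/ieee80211_add_athcaps() below and
 * parsed by ieee80211_parse_ath()/ieee80211_parse_athparams(),
 * including the boost indication used to trigger a dynamic turbo
 * switch.
 */
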
#define	ATH_OUI_BYTES	0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor-specific information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	     bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
		(const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
	const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = LE_READ_2(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

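/*
 * Layout of a fast frame as handed to ieee80211_ff_decap() below
 * (a sketch; field sizes follow the definitions above):
 *
 *   [802.3 hdr][LLC/SNAP, ethertype 0x88bd][32-bit FF hdr + 2 pad]
 *   [802.3 hdr][LLC/SNAP][payload 1][pad to 4-byte boundary]
 *   [802.3 hdr][LLC/SNAP][payload 2]
 */
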
/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

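/*
 * The per-payload rewrite done by ff_encap1() below (a sketch):
 *
 *   before:  [dst|src|type][payload]
 *   after:   [dst|src|len ][LLC/SNAP carrying original type][payload]
 *
 * i.e. each Ethernet II frame is rewritten 802.3-style, with the
 * original ethertype moved into a SNAP header and the type/length
 * field set to the length of the LLC plus payload.
 */
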
/*
 * Do Ethernet-LLC encapsulation for each payload in a fast frame
 * tunnel encapsulation.  The frame is assumed to have an Ethernet
 * header at the front that must be stripped before prepending the
 * LLC followed by the Ethernet header passed in (with an Ethernet
 * type that specifies the payload size).
 */
static struct mbuf *
ff_encap1(struct ieee80211vap *vap, struct mbuf *m,
	const struct ether_header *eh)
{
	struct llc *llc;
	uint16_t payload;

	/* XXX optimize by combining m_adj+M_PREPEND */
	m_adj(m, sizeof(struct ether_header) - sizeof(struct llc));
	llc = mtod(m, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = 0;
	llc->llc_snap.org_code[1] = 0;
	llc->llc_snap.org_code[2] = 0;
	llc->llc_snap.ether_type = eh->ether_type;
	payload = m->m_pkthdr.len;		/* NB: w/o Ethernet header */

	M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT);
	if (m == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for ether_header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	ETHER_HEADER_COPY(mtod(m, void *), eh);
	mtod(m, struct ether_header *)->ether_type = htons(payload);
	return m;
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;
	/*
	 * Include fast frame headers in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
		hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
		sizeof(struct ether_header),
		key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap,
		ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
		NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

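	/*
	 * At this point both payloads have been rewritten by
	 * ff_encap1().  The remaining steps below pad the first
	 * payload to a 4-byte boundary, chain the two together, and
	 * prepend the Atheros tunnel header and fast-frame LLC/SNAP
	 * (a sketch of the result):
	 *
	 *   [LLC/SNAP 00:03:7f, type 0x88bd][32-bit FF hdr + 2 pad]
	 *   [payload 1][pad][payload 2]
	 */
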
	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now, stick 'em together and prepend the tunnel headers;
	 * first the Atheros tunnel header (all zero for now) and
	 * then a special fast frame LLC.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_DONTWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_DONTWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	int error;

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;
		struct ifnet *parent = ni->ni_ic->ic_ifp;

		error = parent->if_transmit(parent, m);
		if (error != 0) {
			/* NB: the parent's if_transmit reclaims the mbuf */
			ieee80211_free_node(ni);
		} else {
			ifp->if_opackets++;
		}
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

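/*
 * Staged frames are aged in units of ticks: stageq_add() below stamps
 * each frame with M_AGE_SET() relative to the frame at the head of
 * the queue, and ieee80211_ff_age() flushes frames from the head
 * while their age is below the elapsed quanta, deducting the quanta
 * from the frame left at the head.
 */
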
/*
 * Age frames on the staging queue.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
	int quanta)
{
	struct ieee80211_superg *sg = ic->ic_superg;
	struct mbuf *m, *head;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;

	KASSERT(sq->head != NULL, ("stageq empty"));

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear tap ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		tap = &ni->ni_tx_ampdu[tid];
		KASSERT(tap->txa_private == m, ("staging queue empty"));
		tap->txa_private = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
		sg->ff_stageqdepth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	ff_flush(head, m);
}

static void
stageq_add(struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
	const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;

	/*
	 * Approximate the frame length to be transmitted.  A swag to add
	 * the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;
	return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
}

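/*
 * Example of the estimate above (a sketch, assuming no privacy): for
 * two full-size 1514-byte Ethernet frames,
 *   framelen = 1514 + 32 + 4 + 6 + 30 + 1514 = 3100 bytes
 * which is converted to an airtime estimate at the node's current tx
 * rate and compared against the WME TXOP limit in
 * ieee80211_ff_check() below.
 */
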
/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
	 *     Do 802.1x EAPOL frames proceed in the clear?  Then they couldn't
	 *     be aggregated with other types of frames when encryption is on?
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = tap->txa_private;		/* NB: we reuse AMPDU state */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, then do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
	    ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		tap->txa_private = NULL;
		if (mstaged != NULL)
			stageq_remove(sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		tap->txa_private = NULL;
		stageq_remove(sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF;	/* NB: mark for encap work */
	} else {
		KASSERT(tap->txa_private == NULL,
		    ("txa_private %p", tap->txa_private));
		tap->txa_private = m;

		stageq_add(sq, m);
		sg->ff_stageqdepth++;
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}

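/*
 * NB: a staged frame carries its node reference in m_pkthdr.rcvif and
 * is recorded in the TID's ampdu tx state (txa_private).  Both
 * ieee80211_ff_age() above and ieee80211_ff_node_cleanup() below
 * depend on this to locate and reclaim staged frames when they are
 * flushed or their node is torn down.
 */
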
void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m, *next, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);

		tap = &ni->ni_tx_ampdu[tid];
		m = tap->txa_private;
		if (m != NULL) {
			tap->txa_private = NULL;
			stageq_remove(&sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	for (m = head; m != NULL; m = next) {
		/* NB: fetch next before m_freem releases the mbuf */
		next = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

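/*
 * ioctl plumbing: the handlers below export the fast-frame and
 * turbo-prime settings via IEEE80211_IOC_FF and IEEE80211_IOC_TURBOP.
 * Set requests return ENETRESET so the caller knows the interface
 * must be reset for the change to take effect.
 */
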
/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting.  Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);