/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#ifdef IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
/* fast frame header is 32-bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21

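/*
 * Fields are pulled out of the 32-bit fast-frame header with the
 * usual mask/shift idiom (cf. the MS() helper in ieee80211_ff_decap()
 * below), e.g.
 *
 *	seqnum = (ath & ATH_FF_SEQNUM) >> ATH_FF_SEQNUM_S;
 *
 * where ath is the header word read from the front of the tunnel
 * payload.
 */
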
#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

#define	ATH_FF_TXQMIN	2	/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50	/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5	/* max waiting period for staged frame */

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
	&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
	"max hold time for fast-frame staging (ms)");

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

	if (ic->ic_caps & IEEE80211_C_FF) {
		sg = (struct ieee80211_superg *) malloc(
		     sizeof(struct ieee80211_superg), M_80211_VAP,
		     M_NOWAIT | M_ZERO);
		if (sg == NULL) {
			printf("%s: cannot allocate SuperG state block\n",
			    __func__);
			return;
		}
		ic->ic_superg = sg;
	}
	ieee80211_ffagemax = msecs_to_ticks(150);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		free(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

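/*
 * NB: fast-frame and dynamic turbo support is opt-in for the driver;
 * a device that can handle them is expected to advertise the
 * capabilities before attaching to net80211, along the lines of
 *
 *	ic->ic_caps |= IEEE80211_C_FF | IEEE80211_C_TURBOP;
 *
 * after which the per-vap IEEE80211_F_FF / IEEE80211_F_TURBOP flags
 * are derived above in ieee80211_superg_vattach().
 */
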
void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	     bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
	    (const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
	const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = LE_READ_2(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

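/*
 * For reference, a received fast-frame aggregate as seen by
 * ieee80211_ff_decap() below looks roughly like
 *
 *	[802.3 hdr][LLC/SNAP oui 00:03:7f type 0x88bd]
 *	[32-bit FF hdr][2 pad]
 *	[802.3 hdr][LLC/SNAP][payload 1][pad to 4-byte boundary]
 *	[802.3 hdr][LLC/SNAP][payload 2]
 *
 * cf. the format summary at the top of this file.
 */
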
/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

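/*
 * NB: the frame pair handed to ieee80211_ff_encap() below is built by
 * ieee80211_ff_check(); when a staged frame finds a partner the two
 * are chained and marked, roughly
 *
 *	mstaged->m_nextpkt = m;
 *	mstaged->m_flags |= M_FF;
 *
 * and the chain then flows through the normal ieee80211_encap() path,
 * which hands M_FF-marked chains to the routine below.
 */
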
/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
	struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;
	/*
	 * Include fast frame headers in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
	    hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
		sizeof(struct ether_header),
	    key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap,
	    ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
	    NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

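	/*
	 * E.g. a 1505-byte leading frame needs pad = 3; with trailing
	 * space in its last mbuf the bytes are appended there, otherwise
	 * the second frame's data pointer is simply backed up by 3.
	 */
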
	/*
	 * Now, stick 'em together and prepend the tunnel headers;
	 * first the Atheros tunnel header (all zero for now) and
	 * then a special fast frame LLC.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int error;

	IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;

		error = ieee80211_parent_xmitpkt(ic, m);
		if (error != 0) {
			/* NB: IFQ_HANDOFF reclaims mbuf */
			ieee80211_free_node(ni);
		} else {
			ifp->if_opackets++;
		}
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, it's possible that the
 * staging queue will be serviced between the function which called
 * it and now; thus simply checking that the queue has work in it
 * may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
	struct mbuf *m, *head;
	struct ieee80211_node *ni;
	struct ieee80211_tx_ampdu *tap;

#if 0
	KASSERT(sq->head != NULL, ("stageq empty"));
#endif

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear tap ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		tap = &ni->ni_tx_ampdu[tid];
		KASSERT(tap->txa_private == m, ("staging queue empty"));
		tap->txa_private = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	IEEE80211_TX_LOCK(ic);
	ff_flush(head, m);
	IEEE80211_TX_UNLOCK(ic);
}

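/*
 * The staging queue is a singly-linked list threaded through
 * m_nextpkt:
 *
 *	sq->head -> m -> m -> ... -> sq->tail	(sq->depth frames)
 *
 * At most one frame per TID is staged at any time; that frame is also
 * recorded in the TID's A-MPDU state (tap->txa_private) so it can be
 * located again when a partner arrives or the node goes away.  Each
 * staged frame carries its node reference in m_pkthdr.rcvif.
 */
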
static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	IEEE80211_LOCK_ASSERT(ic);

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	IEEE80211_LOCK_ASSERT(ic);

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
	const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;

	/*
	 * Approximate the frame length to be transmitted.  A swag to add
	 * the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;
	return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
}

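/*
 * E.g. two full-size 1500-byte payloads give framelen =
 * 1500 + 1500 + 32 + 4 + 6 + 30 = 3072 bytes (plus 24 with privacy
 * enabled), which is then converted to an estimated airtime at the
 * node's current tx rate.
 */
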
/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	IEEE80211_TX_UNLOCK_ASSERT(ic);

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast
	 *     traffic.  If 802.1x EAPOL frames proceed in the clear,
	 *     though, they could not be aggregated with other types of
	 *     frames when encryption is on.
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = tap->txa_private;		/* NB: we reuse AMPDU state */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
	    ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		tap->txa_private = NULL;
		if (mstaged != NULL)
			stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_TX_LOCK(ic);
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
			IEEE80211_TX_UNLOCK(ic);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		tap->txa_private = NULL;
		stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF;	/* NB: mark for encap work */
	} else {
		KASSERT(tap->txa_private == NULL,
		    ("txa_private %p", tap->txa_private));
		tap->txa_private = m;

		stageq_add(ic, sq, m);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}

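/*
 * The transmit path (e.g. the vap output path in ieee80211_output.c)
 * is expected to run frames through ieee80211_ff_check() before
 * encapsulation, roughly
 *
 *	if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
 *		m = ieee80211_ff_check(ni, m);
 *		if (m == NULL)		(staged; node ref rides along)
 *			return (0);
 *	}
 *	(continue on to ieee80211_encap())
 *
 * i.e. a NULL return means the frame and its node reference now sit
 * on the staging queue until a partner shows up or the frame is aged
 * out by ieee80211_ff_age().
 */
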
void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m, *next_m, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);

		tap = &ni->ni_tx_ampdu[tid];
		m = tap->txa_private;
		if (m != NULL) {
			tap->txa_private = NULL;
			stageq_remove(ic, &sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	/*
	 * Free mbufs, taking care to not dereference the mbuf after
	 * we free it (hence grabbing m_nextpkt before we free it).
	 */
	m = head;
	while (m != NULL) {
		next_m = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
		m = next_m;
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

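/*
 * E.g. when an associated AP sets ATHEROS_CAP_BOOST,
 * ieee80211_parse_athparams() adds IEEE80211_CHAN_TURBO to the bss
 * channel flags and the routine above retunes to the turbo channel at
 * the same frequency; clearing boost reverses the switch.
 */
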
/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting.  Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif /* IEEE80211_SUPPORT_SUPERG */