/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#ifdef IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
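 *
 * Two 802.3 frames are tunneled in a single 802.11 frame: an Atheros
 * SNAP/LLC header (OUI 00:03:7f, ether type 0x88bd) is followed by a
 * 32-bit fast-frame header, alignment padding, and the two Ethernet
 * frames, the first padded out to a 4-byte boundary (see
 * ieee80211_ff_encap and ieee80211_ff_decap below).
 *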
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +   14  +   8   + 1500 +  6   +   14  +   8   + 1500
 * = 3066
 */
/* fast frame header is 32 bits */
#define	ATH_FF_PROTO	0x0000003f	/* protocol */
#define	ATH_FF_PROTO_S	0
#define	ATH_FF_FTYPE	0x000000c0	/* frame type */
#define	ATH_FF_FTYPE_S	6
#define	ATH_FF_HLEN32	0x00000300	/* optional hdr length */
#define	ATH_FF_HLEN32_S	8
#define	ATH_FF_SEQNUM	0x001ffc00	/* sequence number */
#define	ATH_FF_SEQNUM_S	10
#define	ATH_FF_OFFSET	0xffe00000	/* offset to 2nd payload */
#define	ATH_FF_OFFSET_S	21

#define	ATH_FF_MAX_HDR_PAD	4
#define	ATH_FF_MAX_SEP_PAD	6
#define	ATH_FF_MAX_HDR		30

#define	ATH_FF_PROTO_L2TUNNEL	0	/* L2 tunnel protocol */
#define	ATH_FF_ETH_TYPE		0x88bd	/* Ether type for encapsulated frames */
#define	ATH_FF_SNAP_ORGCODE_0	0x00
#define	ATH_FF_SNAP_ORGCODE_1	0x03
#define	ATH_FF_SNAP_ORGCODE_2	0x7f

#define	ATH_FF_TXQMIN	2	/* min txq depth for staging */
#define	ATH_FF_TXQMAX	50	/* maximum # of queued frames allowed */
#define	ATH_FF_STAGEMAX	5	/* max waiting period for staged frame */

#define	ETHER_HEADER_COPY(dst, src) \
	memcpy(dst, src, sizeof(struct ether_header))

static	int ieee80211_ffppsmin = 2;	/* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
    &ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static	int ieee80211_ffagemax = -1;	/* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
    &ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
    "max hold time for fast-frame staging (ms)");

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
	struct ieee80211_superg *sg;

	sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
	    sizeof(struct ieee80211_superg), M_80211_VAP,
	    IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
	if (sg == NULL) {
		printf("%s: cannot allocate SuperG state block\n",
		    __func__);
		return;
	}
	ic->ic_superg = sg;

	/*
	 * Default to not being so aggressive for FF/AMSDU
	 * aging, otherwise we may hold a frame around
	 * for way too long before we expire it out.
	 */
	ieee80211_ffagemax = msecs_to_ticks(2);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
	if (ic->ic_superg != NULL) {
		IEEE80211_FREE(ic->ic_superg, M_80211_VAP);
		ic->ic_superg = NULL;
	}
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;

	if (ic->ic_superg == NULL)	/* NB: can't do fast-frames w/o state */
		vap->iv_caps &= ~IEEE80211_C_FF;
	if (vap->iv_caps & IEEE80211_C_FF)
		vap->iv_flags |= IEEE80211_F_FF;
	/* NB: we only implement sta mode */
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    (vap->iv_caps & IEEE80211_C_TURBOP))
		vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

#define	ATH_OUI_BYTES		0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor information element to a frame.
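 * The IE carries the SuperG capability flags and, when WPA/802.1x is
 * not in use, the default transmit key index; 0x7fff marks "no key"
 * (see ieee80211_add_athcaps below).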
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
	static const struct ieee80211_ath_ie info = {
		.ath_id		= IEEE80211_ELEMID_VENDOR,
		.ath_len	= sizeof(struct ieee80211_ath_ie) - 2,
		.ath_oui	= { ATH_OUI_BYTES },
		.ath_oui_type	= ATH_OUI_TYPE,
		.ath_oui_subtype= ATH_OUI_SUBTYPE,
		.ath_version	= ATH_OUI_VERSION,
	};
	struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

	memcpy(frm, &info, sizeof(info));
	ath->ath_capability = caps;
	if (defkeyix != IEEE80211_KEYIX_NONE) {
		ath->ath_defkeyix[0] = (defkeyix & 0xff);
		ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
	} else {
		ath->ath_defkeyix[0] = 0xff;
		ath->ath_defkeyix[1] = 0x7f;
	}
	return frm + sizeof(info);
}
#undef ATH_OUI_BYTES

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
	const struct ieee80211vap *vap = bss->ni_vap;

	return ieee80211_add_ath(frm,
	    vap->iv_flags & IEEE80211_F_ATHEROS,
	    ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
	    bss->ni_authmode != IEEE80211_AUTH_8021X) ?
	    vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
	const struct ieee80211_ath_ie *ath =
	    (const struct ieee80211_ath_ie *) ie;

	ni->ni_ath_flags = ath->ath_capability;
	ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
    const struct ieee80211_frame *wh)
{
	struct ieee80211vap *vap = ni->ni_vap;
	const struct ieee80211_ath_ie *ath;
	u_int len = frm[1];
	int capschanged;
	uint16_t defkeyix;

	if (len < sizeof(struct ieee80211_ath_ie)-2) {
		IEEE80211_DISCARD_IE(vap,
		    IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
		    wh, "Atheros", "too short, len %u", len);
		return -1;
	}
	ath = (const struct ieee80211_ath_ie *)frm;
	capschanged = (ni->ni_ath_flags != ath->ath_capability);
	defkeyix = LE_READ_2(ath->ath_defkeyix);
	if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
		ni->ni_ath_flags = ath->ath_capability;
		ni->ni_ath_defkeyix = defkeyix;
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "ath ie change: new caps 0x%x defkeyix 0x%x",
		    ni->ni_ath_flags, ni->ni_ath_defkeyix);
	}
	if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
		uint16_t curflags, newflags;

		/*
		 * Check for turbo mode switch.  Calculate flags
		 * for the new mode and effect the switch.
		 */
		newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
		/* NB: BOOST is not in ic_flags, so get it from the ie */
		if (ath->ath_capability & ATHEROS_CAP_BOOST)
			newflags |= IEEE80211_CHAN_TURBO;
		else
			newflags &= ~IEEE80211_CHAN_TURBO;
		if (newflags != curflags)
			ieee80211_dturbo_switch(vap, newflags);
	}
	return capschanged;
}

/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
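 * Frames that are too short to hold a frame pair, or that lack the
 * Atheros tunnel ether type, are passed back unchanged.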
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define	FF_LLC_SIZE	(sizeof(struct ether_header) + sizeof(struct llc))
#define	MS(x,f)	(((x) & f) >> f##_S)
	struct ieee80211vap *vap = ni->ni_vap;
	struct llc *llc;
	uint32_t ath;
	struct mbuf *n;
	int framelen;

	/* NB: we assume caller does this check for us */
	KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
	    ("ff not negotiated"));
	/*
	 * Check for fast-frame tunnel encapsulation.
	 */
	if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
		return m;
	if (m->m_len < FF_LLC_SIZE &&
	    (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "m_pullup(llc) failed");
		vap->iv_stats.is_rx_tooshort++;
		return NULL;
	}
	llc = (struct llc *)(mtod(m, uint8_t *) +
	    sizeof(struct ether_header));
	if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
		return m;
	m_adj(m, FF_LLC_SIZE);
	m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
	if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "unsupported tunnel protocol, header 0x%x", ath);
		vap->iv_stats.is_ff_badhdr++;
		m_freem(m);
		return NULL;
	}
	/* NB: skip header and alignment padding */
	m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

	vap->iv_stats.is_ff_decap++;

	/*
	 * Decap the first frame, bust it apart from the
	 * second and deliver; then decap the second frame
	 * and return it to the caller for normal delivery.
	 */
	m = ieee80211_decap1(m, &framelen);
	if (m == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
		vap->iv_stats.is_ff_tooshort++;
		return NULL;
	}
	n = m_split(m, framelen, M_NOWAIT);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame",
		    "%s", "unable to split encapsulated frames");
		vap->iv_stats.is_ff_split++;
		m_freem(m);			/* NB: must reclaim */
		return NULL;
	}
	/* XXX not right for WDS */
	vap->iv_deliver_data(vap, ni, m);	/* 1st of pair */

	/*
	 * Decap second frame.
	 */
	m_adj(n, roundup2(framelen, 4) - framelen);	/* padding */
	n = ieee80211_decap1(n, &framelen);
	if (n == NULL) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
		    ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
		vap->iv_stats.is_ff_tooshort++;
	}
	/* XXX verify framelen against mbuf contents */
	return n;				/* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
    struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct llc *llc;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Adjust to include 802.11 header requirement.
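	 * The Ethernet header is copied aside first since
	 * ieee80211_mbuf_adjust makes room in front of the payload
	 * for the 802.11 and (optional) crypto headers.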
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap, hdrspace, key, m1);
	if (m1 == NULL) {
		printf("%s: failed initial mbuf_adjust\n", __func__);
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		m2 = NULL;		/* NB: don't free again at bad: */
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for possible padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		printf("%s: failed second mbuf_adjust\n", __func__);
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now stick 'em together and prepend the tunnel headers:
	 * first the 32-bit Atheros fast-frame header (all zero for
	 * now) and then the special fast-frame LLC/SNAP header.
	 *
	 * XXX optimize by prepending together
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;
	M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for tunnel header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

	M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
	if (m1 == NULL) {		/* XXX cannot happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no space for llc header\n", __func__);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}
	llc = mtod(m1, struct llc *);
	llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
	llc->llc_control = LLC_UI;
	llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
	llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
	llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
	llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

	vap->iv_stats.is_ff_encap++;

	return m1;
bad:
	vap->iv_stats.is_ff_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

/*
 * A-MSDU encapsulation.
 *
 * This assumes just two frames for now, since we're borrowing the
 * same queuing code and infrastructure as fast-frames.
 *
 * There must be two packets chained with m_nextpkt.
 * We do header adjustment for each, and then concatenate the mbuf chains
 * to form a single frame for transmission.
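 *
 * Unlike fast-frames no Atheros tunnel header is prepended here;
 * the A-MSDU indication is carried in the QoS header built later
 * in ieee80211_encap.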
 */
struct mbuf *
ieee80211_amsdu_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
    struct ieee80211_key *key)
{
	struct mbuf *m2;
	struct ether_header eh1, eh2;
	struct mbuf *m;
	int pad;

	m2 = m1->m_nextpkt;
	if (m2 == NULL) {
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: only one frame\n", __func__);
		goto bad;
	}
	m1->m_nextpkt = NULL;

	/*
	 * Include A-MSDU header in adjusting header layout.
	 */
	KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
	m1 = ieee80211_mbuf_adjust(vap,
	    hdrspace + sizeof(struct llc) + sizeof(uint32_t) +
	    sizeof(struct ether_header),
	    key, m1);
	if (m1 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		m_freem(m2);
		m2 = NULL;		/* NB: don't free again at bad: */
		goto bad;
	}

	/*
	 * Copy second frame's Ethernet header out of line
	 * and adjust for encapsulation headers.  Note that
	 * we make room for padding in case there isn't room
	 * at the end of first frame.
	 */
	KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
	ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
	m2 = ieee80211_mbuf_adjust(vap, 4, NULL, m2);
	if (m2 == NULL) {
		/* NB: ieee80211_mbuf_adjust handles msgs+statistics */
		goto bad;
	}

	/*
	 * Now do tunnel encapsulation.  First, each
	 * frame gets a standard encapsulation.
	 */
	m1 = ieee80211_ff_encap1(vap, m1, &eh1);
	if (m1 == NULL)
		goto bad;
	m2 = ieee80211_ff_encap1(vap, m2, &eh2);
	if (m2 == NULL)
		goto bad;

	/*
	 * Pad leading frame to a 4-byte boundary.  If there
	 * is space at the end of the first frame, put it
	 * there; otherwise prepend to the front of the second
	 * frame.  We know doing the second will always work
	 * because we reserve space above.  We prefer appending
	 * as this typically has better DMA alignment properties.
	 */
	for (m = m1; m->m_next != NULL; m = m->m_next)
		;
	pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
	if (pad) {
		if (M_TRAILINGSPACE(m) < pad) {		/* prepend to second */
			m2->m_data -= pad;
			m2->m_len += pad;
			m2->m_pkthdr.len += pad;
		} else {				/* append to first */
			m->m_len += pad;
			m1->m_pkthdr.len += pad;
		}
	}

	/*
	 * Now, stick 'em together.
	 */
	m->m_next = m2;			/* NB: last mbuf from above */
	m1->m_pkthdr.len += m2->m_pkthdr.len;

	vap->iv_stats.is_amsdu_encap++;

	return m1;
bad:
	vap->iv_stats.is_amsdu_encapfail++;
	if (m1 != NULL)
		m_freem(m1);
	if (m2 != NULL)
		m_freem(m2);
	return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int error;

	IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

	/* encap and xmit */
	m = ieee80211_encap(vap, ni, m);
	if (m != NULL) {
		struct ifnet *ifp = vap->iv_ifp;

		error = ieee80211_parent_xmitpkt(ic, m);
		if (!error)
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	} else
		ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
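 * Each staged mbuf carries its node reference in m_pkthdr.rcvif;
 * ff_transmit consumes or releases that reference.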
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
	struct mbuf *m, *next;
	struct ieee80211_node *ni;
	struct ieee80211vap *vap;

	for (m = head; m != last; m = next) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		vap = ni->ni_vap;

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: flush frame, age %u", __func__, M_AGE_GET(m));
		vap->iv_stats.is_ff_flush++;

		ff_transmit(ni, m);
	}
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, it's possible that the
 * staging queue will be serviced between the caller checking it and
 * this function running; thus simply checking that the queue has
 * work in it may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
	struct mbuf *m, *head;
	struct ieee80211_node *ni;

#if 0
	KASSERT(sq->head != NULL, ("stageq empty"));
#endif

	IEEE80211_LOCK(ic);
	head = sq->head;
	while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
		int tid = WME_AC_TO_TID(M_WME_GETAC(m));

		/* clear staging ref to frame */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty"));
		ni->ni_tx_superg[tid] = NULL;

		sq->head = m->m_nextpkt;
		sq->depth--;
	}
	if (m == NULL)
		sq->tail = NULL;
	else
		M_AGE_SUB(m, quanta);
	IEEE80211_UNLOCK(ic);

	IEEE80211_TX_LOCK(ic);
	ff_flush(head, m);
	IEEE80211_TX_UNLOCK(ic);
}

static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
	int age = ieee80211_ffagemax;

	IEEE80211_LOCK_ASSERT(ic);

	if (sq->tail != NULL) {
		sq->tail->m_nextpkt = m;
		age -= M_AGE_GET(sq->head);
	} else
		sq->head = m;
	KASSERT(age >= 0, ("age %d", age));
	M_AGE_SET(m, age);
	m->m_nextpkt = NULL;
	sq->tail = m;
	sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
	struct mbuf *m, *mprev;

	IEEE80211_LOCK_ASSERT(ic);

	mprev = NULL;
	for (m = sq->head; m != NULL; m = m->m_nextpkt) {
		if (m == mstaged) {
			if (mprev == NULL)
				sq->head = m->m_nextpkt;
			else
				mprev->m_nextpkt = m->m_nextpkt;
			if (sq->tail == m)
				sq->tail = mprev;
			sq->depth--;
			return;
		}
		mprev = m;
	}
	printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
    const struct mbuf *m1, const struct mbuf *m2)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211vap *vap = ni->ni_vap;
	uint32_t framelen;
	uint32_t frame_time;

	/*
	 * Approximate the frame length to be transmitted.
	 * A swag to add the following maximal values to the mbuf payload:
	 *   - 32: 802.11 encap + CRC
	 *   - 24: encryption overhead (if wep bit)
	 *   - 4 + 6: fast-frame header and padding
	 *   - 16: 2 LLC FF tunnel headers
	 *   - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
	 */
	framelen = m1->m_pkthdr.len + 32 +
	    ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		framelen += 24;
	if (m2 != NULL)
		framelen += m2->m_pkthdr.len;

	/*
	 * For now, we assume non-shortgi, 20MHz, just because I want to
	 * at least test 802.11n.
	 */
	if (ni->ni_txrate & IEEE80211_RATE_MCS)
		frame_time = ieee80211_compute_duration_ht(framelen,
		    ni->ni_txrate,
		    IEEE80211_HT_RC_2_STREAMS(ni->ni_txrate),
		    0, /* isht40 */
		    0); /* isshortgi */
	else
		frame_time = ieee80211_compute_duration(ic->ic_rt, framelen,
		    ni->ni_txrate, 0);
	return (frame_time);
}

/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;

	IEEE80211_TX_UNLOCK_ASSERT(ic);

	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
	 *     Do 802.1x EAPOL frames proceed in the clear?  Then they couldn't
	 *     be aggregated with other types of frames when encryption is on?
	 */
	IEEE80211_LOCK(ic);
	tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
	mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)];
	/* XXX NOTE: reusing packet counter state from A-MPDU */
	/*
	 * XXX NOTE: this means we're double-counting; it should just
	 * be done in ieee80211_output.c once for both superg and A-MPDU.
	 */
	ieee80211_txampdu_count_packet(tap);

	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this ensures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low, do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &sg->ff_stageq[pri];
	/*
	 * Check the txop limit to ensure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
	    ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
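		 * (The staged frame is unlinked here under the comlock
		 * and transmitted below under the TX lock.)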
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);

		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		if (mstaged != NULL)
			stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		if (mstaged != NULL) {
			IEEE80211_TX_LOCK(ic);
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
			IEEE80211_TX_UNLOCK(ic);
		}
		return m;		/* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
		stageq_remove(ic, sq, mstaged);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);

		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF;	/* NB: mark for encap work */
	} else {
		KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)] == NULL,
		    ("ni_tx_superg[]: %p",
		    ni->ni_tx_superg[WME_AC_TO_TID(pri)]));
		ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m;

		stageq_add(ic, sq, m);
		IEEE80211_UNLOCK(ic);

		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}

struct mbuf *
ieee80211_amsdu_check(struct ieee80211_node *ni, struct mbuf *m)
{
	/*
	 * XXX TODO: actually enforce the node support
	 * and HTCAP requirements for the maximum A-MSDU
	 * size.
	 */

	/* First: software A-MSDU transmit? */
	if (! ieee80211_amsdu_tx_ok(ni))
		return (m);

	/* Next - EAPOL/multicast/broadcast? Don't aggregate; we don't QoS encap them */
	if (m->m_flags & (M_EAPOL | M_MCAST | M_BCAST))
		return (m);

	/* Next - needs to be a data frame, non-broadcast, etc */
	if (ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
		return (m);

	return (ieee80211_ff_check(ni, m));
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before the node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_superg *sg = ic->ic_superg;
	struct mbuf *m, *next_m, *head;
	int tid;

	IEEE80211_LOCK(ic);
	head = NULL;
	for (tid = 0; tid < WME_NUM_TID; tid++) {
		int ac = TID_TO_WME_AC(tid);
		/*
		 * XXX Initialise the packet counter.
		 *
		 * This may be double-work for 11n stations;
		 * but without it we never set things up.
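		 * (ieee80211_ff_check consults this pps estimate via
		 * ieee80211_txampdu_getpps before staging frames.)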
		 */
		ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]);
		m = ni->ni_tx_superg[tid];
		if (m != NULL) {
			ni->ni_tx_superg[tid] = NULL;
			stageq_remove(ic, &sg->ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	/*
	 * Free mbufs, taking care to not dereference the mbuf after
	 * we free it (hence grabbing m_nextpkt before we free it.)
	 */
	m = head;
	while (m != NULL) {
		next_m = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
		m = next_m;
	}
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ieee80211_channel *chan;

	chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
	if (chan == NULL) {		/* XXX should not happen */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: no channel with freq %u flags 0x%x\n",
		    __func__, ic->ic_bsschan->ic_freq, newflags);
		return;
	}

	IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
	    "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
	    ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
	    ieee80211_phymode_name[ieee80211_chan2mode(chan)],
	    chan->ic_freq, chan->ic_flags);

	ic->ic_bsschan = chan;
	ic->ic_prevchan = ic->ic_curchan;
	ic->ic_curchan = chan;
	ic->ic_rt = ieee80211_get_ratetable(chan);
	ic->ic_set_channel(ic);
	ieee80211_radiotap_chan_change(ic);
	/* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting; otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
	if (vap->iv_opmode == IEEE80211_M_STA &&
	    vap->iv_state == IEEE80211_S_RUN)
		return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
	else
		return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		ireq->i_val = getathcap(vap, IEEE80211_F_FF);
		break;
	case IEEE80211_IOC_TURBOP:
		ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
		break;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
	switch (ireq->i_type) {
	case IEEE80211_IOC_FF:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_FF) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_FF;
		} else
			vap->iv_flags &= ~IEEE80211_F_FF;
		return ENETRESET;
	case IEEE80211_IOC_TURBOP:
		if (ireq->i_val) {
			if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
				return EOPNOTSUPP;
			vap->iv_flags |= IEEE80211_F_TURBOP;
		} else
			vap->iv_flags &= ~IEEE80211_F_TURBOP;
		return ENETRESET;
	default:
		return ENOSYS;
	}
	return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif	/* IEEE80211_SUPPORT_SUPERG */