/*-
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_wlan.h"

#ifdef IEEE80211_SUPPORT_SUPERG

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/endian.h>

#include <sys/socket.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_input.h>
#include <net80211/ieee80211_phy.h>
#include <net80211/ieee80211_superg.h>

/*
 * Atheros fast-frame encapsulation format.
 * FF max payload:
 * 802.2 + FFHDR + HPAD + 802.3 + 802.2 + 1500 + SPAD + 802.3 + 802.2 + 1500:
 *   8   +   4   +  4   +  14   +   8   + 1500 +  6   +  14   +   8   + 1500
 * = 3066
 */
/* fast frame header is 32-bits */
#define ATH_FF_PROTO    0x0000003f      /* protocol */
#define ATH_FF_PROTO_S  0
#define ATH_FF_FTYPE    0x000000c0      /* frame type */
#define ATH_FF_FTYPE_S  6
#define ATH_FF_HLEN32   0x00000300      /* optional hdr length */
#define ATH_FF_HLEN32_S 8
#define ATH_FF_SEQNUM   0x001ffc00      /* sequence number */
#define ATH_FF_SEQNUM_S 10
#define ATH_FF_OFFSET   0xffe00000      /* offset to 2nd payload */
#define ATH_FF_OFFSET_S 21
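/*
 * Fields are packed into a single 32-bit header word and are pulled
 * out with the mask/shift pairs above, e.g. the tunnel protocol is
 * (hdr & ATH_FF_PROTO) >> ATH_FF_PROTO_S; see the MS() macro in
 * ieee80211_ff_decap() below for the form actually used.
 */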

#define ATH_FF_MAX_HDR_PAD      4
#define ATH_FF_MAX_SEP_PAD      6
#define ATH_FF_MAX_HDR          30

#define ATH_FF_PROTO_L2TUNNEL   0       /* L2 tunnel protocol */
#define ATH_FF_ETH_TYPE         0x88bd  /* Ether type for encapsulated frames */
#define ATH_FF_SNAP_ORGCODE_0   0x00
#define ATH_FF_SNAP_ORGCODE_1   0x03
#define ATH_FF_SNAP_ORGCODE_2   0x7f

#define ATH_FF_TXQMIN           2       /* min txq depth for staging */
#define ATH_FF_TXQMAX           50      /* maximum # of queued frames allowed */
#define ATH_FF_STAGEMAX         5       /* max waiting period for staged frame */

#define ETHER_HEADER_COPY(dst, src) \
    memcpy(dst, src, sizeof(struct ether_header))

static int ieee80211_ffppsmin = 2;      /* pps threshold for ff aggregation */
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
    &ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
static int ieee80211_ffagemax = -1;     /* max time frames held on stage q */
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLTYPE_INT | CTLFLAG_RW,
    &ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
    "max hold time for fast-frame staging (ms)");

void
ieee80211_superg_attach(struct ieee80211com *ic)
{
    struct ieee80211_superg *sg;

    if (ic->ic_caps & IEEE80211_C_FF) {
        sg = (struct ieee80211_superg *) IEEE80211_MALLOC(
            sizeof(struct ieee80211_superg), M_80211_VAP,
            IEEE80211_M_NOWAIT | IEEE80211_M_ZERO);
        if (sg == NULL) {
            printf("%s: cannot allocate SuperG state block\n",
                __func__);
            return;
        }
        ic->ic_superg = sg;
    }
    ieee80211_ffagemax = msecs_to_ticks(150);
}

void
ieee80211_superg_detach(struct ieee80211com *ic)
{
    if (ic->ic_superg != NULL) {
        IEEE80211_FREE(ic->ic_superg, M_80211_VAP);
        ic->ic_superg = NULL;
    }
}

void
ieee80211_superg_vattach(struct ieee80211vap *vap)
{
    struct ieee80211com *ic = vap->iv_ic;

    if (ic->ic_superg == NULL)  /* NB: can't do fast-frames w/o state */
        vap->iv_caps &= ~IEEE80211_C_FF;
    if (vap->iv_caps & IEEE80211_C_FF)
        vap->iv_flags |= IEEE80211_F_FF;
    /* NB: we only implement sta mode */
    if (vap->iv_opmode == IEEE80211_M_STA &&
        (vap->iv_caps & IEEE80211_C_TURBOP))
        vap->iv_flags |= IEEE80211_F_TURBOP;
}

void
ieee80211_superg_vdetach(struct ieee80211vap *vap)
{
}

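/*
 * Rough layout of the Atheros vendor IE built below (see struct
 * ieee80211_ath_ie in ieee80211_superg.h for the authoritative
 * definition):
 *   [elemid 221][len][OUI 00:03:7f][type][subtype][version]
 *   [capability][defkeyix lo][defkeyix hi]
 * The default key index is stored little-endian; 0x7fff means
 * "no default key".
 */
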
#define ATH_OUI_BYTES   0x00, 0x03, 0x7f
/*
 * Add an Atheros vendor information element to a frame.
 */
uint8_t *
ieee80211_add_ath(uint8_t *frm, uint8_t caps, ieee80211_keyix defkeyix)
{
    static const struct ieee80211_ath_ie info = {
        .ath_id         = IEEE80211_ELEMID_VENDOR,
        .ath_len        = sizeof(struct ieee80211_ath_ie) - 2,
        .ath_oui        = { ATH_OUI_BYTES },
        .ath_oui_type   = ATH_OUI_TYPE,
        .ath_oui_subtype= ATH_OUI_SUBTYPE,
        .ath_version    = ATH_OUI_VERSION,
    };
    struct ieee80211_ath_ie *ath = (struct ieee80211_ath_ie *) frm;

    memcpy(frm, &info, sizeof(info));
    ath->ath_capability = caps;
    if (defkeyix != IEEE80211_KEYIX_NONE) {
        ath->ath_defkeyix[0] = (defkeyix & 0xff);
        ath->ath_defkeyix[1] = ((defkeyix >> 8) & 0xff);
    } else {
        ath->ath_defkeyix[0] = 0xff;
        ath->ath_defkeyix[1] = 0x7f;
    }
    return frm + sizeof(info);
}
#undef ATH_OUI_BYTES

uint8_t *
ieee80211_add_athcaps(uint8_t *frm, const struct ieee80211_node *bss)
{
    const struct ieee80211vap *vap = bss->ni_vap;

    return ieee80211_add_ath(frm,
        vap->iv_flags & IEEE80211_F_ATHEROS,
        ((vap->iv_flags & IEEE80211_F_WPA) == 0 &&
         bss->ni_authmode != IEEE80211_AUTH_8021X) ?
        vap->iv_def_txkey : IEEE80211_KEYIX_NONE);
}

void
ieee80211_parse_ath(struct ieee80211_node *ni, uint8_t *ie)
{
    const struct ieee80211_ath_ie *ath =
        (const struct ieee80211_ath_ie *) ie;

    ni->ni_ath_flags = ath->ath_capability;
    ni->ni_ath_defkeyix = LE_READ_2(&ath->ath_defkeyix);
}

int
ieee80211_parse_athparams(struct ieee80211_node *ni, uint8_t *frm,
    const struct ieee80211_frame *wh)
{
    struct ieee80211vap *vap = ni->ni_vap;
    const struct ieee80211_ath_ie *ath;
    u_int len = frm[1];
    int capschanged;
    uint16_t defkeyix;

    if (len < sizeof(struct ieee80211_ath_ie)-2) {
        IEEE80211_DISCARD_IE(vap,
            IEEE80211_MSG_ELEMID | IEEE80211_MSG_SUPERG,
            wh, "Atheros", "too short, len %u", len);
        return -1;
    }
    ath = (const struct ieee80211_ath_ie *)frm;
    capschanged = (ni->ni_ath_flags != ath->ath_capability);
    defkeyix = LE_READ_2(ath->ath_defkeyix);
    if (capschanged || defkeyix != ni->ni_ath_defkeyix) {
        ni->ni_ath_flags = ath->ath_capability;
        ni->ni_ath_defkeyix = defkeyix;
        IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
            "ath ie change: new caps 0x%x defkeyix 0x%x",
            ni->ni_ath_flags, ni->ni_ath_defkeyix);
    }
    if (IEEE80211_ATH_CAP(vap, ni, ATHEROS_CAP_TURBO_PRIME)) {
        uint16_t curflags, newflags;

        /*
         * Check for turbo mode switch.  Calculate flags
         * for the new mode and effect the switch.
         */
        newflags = curflags = vap->iv_ic->ic_bsschan->ic_flags;
        /* NB: BOOST is not in ic_flags, so get it from the ie */
        if (ath->ath_capability & ATHEROS_CAP_BOOST)
            newflags |= IEEE80211_CHAN_TURBO;
        else
            newflags &= ~IEEE80211_CHAN_TURBO;
        if (newflags != curflags)
            ieee80211_dturbo_switch(vap, newflags);
    }
    return capschanged;
}

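/*
 * For reference, the decapsulated MSDU carried in a fast frame looks
 * roughly like this (cf. the payload summary at the top of this file):
 *
 *   [802.3 hdr][LLC/SNAP, ether type 0x88bd]       outer tunnel encap
 *   [32-bit FF header][2 bytes pad]                Atheros tunnel header
 *   [802.3 hdr][802.2 LLC][payload 1][pad to 4]    first subframe
 *   [802.3 hdr][802.2 LLC][payload 2]              second subframe
 *
 * ieee80211_ff_decap() below strips the tunnel encapsulation and
 * splits the two subframes; ieee80211_ff_encap() builds everything
 * from the fast-frame LLC down on transmit (the outer 802.11
 * encapsulation is added later by the normal ieee80211_encap() path).
 */
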
/*
 * Decap the encapsulated frame pair and dispatch the first
 * for delivery.  The second frame is returned for delivery
 * via the normal path.
 */
struct mbuf *
ieee80211_ff_decap(struct ieee80211_node *ni, struct mbuf *m)
{
#define FF_LLC_SIZE (sizeof(struct ether_header) + sizeof(struct llc))
#define MS(x,f)     (((x) & f) >> f##_S)
    struct ieee80211vap *vap = ni->ni_vap;
    struct llc *llc;
    uint32_t ath;
    struct mbuf *n;
    int framelen;

    /* NB: we assume caller does this check for us */
    KASSERT(IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF),
        ("ff not negotiated"));
    /*
     * Check for fast-frame tunnel encapsulation.
     */
    if (m->m_pkthdr.len < 3*FF_LLC_SIZE)
        return m;
    if (m->m_len < FF_LLC_SIZE &&
        (m = m_pullup(m, FF_LLC_SIZE)) == NULL) {
        IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
            ni->ni_macaddr, "fast-frame",
            "%s", "m_pullup(llc) failed");
        vap->iv_stats.is_rx_tooshort++;
        return NULL;
    }
    llc = (struct llc *)(mtod(m, uint8_t *) +
        sizeof(struct ether_header));
    if (llc->llc_snap.ether_type != htons(ATH_FF_ETH_TYPE))
        return m;
    m_adj(m, FF_LLC_SIZE);
    m_copydata(m, 0, sizeof(uint32_t), (caddr_t) &ath);
    if (MS(ath, ATH_FF_PROTO) != ATH_FF_PROTO_L2TUNNEL) {
        IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
            ni->ni_macaddr, "fast-frame",
            "unsupported tunnel protocol, header 0x%x", ath);
        vap->iv_stats.is_ff_badhdr++;
        m_freem(m);
        return NULL;
    }
    /* NB: skip header and alignment padding */
    m_adj(m, roundup(sizeof(uint32_t) - 2, 4) + 2);

    vap->iv_stats.is_ff_decap++;

    /*
     * Decap the first frame, bust it apart from the
     * second and deliver; then decap the second frame
     * and return it to the caller for normal delivery.
     */
    m = ieee80211_decap1(m, &framelen);
    if (m == NULL) {
        IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
            ni->ni_macaddr, "fast-frame", "%s", "first decap failed");
        vap->iv_stats.is_ff_tooshort++;
        return NULL;
    }
    n = m_split(m, framelen, M_NOWAIT);
    if (n == NULL) {
        IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
            ni->ni_macaddr, "fast-frame",
            "%s", "unable to split encapsulated frames");
        vap->iv_stats.is_ff_split++;
        m_freem(m);                     /* NB: must reclaim */
        return NULL;
    }
    /* XXX not right for WDS */
    vap->iv_deliver_data(vap, ni, m);   /* 1st of pair */

    /*
     * Decap second frame.
     */
    m_adj(n, roundup2(framelen, 4) - framelen); /* padding */
    n = ieee80211_decap1(n, &framelen);
    if (n == NULL) {
        IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_ANY,
            ni->ni_macaddr, "fast-frame", "%s", "second decap failed");
        vap->iv_stats.is_ff_tooshort++;
    }
    /* XXX verify framelen against mbuf contents */
    return n;                           /* 2nd delivered by caller */
#undef MS
#undef FF_LLC_SIZE
}

/*
 * Fast frame encapsulation.  There must be two packets
 * chained with m_nextpkt.  We do header adjustment for
 * each, add the tunnel encapsulation, and then concatenate
 * the mbuf chains to form a single frame for transmission.
 */
struct mbuf *
ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
    struct ieee80211_key *key)
{
    struct mbuf *m2;
    struct ether_header eh1, eh2;
    struct llc *llc;
    struct mbuf *m;
    int pad;

    m2 = m1->m_nextpkt;
    if (m2 == NULL) {
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: only one frame\n", __func__);
        goto bad;
    }
    m1->m_nextpkt = NULL;

    /*
     * Include fast frame headers in adjusting header layout.
     */
    KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
    ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
    m1 = ieee80211_mbuf_adjust(vap,
        hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
            sizeof(struct ether_header),
        key, m1);
    if (m1 == NULL) {
        /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
        m_freem(m2);
        goto bad;
    }

    /*
     * Copy second frame's Ethernet header out of line
     * and adjust for encapsulation headers.  Note that
     * we make room for padding in case there isn't room
     * at the end of first frame.
     */
    KASSERT(m2->m_len >= sizeof(eh2), ("no ethernet header!"));
    ETHER_HEADER_COPY(&eh2, mtod(m2, caddr_t));
    m2 = ieee80211_mbuf_adjust(vap,
        ATH_FF_MAX_HDR_PAD + sizeof(struct ether_header),
        NULL, m2);
    if (m2 == NULL) {
        /* NB: ieee80211_mbuf_adjust handles msgs+statistics */
        goto bad;
    }

    /*
     * Now do tunnel encapsulation.  First, each
     * frame gets a standard encapsulation.
     */
    m1 = ieee80211_ff_encap1(vap, m1, &eh1);
    if (m1 == NULL)
        goto bad;
    m2 = ieee80211_ff_encap1(vap, m2, &eh2);
    if (m2 == NULL)
        goto bad;

    /*
     * Pad leading frame to a 4-byte boundary.  If there
     * is space at the end of the first frame, put it
     * there; otherwise prepend to the front of the second
     * frame.  We know doing the second will always work
     * because we reserve space above.  We prefer appending
     * as this typically has better DMA alignment properties.
     */
    for (m = m1; m->m_next != NULL; m = m->m_next)
        ;
    pad = roundup2(m1->m_pkthdr.len, 4) - m1->m_pkthdr.len;
    if (pad) {
        if (M_TRAILINGSPACE(m) < pad) {     /* prepend to second */
            m2->m_data -= pad;
            m2->m_len += pad;
            m2->m_pkthdr.len += pad;
        } else {                            /* append to first */
            m->m_len += pad;
            m1->m_pkthdr.len += pad;
        }
    }

    /*
     * Now, stick 'em together and prepend the tunnel headers;
     * first the Atheros tunnel header (all zero for now) and
     * then a special fast frame LLC.
     *
     * XXX optimize by prepending together
     */
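    /*
     * Illustrative summary of the state at this point: m1 holds the
     * first re-encapsulated frame, already padded to a 4-byte
     * boundary, and m2 holds the second.  Below we chain m2 onto m1
     * and then prepend the all-zero 32-bit Atheros tunnel header
     * (+2 pad bytes) and the fast-frame LLC/SNAP (ether type 0x88bd),
     * giving the payload layout ieee80211_ff_decap() expects on receive.
     */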
    m->m_next = m2;                     /* NB: last mbuf from above */
    m1->m_pkthdr.len += m2->m_pkthdr.len;
    M_PREPEND(m1, sizeof(uint32_t)+2, M_NOWAIT);
    if (m1 == NULL) {                   /* XXX cannot happen */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: no space for tunnel header\n", __func__);
        vap->iv_stats.is_tx_nobuf++;
        return NULL;
    }
    memset(mtod(m1, void *), 0, sizeof(uint32_t)+2);

    M_PREPEND(m1, sizeof(struct llc), M_NOWAIT);
    if (m1 == NULL) {                   /* XXX cannot happen */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: no space for llc header\n", __func__);
        vap->iv_stats.is_tx_nobuf++;
        return NULL;
    }
    llc = mtod(m1, struct llc *);
    llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP;
    llc->llc_control = LLC_UI;
    llc->llc_snap.org_code[0] = ATH_FF_SNAP_ORGCODE_0;
    llc->llc_snap.org_code[1] = ATH_FF_SNAP_ORGCODE_1;
    llc->llc_snap.org_code[2] = ATH_FF_SNAP_ORGCODE_2;
    llc->llc_snap.ether_type = htons(ATH_FF_ETH_TYPE);

    vap->iv_stats.is_ff_encap++;

    return m1;
bad:
    if (m1 != NULL)
        m_freem(m1);
    if (m2 != NULL)
        m_freem(m2);
    return NULL;
}

static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
    struct ieee80211vap *vap = ni->ni_vap;
    struct ieee80211com *ic = ni->ni_ic;
    int error;

    IEEE80211_TX_LOCK_ASSERT(vap->iv_ic);

    /* encap and xmit */
    m = ieee80211_encap(vap, ni, m);
    if (m != NULL) {
        struct ifnet *ifp = vap->iv_ifp;

        error = ieee80211_parent_xmitpkt(ic, m);
        if (!error)
            if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
    } else
        ieee80211_free_node(ni);
}

/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
    struct mbuf *m, *next;
    struct ieee80211_node *ni;
    struct ieee80211vap *vap;

    for (m = head; m != last; m = next) {
        next = m->m_nextpkt;
        m->m_nextpkt = NULL;

        ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
        vap = ni->ni_vap;

        IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
            "%s: flush frame, age %u", __func__, M_AGE_GET(m));
        vap->iv_stats.is_ff_flush++;

        ff_transmit(ni, m);
    }
}

/*
 * Age frames on the staging queue.
 *
 * This is called without the comlock held, but it does all its work
 * behind the comlock.  Because of this, it is possible that the
 * staging queue was serviced between the caller's check and now;
 * thus simply checking that the queue has work in it may fail.
 *
 * See PR kern/174283 for more details.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq,
    int quanta)
{
    struct mbuf *m, *head;
    struct ieee80211_node *ni;

#if 0
    KASSERT(sq->head != NULL, ("stageq empty"));
#endif

    IEEE80211_LOCK(ic);
    head = sq->head;
    while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
        int tid = WME_AC_TO_TID(M_WME_GETAC(m));

        /* clear staging ref to frame */
        ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
        KASSERT(ni->ni_tx_superg[tid] == m, ("staging queue empty"));
        ni->ni_tx_superg[tid] = NULL;

        sq->head = m->m_nextpkt;
        sq->depth--;
    }
    if (m == NULL)
        sq->tail = NULL;
    else
        M_AGE_SUB(m, quanta);
    IEEE80211_UNLOCK(ic);

    IEEE80211_TX_LOCK(ic);
    ff_flush(head, m);
    IEEE80211_TX_UNLOCK(ic);
}

static void
stageq_add(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *m)
{
    int age = ieee80211_ffagemax;

    IEEE80211_LOCK_ASSERT(ic);

    if (sq->tail != NULL) {
        sq->tail->m_nextpkt = m;
        age -= M_AGE_GET(sq->head);
    } else
        sq->head = m;
    KASSERT(age >= 0, ("age %d", age));
    M_AGE_SET(m, age);
    m->m_nextpkt = NULL;
    sq->tail = m;
    sq->depth++;
}

static void
stageq_remove(struct ieee80211com *ic, struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
    struct mbuf *m, *mprev;

    IEEE80211_LOCK_ASSERT(ic);

    mprev = NULL;
    for (m = sq->head; m != NULL; m = m->m_nextpkt) {
        if (m == mstaged) {
            if (mprev == NULL)
                sq->head = m->m_nextpkt;
            else
                mprev->m_nextpkt = m->m_nextpkt;
            if (sq->tail == m)
                sq->tail = mprev;
            sq->depth--;
            return;
        }
        mprev = m;
    }
    printf("%s: packet not found\n", __func__);
}

static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
    const struct mbuf *m1, const struct mbuf *m2)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ieee80211vap *vap = ni->ni_vap;
    uint32_t framelen;

    /*
     * Approximate the frame length to be transmitted.  A swag to add
     * the following maximal values to the mbuf payload:
     * - 32: 802.11 encap + CRC
     * - 24: encryption overhead (if wep bit)
     * - 4 + 6: fast-frame header and padding
     * - 16: 2 LLC FF tunnel headers
     * - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
     */
    framelen = m1->m_pkthdr.len + 32 +
        ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
    if (vap->iv_flags & IEEE80211_F_PRIVACY)
        framelen += 24;
    if (m2 != NULL)
        framelen += m2->m_pkthdr.len;
    return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
}

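/*
 * Overview of the transmit staging path (summary of the code below):
 * ieee80211_ff_check() either returns the frame for immediate
 * transmission, parks it on the per-AC staging queue (holding a node
 * reference via m_pkthdr.rcvif), or pairs it with a previously staged
 * frame and returns the pair chained with m_nextpkt and marked M_FF
 * for later encapsulation by ieee80211_ff_encap().  Frames left on
 * the staging queue are eventually pushed out by ieee80211_ff_age().
 */
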
/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent immediately; otherwise return NULL.
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
    struct ieee80211vap *vap = ni->ni_vap;
    struct ieee80211com *ic = ni->ni_ic;
    struct ieee80211_superg *sg = ic->ic_superg;
    const int pri = M_WME_GETAC(m);
    struct ieee80211_stageq *sq;
    struct ieee80211_tx_ampdu *tap;
    struct mbuf *mstaged;
    uint32_t txtime, limit;

    IEEE80211_TX_UNLOCK_ASSERT(ic);

    /*
     * Check if the supplied frame can be aggregated.
     *
     * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
     *     Do 802.1x EAPOL frames proceed in the clear?  Then they couldn't
     *     be aggregated with other types of frames when encryption is on?
     */
    IEEE80211_LOCK(ic);
    tap = &ni->ni_tx_ampdu[WME_AC_TO_TID(pri)];
    mstaged = ni->ni_tx_superg[WME_AC_TO_TID(pri)];
    /* XXX NOTE: reusing packet counter state from A-MPDU */
    /*
     * XXX NOTE: this means we're double-counting; it should just
     * be done in ieee80211_output.c once for both superg and A-MPDU.
     */
    ieee80211_txampdu_count_packet(tap);

    /*
     * When not in station mode never aggregate a multicast
     * frame; this ensures, for example, that a combined frame
     * does not require multiple encryption keys.
     */
    if (vap->iv_opmode != IEEE80211_M_STA &&
        ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
        /* XXX flush staged frame? */
        IEEE80211_UNLOCK(ic);
        return m;
    }
    /*
     * If there is no frame to combine with and the pps is
     * too low, then do not attempt to aggregate this frame.
     */
    if (mstaged == NULL &&
        ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
        IEEE80211_UNLOCK(ic);
        return m;
    }
    sq = &sg->ff_stageq[pri];
    /*
     * Check the txop limit to ensure the aggregate fits.
     */
    limit = IEEE80211_TXOP_TO_US(
        ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
    if (limit != 0 &&
        (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
        /*
         * Aggregate too long, return to the caller for direct
         * transmission.  In addition, flush any pending frame
         * before sending this one.
         */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: txtime %u exceeds txop limit %u\n",
            __func__, txtime, limit);

        ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
        if (mstaged != NULL)
            stageq_remove(ic, sq, mstaged);
        IEEE80211_UNLOCK(ic);

        if (mstaged != NULL) {
            IEEE80211_TX_LOCK(ic);
            IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
                "%s: flush staged frame", __func__);
            /* encap and xmit */
            ff_transmit(ni, mstaged);
            IEEE80211_TX_UNLOCK(ic);
        }
        return m;                       /* NB: original frame */
    }
    /*
     * An aggregation candidate.  If there's a frame to partner
     * with then combine and return for processing.  Otherwise
     * save this frame and wait for a partner to show up (or
     * the frame to be flushed).  Note that staged frames also
     * hold their node reference.
     */
    if (mstaged != NULL) {
        ni->ni_tx_superg[WME_AC_TO_TID(pri)] = NULL;
        stageq_remove(ic, sq, mstaged);
        IEEE80211_UNLOCK(ic);

        IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
            "%s: aggregate fast-frame", __func__);
        /*
         * Release the node reference; we only need
         * the one already in mstaged.
         */
        KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
            ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
        ieee80211_free_node(ni);

        m->m_nextpkt = NULL;
        mstaged->m_nextpkt = m;
        mstaged->m_flags |= M_FF;       /* NB: mark for encap work */
    } else {
        KASSERT(ni->ni_tx_superg[WME_AC_TO_TID(pri)] == NULL,
            ("ni_tx_superg[]: %p",
             ni->ni_tx_superg[WME_AC_TO_TID(pri)]));
        ni->ni_tx_superg[WME_AC_TO_TID(pri)] = m;

        stageq_add(ic, sq, m);
        IEEE80211_UNLOCK(ic);

        IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
            "%s: stage frame, %u queued", __func__, sq->depth);
        /* NB: mstaged is NULL */
    }
    return mstaged;
}

void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
    /*
     * Clean FF state on re-associate.  This handles the case
     * where a station leaves w/o notifying us and then returns
     * before node is reaped for inactivity.
     */
    ieee80211_ff_node_cleanup(ni);
}

void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ieee80211_superg *sg = ic->ic_superg;
    struct mbuf *m, *next_m, *head;
    int tid;

    IEEE80211_LOCK(ic);
    head = NULL;
    for (tid = 0; tid < WME_NUM_TID; tid++) {
        int ac = TID_TO_WME_AC(tid);
        /*
         * XXX Initialise the packet counter.
         *
         * This may be double-work for 11n stations;
         * but without it we never setup things.
         */
        ieee80211_txampdu_init_pps(&ni->ni_tx_ampdu[tid]);
        m = ni->ni_tx_superg[tid];
        if (m != NULL) {
            ni->ni_tx_superg[tid] = NULL;
            stageq_remove(ic, &sg->ff_stageq[ac], m);
            m->m_nextpkt = head;
            head = m;
        }
    }
    IEEE80211_UNLOCK(ic);

    /*
     * Free mbufs, taking care to not dereference the mbuf after
     * we free it (hence grabbing m_nextpkt before we free it.)
     */
    m = head;
    while (m != NULL) {
        next_m = m->m_nextpkt;
        m_freem(m);
        ieee80211_free_node(ni);
        m = next_m;
    }
}

/*
 * Switch between turbo and non-turbo operating modes.
 * Use the specified channel flags to locate the new
 * channel, update 802.11 state, and then call back into
 * the driver to effect the change.
 */
void
ieee80211_dturbo_switch(struct ieee80211vap *vap, int newflags)
{
    struct ieee80211com *ic = vap->iv_ic;
    struct ieee80211_channel *chan;

    chan = ieee80211_find_channel(ic, ic->ic_bsschan->ic_freq, newflags);
    if (chan == NULL) {                 /* XXX should not happen */
        IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
            "%s: no channel with freq %u flags 0x%x\n",
            __func__, ic->ic_bsschan->ic_freq, newflags);
        return;
    }

    IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
        "%s: %s -> %s (freq %u flags 0x%x)\n", __func__,
        ieee80211_phymode_name[ieee80211_chan2mode(ic->ic_bsschan)],
        ieee80211_phymode_name[ieee80211_chan2mode(chan)],
        chan->ic_freq, chan->ic_flags);

    ic->ic_bsschan = chan;
    ic->ic_prevchan = ic->ic_curchan;
    ic->ic_curchan = chan;
    ic->ic_rt = ieee80211_get_ratetable(chan);
    ic->ic_set_channel(ic);
    ieee80211_radiotap_chan_change(ic);
    /* NB: do not need to reset ERP state 'cuz we're in sta mode */
}

/*
 * Return the current ``state'' of an Atheros capability.
 * If associated in station mode report the negotiated
 * setting.  Otherwise report the current setting.
 */
static int
getathcap(struct ieee80211vap *vap, int cap)
{
    if (vap->iv_opmode == IEEE80211_M_STA &&
        vap->iv_state == IEEE80211_S_RUN)
        return IEEE80211_ATH_CAP(vap, vap->iv_bss, cap) != 0;
    else
        return (vap->iv_flags & cap) != 0;
}

static int
superg_ioctl_get80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
    switch (ireq->i_type) {
    case IEEE80211_IOC_FF:
        ireq->i_val = getathcap(vap, IEEE80211_F_FF);
        break;
    case IEEE80211_IOC_TURBOP:
        ireq->i_val = getathcap(vap, IEEE80211_F_TURBOP);
        break;
    default:
        return ENOSYS;
    }
    return 0;
}
IEEE80211_IOCTL_GET(superg, superg_ioctl_get80211);

static int
superg_ioctl_set80211(struct ieee80211vap *vap, struct ieee80211req *ireq)
{
    switch (ireq->i_type) {
    case IEEE80211_IOC_FF:
        if (ireq->i_val) {
            if ((vap->iv_caps & IEEE80211_C_FF) == 0)
                return EOPNOTSUPP;
            vap->iv_flags |= IEEE80211_F_FF;
        } else
            vap->iv_flags &= ~IEEE80211_F_FF;
        return ENETRESET;
    case IEEE80211_IOC_TURBOP:
        if (ireq->i_val) {
            if ((vap->iv_caps & IEEE80211_C_TURBOP) == 0)
                return EOPNOTSUPP;
            vap->iv_flags |= IEEE80211_F_TURBOP;
        } else
            vap->iv_flags &= ~IEEE80211_F_TURBOP;
        return ENETRESET;
    default:
        return ENOSYS;
    }
    return 0;
}
IEEE80211_IOCTL_SET(superg, superg_ioctl_set80211);

#endif /* IEEE80211_SUPPORT_SUPERG */