1 /* 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12 #include <linux/kernel.h> 13 #include <linux/skbuff.h> 14 #include <linux/netdevice.h> 15 #include <linux/etherdevice.h> 16 #include <linux/rcupdate.h> 17 #include <net/mac80211.h> 18 #include <net/ieee80211_radiotap.h> 19 20 #include "ieee80211_i.h" 21 #include "ieee80211_led.h" 22 #include "wep.h" 23 #include "wpa.h" 24 #include "tkip.h" 25 #include "wme.h" 26 27 u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw, 28 struct tid_ampdu_rx *tid_agg_rx, 29 struct sk_buff *skb, u16 mpdu_seq_num, 30 int bar_req); 31 /* 32 * monitor mode reception 33 * 34 * This function cleans up the SKB, i.e. it removes all the stuff 35 * only useful for monitoring. 
36 */ 37 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 38 struct sk_buff *skb, 39 int rtap_len) 40 { 41 skb_pull(skb, rtap_len); 42 43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 44 if (likely(skb->len > FCS_LEN)) 45 skb_trim(skb, skb->len - FCS_LEN); 46 else { 47 /* driver bug */ 48 WARN_ON(1); 49 dev_kfree_skb(skb); 50 skb = NULL; 51 } 52 } 53 54 return skb; 55 } 56 57 static inline int should_drop_frame(struct ieee80211_rx_status *status, 58 struct sk_buff *skb, 59 int present_fcs_len, 60 int radiotap_len) 61 { 62 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 63 64 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 65 return 1; 66 if (unlikely(skb->len < 16 + present_fcs_len + radiotap_len)) 67 return 1; 68 if (((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FTYPE)) == 69 cpu_to_le16(IEEE80211_FTYPE_CTL)) && 70 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) != 71 cpu_to_le16(IEEE80211_STYPE_PSPOLL)) && 72 ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE)) != 73 cpu_to_le16(IEEE80211_STYPE_BACK_REQ))) 74 return 1; 75 return 0; 76 } 77 78 /* 79 * This function copies a received frame to all monitor interfaces and 80 * returns a cleaned-up SKB that no longer includes the FCS nor the 81 * radiotap header the driver might have added. 
82 */ 83 static struct sk_buff * 84 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 85 struct ieee80211_rx_status *status) 86 { 87 struct ieee80211_sub_if_data *sdata; 88 struct ieee80211_rate *rate; 89 int needed_headroom = 0; 90 struct ieee80211_radiotap_header *rthdr; 91 __le64 *rttsft = NULL; 92 struct ieee80211_rtap_fixed_data { 93 u8 flags; 94 u8 rate; 95 __le16 chan_freq; 96 __le16 chan_flags; 97 u8 antsignal; 98 u8 padding_for_rxflags; 99 __le16 rx_flags; 100 } __attribute__ ((packed)) *rtfixed; 101 struct sk_buff *skb, *skb2; 102 struct net_device *prev_dev = NULL; 103 int present_fcs_len = 0; 104 int rtap_len = 0; 105 106 /* 107 * First, we may need to make a copy of the skb because 108 * (1) we need to modify it for radiotap (if not present), and 109 * (2) the other RX handlers will modify the skb we got. 110 * 111 * We don't need to, of course, if we aren't going to return 112 * the SKB because it has a bad FCS/PLCP checksum. 113 */ 114 if (status->flag & RX_FLAG_RADIOTAP) 115 rtap_len = ieee80211_get_radiotap_len(origskb->data); 116 else 117 /* room for radiotap header, always present fields and TSFT */ 118 needed_headroom = sizeof(*rthdr) + sizeof(*rtfixed) + 8; 119 120 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 121 present_fcs_len = FCS_LEN; 122 123 if (!local->monitors) { 124 if (should_drop_frame(status, origskb, present_fcs_len, 125 rtap_len)) { 126 dev_kfree_skb(origskb); 127 return NULL; 128 } 129 130 return remove_monitor_info(local, origskb, rtap_len); 131 } 132 133 if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) { 134 /* only need to expand headroom if necessary */ 135 skb = origskb; 136 origskb = NULL; 137 138 /* 139 * This shouldn't trigger often because most devices have an 140 * RX header they pull before we get here, and that should 141 * be big enough for our radiotap information. 
We should 142 * probably export the length to drivers so that we can have 143 * them allocate enough headroom to start with. 144 */ 145 if (skb_headroom(skb) < needed_headroom && 146 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 147 dev_kfree_skb(skb); 148 return NULL; 149 } 150 } else { 151 /* 152 * Need to make a copy and possibly remove radiotap header 153 * and FCS from the original. 154 */ 155 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 156 157 origskb = remove_monitor_info(local, origskb, rtap_len); 158 159 if (!skb) 160 return origskb; 161 } 162 163 /* if necessary, prepend radiotap information */ 164 if (!(status->flag & RX_FLAG_RADIOTAP)) { 165 rtfixed = (void *) skb_push(skb, sizeof(*rtfixed)); 166 rtap_len = sizeof(*rthdr) + sizeof(*rtfixed); 167 if (status->flag & RX_FLAG_TSFT) { 168 rttsft = (void *) skb_push(skb, sizeof(*rttsft)); 169 rtap_len += 8; 170 } 171 rthdr = (void *) skb_push(skb, sizeof(*rthdr)); 172 memset(rthdr, 0, sizeof(*rthdr)); 173 memset(rtfixed, 0, sizeof(*rtfixed)); 174 rthdr->it_present = 175 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 176 (1 << IEEE80211_RADIOTAP_RATE) | 177 (1 << IEEE80211_RADIOTAP_CHANNEL) | 178 (1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL) | 179 (1 << IEEE80211_RADIOTAP_RX_FLAGS)); 180 rtfixed->flags = 0; 181 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 182 rtfixed->flags |= IEEE80211_RADIOTAP_F_FCS; 183 184 if (rttsft) { 185 *rttsft = cpu_to_le64(status->mactime); 186 rthdr->it_present |= 187 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 188 } 189 190 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ 191 rtfixed->rx_flags = 0; 192 if (status->flag & 193 (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 194 rtfixed->rx_flags |= 195 cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS); 196 197 rate = ieee80211_get_rate(local, status->phymode, 198 status->rate); 199 if (rate) 200 rtfixed->rate = rate->rate / 5; 201 202 rtfixed->chan_freq = cpu_to_le16(status->freq); 203 204 if 
(status->phymode == MODE_IEEE80211A) 205 rtfixed->chan_flags = 206 cpu_to_le16(IEEE80211_CHAN_OFDM | 207 IEEE80211_CHAN_5GHZ); 208 else 209 rtfixed->chan_flags = 210 cpu_to_le16(IEEE80211_CHAN_DYN | 211 IEEE80211_CHAN_2GHZ); 212 213 rtfixed->antsignal = status->ssi; 214 rthdr->it_len = cpu_to_le16(rtap_len); 215 } 216 217 skb_reset_mac_header(skb); 218 skb->ip_summed = CHECKSUM_UNNECESSARY; 219 skb->pkt_type = PACKET_OTHERHOST; 220 skb->protocol = htons(ETH_P_802_2); 221 222 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 223 if (!netif_running(sdata->dev)) 224 continue; 225 226 if (sdata->vif.type != IEEE80211_IF_TYPE_MNTR) 227 continue; 228 229 if (prev_dev) { 230 skb2 = skb_clone(skb, GFP_ATOMIC); 231 if (skb2) { 232 skb2->dev = prev_dev; 233 netif_rx(skb2); 234 } 235 } 236 237 prev_dev = sdata->dev; 238 sdata->dev->stats.rx_packets++; 239 sdata->dev->stats.rx_bytes += skb->len; 240 } 241 242 if (prev_dev) { 243 skb->dev = prev_dev; 244 netif_rx(skb); 245 } else 246 dev_kfree_skb(skb); 247 248 return origskb; 249 } 250 251 252 /* pre-rx handlers 253 * 254 * these don't have dev/sdata fields in the rx data 255 * The sta value should also not be used because it may 256 * be NULL even though a STA (in IBSS mode) will be added. 257 */ 258 259 static ieee80211_txrx_result 260 ieee80211_rx_h_parse_qos(struct ieee80211_txrx_data *rx) 261 { 262 u8 *data = rx->skb->data; 263 int tid; 264 265 /* does the frame have a qos control field? 
*/ 266 if (WLAN_FC_IS_QOS_DATA(rx->fc)) { 267 u8 *qc = data + ieee80211_get_hdrlen(rx->fc) - QOS_CONTROL_LEN; 268 /* frame has qos control */ 269 tid = qc[0] & QOS_CONTROL_TID_MASK; 270 if (qc[0] & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 271 rx->flags |= IEEE80211_TXRXD_RX_AMSDU; 272 else 273 rx->flags &= ~IEEE80211_TXRXD_RX_AMSDU; 274 } else { 275 if (unlikely((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)) { 276 /* Separate TID for management frames */ 277 tid = NUM_RX_DATA_QUEUES - 1; 278 } else { 279 /* no qos control present */ 280 tid = 0; /* 802.1d - Best Effort */ 281 } 282 } 283 284 I802_DEBUG_INC(rx->local->wme_rx_queue[tid]); 285 /* only a debug counter, sta might not be assigned properly yet */ 286 if (rx->sta) 287 I802_DEBUG_INC(rx->sta->wme_rx_queue[tid]); 288 289 rx->u.rx.queue = tid; 290 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 291 * For now, set skb->priority to 0 for other cases. */ 292 rx->skb->priority = (tid > 7) ? 0 : tid; 293 294 return TXRX_CONTINUE; 295 } 296 297 298 static u32 ieee80211_rx_load_stats(struct ieee80211_local *local, 299 struct sk_buff *skb, 300 struct ieee80211_rx_status *status) 301 { 302 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 303 u32 load = 0, hdrtime; 304 struct ieee80211_rate *rate; 305 struct ieee80211_hw_mode *mode = local->hw.conf.mode; 306 int i; 307 308 /* Estimate total channel use caused by this frame */ 309 310 if (unlikely(mode->num_rates < 0)) 311 return TXRX_CONTINUE; 312 313 rate = &mode->rates[0]; 314 for (i = 0; i < mode->num_rates; i++) { 315 if (mode->rates[i].val == status->rate) { 316 rate = &mode->rates[i]; 317 break; 318 } 319 } 320 321 /* 1 bit at 1 Mbit/s takes 1 usec; in channel_use values, 322 * 1 usec = 1/8 * (1080 / 10) = 13.5 */ 323 324 if (mode->mode == MODE_IEEE80211A || 325 (mode->mode == MODE_IEEE80211G && 326 rate->flags & IEEE80211_RATE_ERP)) 327 hdrtime = CHAN_UTIL_HDR_SHORT; 328 else 329 hdrtime = CHAN_UTIL_HDR_LONG; 330 331 
load = hdrtime; 332 if (!is_multicast_ether_addr(hdr->addr1)) 333 load += hdrtime; 334 335 load += skb->len * rate->rate_inv; 336 337 /* Divide channel_use by 8 to avoid wrapping around the counter */ 338 load >>= CHAN_UTIL_SHIFT; 339 340 return load; 341 } 342 343 static ieee80211_txrx_result 344 ieee80211_rx_h_verify_ip_alignment(struct ieee80211_txrx_data *rx) 345 { 346 int hdrlen; 347 348 /* 349 * Drivers are required to align the payload data in a way that 350 * guarantees that the contained IP header is aligned to a four- 351 * byte boundary. In the case of regular frames, this simply means 352 * aligning the payload to a four-byte boundary (because either 353 * the IP header is directly contained, or IV/RFC1042 headers that 354 * have a length divisible by four are in front of it. 355 * 356 * With A-MSDU frames, however, the payload data address must 357 * yield two modulo four because there are 14-byte 802.3 headers 358 * within the A-MSDU frames that push the IP header further back 359 * to a multiple of four again. Thankfully, the specs were sane 360 * enough this time around to require padding each A-MSDU subframe 361 * to a length that is a multiple of four. 362 * 363 * Padding like atheros hardware adds which is inbetween the 802.11 364 * header and the payload is not supported, the driver is required 365 * to move the 802.11 header further back in that case. 
366 */ 367 hdrlen = ieee80211_get_hdrlen(rx->fc); 368 if (rx->flags & IEEE80211_TXRXD_RX_AMSDU) 369 hdrlen += ETH_HLEN; 370 WARN_ON_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3); 371 372 return TXRX_CONTINUE; 373 } 374 375 ieee80211_rx_handler ieee80211_rx_pre_handlers[] = 376 { 377 ieee80211_rx_h_parse_qos, 378 ieee80211_rx_h_verify_ip_alignment, 379 NULL 380 }; 381 382 /* rx handlers */ 383 384 static ieee80211_txrx_result 385 ieee80211_rx_h_if_stats(struct ieee80211_txrx_data *rx) 386 { 387 if (rx->sta) 388 rx->sta->channel_use_raw += rx->u.rx.load; 389 rx->sdata->channel_use_raw += rx->u.rx.load; 390 return TXRX_CONTINUE; 391 } 392 393 static ieee80211_txrx_result 394 ieee80211_rx_h_passive_scan(struct ieee80211_txrx_data *rx) 395 { 396 struct ieee80211_local *local = rx->local; 397 struct sk_buff *skb = rx->skb; 398 399 if (unlikely(local->sta_hw_scanning)) 400 return ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status); 401 402 if (unlikely(local->sta_sw_scanning)) { 403 /* drop all the other packets during a software scan anyway */ 404 if (ieee80211_sta_rx_scan(rx->dev, skb, rx->u.rx.status) 405 != TXRX_QUEUED) 406 dev_kfree_skb(skb); 407 return TXRX_QUEUED; 408 } 409 410 if (unlikely(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) { 411 /* scanning finished during invoking of handlers */ 412 I802_DEBUG_INC(local->rx_handlers_drop_passive_scan); 413 return TXRX_DROP; 414 } 415 416 return TXRX_CONTINUE; 417 } 418 419 static ieee80211_txrx_result 420 ieee80211_rx_h_check(struct ieee80211_txrx_data *rx) 421 { 422 struct ieee80211_hdr *hdr; 423 hdr = (struct ieee80211_hdr *) rx->skb->data; 424 425 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 
9.2.9) */ 426 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 427 if (unlikely(rx->fc & IEEE80211_FCTL_RETRY && 428 rx->sta->last_seq_ctrl[rx->u.rx.queue] == 429 hdr->seq_ctrl)) { 430 if (rx->flags & IEEE80211_TXRXD_RXRA_MATCH) { 431 rx->local->dot11FrameDuplicateCount++; 432 rx->sta->num_duplicates++; 433 } 434 return TXRX_DROP; 435 } else 436 rx->sta->last_seq_ctrl[rx->u.rx.queue] = hdr->seq_ctrl; 437 } 438 439 if (unlikely(rx->skb->len < 16)) { 440 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 441 return TXRX_DROP; 442 } 443 444 /* Drop disallowed frame classes based on STA auth/assoc state; 445 * IEEE 802.11, Chap 5.5. 446 * 447 * 80211.o does filtering only based on association state, i.e., it 448 * drops Class 3 frames from not associated stations. hostapd sends 449 * deauth/disassoc frames when needed. In addition, hostapd is 450 * responsible for filtering on both auth and assoc states. 451 */ 452 if (unlikely(((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA || 453 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL && 454 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PSPOLL)) && 455 rx->sdata->vif.type != IEEE80211_IF_TYPE_IBSS && 456 (!rx->sta || !(rx->sta->flags & WLAN_STA_ASSOC)))) { 457 if ((!(rx->fc & IEEE80211_FCTL_FROMDS) && 458 !(rx->fc & IEEE80211_FCTL_TODS) && 459 (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) 460 || !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) { 461 /* Drop IBSS frames and frames for other hosts 462 * silently. 
*/ 463 return TXRX_DROP; 464 } 465 466 return TXRX_DROP; 467 } 468 469 return TXRX_CONTINUE; 470 } 471 472 473 static ieee80211_txrx_result 474 ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx) 475 { 476 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 477 int keyidx; 478 int hdrlen; 479 ieee80211_txrx_result result = TXRX_DROP; 480 struct ieee80211_key *stakey = NULL; 481 482 /* 483 * Key selection 101 484 * 485 * There are three types of keys: 486 * - GTK (group keys) 487 * - PTK (pairwise keys) 488 * - STK (station-to-station pairwise keys) 489 * 490 * When selecting a key, we have to distinguish between multicast 491 * (including broadcast) and unicast frames, the latter can only 492 * use PTKs and STKs while the former always use GTKs. Unless, of 493 * course, actual WEP keys ("pre-RSNA") are used, then unicast 494 * frames can also use key indizes like GTKs. Hence, if we don't 495 * have a PTK/STK we check the key index for a WEP key. 496 * 497 * Note that in a regular BSS, multicast frames are sent by the 498 * AP only, associated stations unicast the frame to the AP first 499 * which then multicasts it on their behalf. 500 * 501 * There is also a slight problem in IBSS mode: GTKs are negotiated 502 * with each station, that is something we don't currently handle. 503 * The spec seems to expect that one negotiates the same key with 504 * every station but there's no such requirement; VLANs could be 505 * possible. 506 */ 507 508 if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) 509 return TXRX_CONTINUE; 510 511 /* 512 * No point in finding a key and decrypting if the frame is neither 513 * addressed to us nor a multicast frame. 
514 */ 515 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 516 return TXRX_CONTINUE; 517 518 if (rx->sta) 519 stakey = rcu_dereference(rx->sta->key); 520 521 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 522 rx->key = stakey; 523 } else { 524 /* 525 * The device doesn't give us the IV so we won't be 526 * able to look up the key. That's ok though, we 527 * don't need to decrypt the frame, we just won't 528 * be able to keep statistics accurate. 529 * Except for key threshold notifications, should 530 * we somehow allow the driver to tell us which key 531 * the hardware used if this flag is set? 532 */ 533 if ((rx->u.rx.status->flag & RX_FLAG_DECRYPTED) && 534 (rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) 535 return TXRX_CONTINUE; 536 537 hdrlen = ieee80211_get_hdrlen(rx->fc); 538 539 if (rx->skb->len < 8 + hdrlen) 540 return TXRX_DROP; /* TODO: count this? */ 541 542 /* 543 * no need to call ieee80211_wep_get_keyidx, 544 * it verifies a bunch of things we've done already 545 */ 546 keyidx = rx->skb->data[hdrlen + 3] >> 6; 547 548 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 549 550 /* 551 * RSNA-protected unicast frames should always be sent with 552 * pairwise or station-to-station keys, but for WEP we allow 553 * using a key index as well. 
554 */ 555 if (rx->key && rx->key->conf.alg != ALG_WEP && 556 !is_multicast_ether_addr(hdr->addr1)) 557 rx->key = NULL; 558 } 559 560 if (rx->key) { 561 rx->key->tx_rx_count++; 562 /* TODO: add threshold stuff again */ 563 } else { 564 #ifdef CONFIG_MAC80211_DEBUG 565 if (net_ratelimit()) 566 printk(KERN_DEBUG "%s: RX protected frame," 567 " but have no key\n", rx->dev->name); 568 #endif /* CONFIG_MAC80211_DEBUG */ 569 return TXRX_DROP; 570 } 571 572 /* Check for weak IVs if possible */ 573 if (rx->sta && rx->key->conf.alg == ALG_WEP && 574 ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 575 (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED) || 576 !(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) && 577 ieee80211_wep_is_weak_iv(rx->skb, rx->key)) 578 rx->sta->wep_weak_iv_count++; 579 580 switch (rx->key->conf.alg) { 581 case ALG_WEP: 582 result = ieee80211_crypto_wep_decrypt(rx); 583 break; 584 case ALG_TKIP: 585 result = ieee80211_crypto_tkip_decrypt(rx); 586 break; 587 case ALG_CCMP: 588 result = ieee80211_crypto_ccmp_decrypt(rx); 589 break; 590 } 591 592 /* either the frame has been decrypted or will be dropped */ 593 rx->u.rx.status->flag |= RX_FLAG_DECRYPTED; 594 595 return result; 596 } 597 598 static void ap_sta_ps_start(struct net_device *dev, struct sta_info *sta) 599 { 600 struct ieee80211_sub_if_data *sdata; 601 DECLARE_MAC_BUF(mac); 602 603 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 604 605 if (sdata->bss) 606 atomic_inc(&sdata->bss->num_sta_ps); 607 sta->flags |= WLAN_STA_PS; 608 sta->pspoll = 0; 609 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 610 printk(KERN_DEBUG "%s: STA %s aid %d enters power save mode\n", 611 dev->name, print_mac(mac, sta->addr), sta->aid); 612 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 613 } 614 615 static int ap_sta_ps_end(struct net_device *dev, struct sta_info *sta) 616 { 617 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 618 struct sk_buff *skb; 619 int sent = 0; 620 struct ieee80211_sub_if_data *sdata; 
621 struct ieee80211_tx_packet_data *pkt_data; 622 DECLARE_MAC_BUF(mac); 623 624 sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); 625 if (sdata->bss) 626 atomic_dec(&sdata->bss->num_sta_ps); 627 sta->flags &= ~(WLAN_STA_PS | WLAN_STA_TIM); 628 sta->pspoll = 0; 629 if (!skb_queue_empty(&sta->ps_tx_buf)) { 630 if (local->ops->set_tim) 631 local->ops->set_tim(local_to_hw(local), sta->aid, 0); 632 if (sdata->bss) 633 bss_tim_clear(local, sdata->bss, sta->aid); 634 } 635 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 636 printk(KERN_DEBUG "%s: STA %s aid %d exits power save mode\n", 637 dev->name, print_mac(mac, sta->addr), sta->aid); 638 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 639 /* Send all buffered frames to the station */ 640 while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL) { 641 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 642 sent++; 643 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 644 dev_queue_xmit(skb); 645 } 646 while ((skb = skb_dequeue(&sta->ps_tx_buf)) != NULL) { 647 pkt_data = (struct ieee80211_tx_packet_data *) skb->cb; 648 local->total_ps_buffered--; 649 sent++; 650 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 651 printk(KERN_DEBUG "%s: STA %s aid %d send PS frame " 652 "since STA not sleeping anymore\n", dev->name, 653 print_mac(mac, sta->addr), sta->aid); 654 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 655 pkt_data->flags |= IEEE80211_TXPD_REQUEUE; 656 dev_queue_xmit(skb); 657 } 658 659 return sent; 660 } 661 662 static ieee80211_txrx_result 663 ieee80211_rx_h_sta_process(struct ieee80211_txrx_data *rx) 664 { 665 struct sta_info *sta = rx->sta; 666 struct net_device *dev = rx->dev; 667 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; 668 669 if (!sta) 670 return TXRX_CONTINUE; 671 672 /* Update last_rx only for IBSS packets which are for the current 673 * BSSID to avoid keeping the current IBSS network alive in cases where 674 * other STAs are using different BSSID. 
*/ 675 if (rx->sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { 676 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 677 IEEE80211_IF_TYPE_IBSS); 678 if (compare_ether_addr(bssid, rx->sdata->u.sta.bssid) == 0) 679 sta->last_rx = jiffies; 680 } else 681 if (!is_multicast_ether_addr(hdr->addr1) || 682 rx->sdata->vif.type == IEEE80211_IF_TYPE_STA) { 683 /* Update last_rx only for unicast frames in order to prevent 684 * the Probe Request frames (the only broadcast frames from a 685 * STA in infrastructure mode) from keeping a connection alive. 686 */ 687 sta->last_rx = jiffies; 688 } 689 690 if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) 691 return TXRX_CONTINUE; 692 693 sta->rx_fragments++; 694 sta->rx_bytes += rx->skb->len; 695 sta->last_rssi = rx->u.rx.status->ssi; 696 sta->last_signal = rx->u.rx.status->signal; 697 sta->last_noise = rx->u.rx.status->noise; 698 699 if (!(rx->fc & IEEE80211_FCTL_MOREFRAGS)) { 700 /* Change STA power saving mode only in the end of a frame 701 * exchange sequence */ 702 if ((sta->flags & WLAN_STA_PS) && !(rx->fc & IEEE80211_FCTL_PM)) 703 rx->u.rx.sent_ps_buffered += ap_sta_ps_end(dev, sta); 704 else if (!(sta->flags & WLAN_STA_PS) && 705 (rx->fc & IEEE80211_FCTL_PM)) 706 ap_sta_ps_start(dev, sta); 707 } 708 709 /* Drop data::nullfunc frames silently, since they are used only to 710 * control station power saving mode. */ 711 if ((rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && 712 (rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_NULLFUNC) { 713 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 714 /* Update counter and free packet here to avoid counting this 715 * as a dropped packed. 
*/ 716 sta->rx_packets++; 717 dev_kfree_skb(rx->skb); 718 return TXRX_QUEUED; 719 } 720 721 return TXRX_CONTINUE; 722 } /* ieee80211_rx_h_sta_process */ 723 724 static inline struct ieee80211_fragment_entry * 725 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 726 unsigned int frag, unsigned int seq, int rx_queue, 727 struct sk_buff **skb) 728 { 729 struct ieee80211_fragment_entry *entry; 730 int idx; 731 732 idx = sdata->fragment_next; 733 entry = &sdata->fragments[sdata->fragment_next++]; 734 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 735 sdata->fragment_next = 0; 736 737 if (!skb_queue_empty(&entry->skb_list)) { 738 #ifdef CONFIG_MAC80211_DEBUG 739 struct ieee80211_hdr *hdr = 740 (struct ieee80211_hdr *) entry->skb_list.next->data; 741 DECLARE_MAC_BUF(mac); 742 DECLARE_MAC_BUF(mac2); 743 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 744 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 745 "addr1=%s addr2=%s\n", 746 sdata->dev->name, idx, 747 jiffies - entry->first_frag_time, entry->seq, 748 entry->last_frag, print_mac(mac, hdr->addr1), 749 print_mac(mac2, hdr->addr2)); 750 #endif /* CONFIG_MAC80211_DEBUG */ 751 __skb_queue_purge(&entry->skb_list); 752 } 753 754 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 755 *skb = NULL; 756 entry->first_frag_time = jiffies; 757 entry->seq = seq; 758 entry->rx_queue = rx_queue; 759 entry->last_frag = frag; 760 entry->ccmp = 0; 761 entry->extra_len = 0; 762 763 return entry; 764 } 765 766 static inline struct ieee80211_fragment_entry * 767 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 768 u16 fc, unsigned int frag, unsigned int seq, 769 int rx_queue, struct ieee80211_hdr *hdr) 770 { 771 struct ieee80211_fragment_entry *entry; 772 int i, idx; 773 774 idx = sdata->fragment_next; 775 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 776 struct ieee80211_hdr *f_hdr; 777 u16 f_fc; 778 779 idx--; 780 if (idx < 0) 781 idx = IEEE80211_FRAGMENT_MAX - 1; 782 783 
entry = &sdata->fragments[idx]; 784 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 785 entry->rx_queue != rx_queue || 786 entry->last_frag + 1 != frag) 787 continue; 788 789 f_hdr = (struct ieee80211_hdr *) entry->skb_list.next->data; 790 f_fc = le16_to_cpu(f_hdr->frame_control); 791 792 if ((fc & IEEE80211_FCTL_FTYPE) != (f_fc & IEEE80211_FCTL_FTYPE) || 793 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 794 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 795 continue; 796 797 if (entry->first_frag_time + 2 * HZ < jiffies) { 798 __skb_queue_purge(&entry->skb_list); 799 continue; 800 } 801 return entry; 802 } 803 804 return NULL; 805 } 806 807 static ieee80211_txrx_result 808 ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) 809 { 810 struct ieee80211_hdr *hdr; 811 u16 sc; 812 unsigned int frag, seq; 813 struct ieee80211_fragment_entry *entry; 814 struct sk_buff *skb; 815 DECLARE_MAC_BUF(mac); 816 817 hdr = (struct ieee80211_hdr *) rx->skb->data; 818 sc = le16_to_cpu(hdr->seq_ctrl); 819 frag = sc & IEEE80211_SCTL_FRAG; 820 821 if (likely((!(rx->fc & IEEE80211_FCTL_MOREFRAGS) && frag == 0) || 822 (rx->skb)->len < 24 || 823 is_multicast_ether_addr(hdr->addr1))) { 824 /* not fragmented */ 825 goto out; 826 } 827 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 828 829 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 830 831 if (frag == 0) { 832 /* This is the first fragment of a new frame. */ 833 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 834 rx->u.rx.queue, &(rx->skb)); 835 if (rx->key && rx->key->conf.alg == ALG_CCMP && 836 (rx->fc & IEEE80211_FCTL_PROTECTED)) { 837 /* Store CCMP PN so that we can verify that the next 838 * fragment has a sequential PN value. */ 839 entry->ccmp = 1; 840 memcpy(entry->last_pn, 841 rx->key->u.ccmp.rx_pn[rx->u.rx.queue], 842 CCMP_PN_LEN); 843 } 844 return TXRX_QUEUED; 845 } 846 847 /* This is a fragment for a frame that should already be pending in 848 * fragment cache. 
Add this fragment to the end of the pending entry. 849 */ 850 entry = ieee80211_reassemble_find(rx->sdata, rx->fc, frag, seq, 851 rx->u.rx.queue, hdr); 852 if (!entry) { 853 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 854 return TXRX_DROP; 855 } 856 857 /* Verify that MPDUs within one MSDU have sequential PN values. 858 * (IEEE 802.11i, 8.3.3.4.5) */ 859 if (entry->ccmp) { 860 int i; 861 u8 pn[CCMP_PN_LEN], *rpn; 862 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 863 return TXRX_DROP; 864 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 865 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 866 pn[i]++; 867 if (pn[i]) 868 break; 869 } 870 rpn = rx->key->u.ccmp.rx_pn[rx->u.rx.queue]; 871 if (memcmp(pn, rpn, CCMP_PN_LEN) != 0) { 872 if (net_ratelimit()) 873 printk(KERN_DEBUG "%s: defrag: CCMP PN not " 874 "sequential A2=%s" 875 " PN=%02x%02x%02x%02x%02x%02x " 876 "(expected %02x%02x%02x%02x%02x%02x)\n", 877 rx->dev->name, print_mac(mac, hdr->addr2), 878 rpn[0], rpn[1], rpn[2], rpn[3], rpn[4], 879 rpn[5], pn[0], pn[1], pn[2], pn[3], 880 pn[4], pn[5]); 881 return TXRX_DROP; 882 } 883 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 884 } 885 886 skb_pull(rx->skb, ieee80211_get_hdrlen(rx->fc)); 887 __skb_queue_tail(&entry->skb_list, rx->skb); 888 entry->last_frag = frag; 889 entry->extra_len += rx->skb->len; 890 if (rx->fc & IEEE80211_FCTL_MOREFRAGS) { 891 rx->skb = NULL; 892 return TXRX_QUEUED; 893 } 894 895 rx->skb = __skb_dequeue(&entry->skb_list); 896 if (skb_tailroom(rx->skb) < entry->extra_len) { 897 I802_DEBUG_INC(rx->local->rx_expand_skb_head2); 898 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 899 GFP_ATOMIC))) { 900 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 901 __skb_queue_purge(&entry->skb_list); 902 return TXRX_DROP; 903 } 904 } 905 while ((skb = __skb_dequeue(&entry->skb_list))) { 906 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 907 dev_kfree_skb(skb); 908 } 909 910 /* Complete frame has been reassembled - process it now */ 911 rx->flags 
|= IEEE80211_TXRXD_FRAGMENTED; 912 913 out: 914 if (rx->sta) 915 rx->sta->rx_packets++; 916 if (is_multicast_ether_addr(hdr->addr1)) 917 rx->local->dot11MulticastReceivedFrameCount++; 918 else 919 ieee80211_led_rx(rx->local); 920 return TXRX_CONTINUE; 921 } 922 923 static ieee80211_txrx_result 924 ieee80211_rx_h_ps_poll(struct ieee80211_txrx_data *rx) 925 { 926 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev); 927 struct sk_buff *skb; 928 int no_pending_pkts; 929 DECLARE_MAC_BUF(mac); 930 931 if (likely(!rx->sta || 932 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL || 933 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_PSPOLL || 934 !(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))) 935 return TXRX_CONTINUE; 936 937 if ((sdata->vif.type != IEEE80211_IF_TYPE_AP) && 938 (sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) 939 return TXRX_DROP; 940 941 skb = skb_dequeue(&rx->sta->tx_filtered); 942 if (!skb) { 943 skb = skb_dequeue(&rx->sta->ps_tx_buf); 944 if (skb) 945 rx->local->total_ps_buffered--; 946 } 947 no_pending_pkts = skb_queue_empty(&rx->sta->tx_filtered) && 948 skb_queue_empty(&rx->sta->ps_tx_buf); 949 950 if (skb) { 951 struct ieee80211_hdr *hdr = 952 (struct ieee80211_hdr *) skb->data; 953 954 /* tell TX path to send one frame even though the STA may 955 * still remain is PS mode after this frame exchange */ 956 rx->sta->pspoll = 1; 957 958 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 959 printk(KERN_DEBUG "STA %s aid %d: PS Poll (entries after %d)\n", 960 print_mac(mac, rx->sta->addr), rx->sta->aid, 961 skb_queue_len(&rx->sta->ps_tx_buf)); 962 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 963 964 /* Use MoreData flag to indicate whether there are more 965 * buffered frames for this STA */ 966 if (no_pending_pkts) { 967 hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREDATA); 968 rx->sta->flags &= ~WLAN_STA_TIM; 969 } else 970 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 971 972 dev_queue_xmit(skb); 973 974 if 
(no_pending_pkts) { 975 if (rx->local->ops->set_tim) 976 rx->local->ops->set_tim(local_to_hw(rx->local), 977 rx->sta->aid, 0); 978 if (rx->sdata->bss) 979 bss_tim_clear(rx->local, rx->sdata->bss, rx->sta->aid); 980 } 981 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 982 } else if (!rx->u.rx.sent_ps_buffered) { 983 printk(KERN_DEBUG "%s: STA %s sent PS Poll even " 984 "though there is no buffered frames for it\n", 985 rx->dev->name, print_mac(mac, rx->sta->addr)); 986 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 987 988 } 989 990 /* Free PS Poll skb here instead of returning TXRX_DROP that would 991 * count as an dropped frame. */ 992 dev_kfree_skb(rx->skb); 993 994 return TXRX_QUEUED; 995 } 996 997 static ieee80211_txrx_result 998 ieee80211_rx_h_remove_qos_control(struct ieee80211_txrx_data *rx) 999 { 1000 u16 fc = rx->fc; 1001 u8 *data = rx->skb->data; 1002 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) data; 1003 1004 if (!WLAN_FC_IS_QOS_DATA(fc)) 1005 return TXRX_CONTINUE; 1006 1007 /* remove the qos control field, update frame type and meta-data */ 1008 memmove(data + 2, data, ieee80211_get_hdrlen(fc) - 2); 1009 hdr = (struct ieee80211_hdr *) skb_pull(rx->skb, 2); 1010 /* change frame type to non QOS */ 1011 rx->fc = fc &= ~IEEE80211_STYPE_QOS_DATA; 1012 hdr->frame_control = cpu_to_le16(fc); 1013 1014 return TXRX_CONTINUE; 1015 } 1016 1017 static int 1018 ieee80211_802_1x_port_control(struct ieee80211_txrx_data *rx) 1019 { 1020 if (unlikely(rx->sdata->ieee802_1x_pac && 1021 (!rx->sta || !(rx->sta->flags & WLAN_STA_AUTHORIZED)))) { 1022 #ifdef CONFIG_MAC80211_DEBUG 1023 printk(KERN_DEBUG "%s: dropped frame " 1024 "(unauthorized port)\n", rx->dev->name); 1025 #endif /* CONFIG_MAC80211_DEBUG */ 1026 return -EACCES; 1027 } 1028 1029 return 0; 1030 } 1031 1032 static int 1033 ieee80211_drop_unencrypted(struct ieee80211_txrx_data *rx) 1034 { 1035 /* 1036 * Pass through unencrypted frames if the hardware has 1037 * decrypted them already. 
	 */
	if (rx->u.rx.status->flag & RX_FLAG_DECRYPTED)
		return 0;

	/* Drop unencrypted frames if key is set. */
	if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) &&
		     (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA &&
		     (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC &&
		     (rx->key || rx->sdata->drop_unencrypted))) {
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: RX non-WEP frame, but expected "
			       "encryption\n", rx->dev->name);
		return -EACCES;
	}
	return 0;
}

/*
 * Convert a received 802.11 data frame (header + possible LLC/SNAP)
 * into an 802.3 Ethernet frame in place in rx->skb.  Returns 0 on
 * success, -1 when the frame is malformed or not acceptable for this
 * interface type.
 */
static int
ieee80211_data_to_8023(struct ieee80211_txrx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data;
	u16 fc, hdrlen, ethertype;
	u8 *payload;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	struct sk_buff *skb = rx->skb;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);
	DECLARE_MAC_BUF(mac3);
	DECLARE_MAC_BUF(mac4);

	fc = rx->fc;

	if (unlikely(!WLAN_FC_DATA_PRESENT(fc)))
		return -1;

	hdrlen = ieee80211_get_hdrlen(fc);

	/* convert IEEE 802.11 header + possible LLC headers into Ethernet
	 * header
	 * IEEE 802.11 address fields:
	 * ToDS FromDS Addr1 Addr2 Addr3 Addr4
	 *   0     0   DA    SA    BSSID n/a
	 *   0     1   DA    BSSID SA    n/a
	 *   1     0   BSSID SA    DA    n/a
	 *   1     1   RA    TA    DA    SA
	 */

	switch (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
	case IEEE80211_FCTL_TODS:
		/* BSSID SA DA */
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);

		/* ToDS frames are only meaningful on AP-side interfaces */
		if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_AP &&
			     sdata->vif.type != IEEE80211_IF_TYPE_VLAN)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: dropped ToDS frame "
				       "(BSSID=%s SA=%s DA=%s)\n",
				       dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3));
			return -1;
		}
		break;
	case (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS):
		/* RA TA DA SA */
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr4, ETH_ALEN);

		/* 4-address frames only on WDS interfaces */
		if (unlikely(sdata->vif.type != IEEE80211_IF_TYPE_WDS)) {
			if (net_ratelimit())
				printk(KERN_DEBUG "%s: dropped FromDS&ToDS "
				       "frame (RA=%s TA=%s DA=%s SA=%s)\n",
				       rx->dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3),
				       print_mac(mac4, hdr->addr4));
			return -1;
		}
		break;
	case IEEE80211_FCTL_FROMDS:
		/* DA BSSID SA */
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr3, ETH_ALEN);

		/* drop multicast echoes of frames we sent ourselves */
		if (sdata->vif.type != IEEE80211_IF_TYPE_STA ||
		    (is_multicast_ether_addr(dst) &&
		     !compare_ether_addr(src, dev->dev_addr)))
			return -1;
		break;
	case 0:
		/* DA SA BSSID */
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);

		if (sdata->vif.type != IEEE80211_IF_TYPE_IBSS) {
			if (net_ratelimit()) {
				printk(KERN_DEBUG "%s: dropped IBSS frame "
				       "(DA=%s SA=%s BSSID=%s)\n",
				       dev->name,
				       print_mac(mac, hdr->addr1),
				       print_mac(mac2, hdr->addr2),
				       print_mac(mac3, hdr->addr3));
			}
			return -1;
		}
		break;
	}

	/* need at least an LLC/SNAP header worth of payload */
	if (unlikely(skb->len - hdrlen < 8)) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: RX too short data frame "
			       "payload\n", dev->name);
		}
		return -1;
	}

	payload = skb->data + hdrlen;
	ethertype = (payload[6] << 8) | payload[7];

	if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
		    ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
		   compare_ether_addr(payload, bridge_tunnel_header) == 0)) {
		/* remove RFC1042 or Bridge-Tunnel encapsulation and
		 * replace EtherType */
		skb_pull(skb, hdrlen + 6);
		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
	} else {
		/* no SNAP: build an 802.3 header with length field */
		struct ethhdr *ehdr;
		__be16 len;

		skb_pull(skb, hdrlen);
		len = htons(skb->len);
		ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
		memcpy(ehdr->h_dest, dst, ETH_ALEN);
		memcpy(ehdr->h_source, src, ETH_ALEN);
		ehdr->h_proto = len;
	}
	return 0;
}

/*
 * Encryption/port-control policy filter: returns true when the frame
 * may be delivered.  EAPOL frames to us or the PAE group address always
 * pass so that key negotiation can proceed.
 * requires that rx->skb is a frame with ethernet header
 */
static bool ieee80211_frame_allowed(struct ieee80211_txrx_data *rx)
{
	static const u8 pae_group_addr[ETH_ALEN]
		= { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;

	/*
	 * Allow EAPOL frames to us/the PAE group address regardless
	 * of whether the frame was encrypted or not.
	 */
	if (ehdr->h_proto == htons(ETH_P_PAE) &&
	    (compare_ether_addr(ehdr->h_dest, rx->dev->dev_addr) == 0 ||
	     compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
		return true;

	if (ieee80211_802_1x_port_control(rx) ||
	    ieee80211_drop_unencrypted(rx))
		return false;

	return true;
}

/*
 * Deliver a converted frame to the local stack and/or bridge it back
 * onto the wireless medium (AP/VLAN intra-BSS forwarding).
 * requires that rx->skb is a frame with ethernet header
 */
static void
ieee80211_deliver_skb(struct ieee80211_txrx_data *rx)
{
	struct net_device *dev = rx->dev;
	struct ieee80211_local *local = rx->local;
	struct sk_buff *skb, *xmit_skb;
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
	struct sta_info *dsta;

	skb = rx->skb;
	xmit_skb = NULL;

	if (local->bridge_packets && (sdata->vif.type == IEEE80211_IF_TYPE_AP ||
				      sdata->vif.type == IEEE80211_IF_TYPE_VLAN) &&
	    (rx->flags & IEEE80211_TXRXD_RXRA_MATCH)) {
		if (is_multicast_ether_addr(ehdr->h_dest)) {
			/*
			 * send multicast frames both to higher layers in
			 * local net stack and back to
the wireless medium 1233 */ 1234 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1235 if (!xmit_skb && net_ratelimit()) 1236 printk(KERN_DEBUG "%s: failed to clone " 1237 "multicast frame\n", dev->name); 1238 } else { 1239 dsta = sta_info_get(local, skb->data); 1240 if (dsta && dsta->dev == dev) { 1241 /* 1242 * The destination station is associated to 1243 * this AP (in this VLAN), so send the frame 1244 * directly to it and do not pass it to local 1245 * net stack. 1246 */ 1247 xmit_skb = skb; 1248 skb = NULL; 1249 } 1250 if (dsta) 1251 sta_info_put(dsta); 1252 } 1253 } 1254 1255 if (skb) { 1256 /* deliver to local stack */ 1257 skb->protocol = eth_type_trans(skb, dev); 1258 memset(skb->cb, 0, sizeof(skb->cb)); 1259 netif_rx(skb); 1260 } 1261 1262 if (xmit_skb) { 1263 /* send to wireless media */ 1264 xmit_skb->protocol = htons(ETH_P_802_3); 1265 skb_reset_network_header(xmit_skb); 1266 skb_reset_mac_header(xmit_skb); 1267 dev_queue_xmit(xmit_skb); 1268 } 1269 } 1270 1271 static ieee80211_txrx_result 1272 ieee80211_rx_h_amsdu(struct ieee80211_txrx_data *rx) 1273 { 1274 struct net_device *dev = rx->dev; 1275 struct ieee80211_local *local = rx->local; 1276 u16 fc, ethertype; 1277 u8 *payload; 1278 struct sk_buff *skb = rx->skb, *frame = NULL; 1279 const struct ethhdr *eth; 1280 int remaining, err; 1281 u8 dst[ETH_ALEN]; 1282 u8 src[ETH_ALEN]; 1283 DECLARE_MAC_BUF(mac); 1284 1285 fc = rx->fc; 1286 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1287 return TXRX_CONTINUE; 1288 1289 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1290 return TXRX_DROP; 1291 1292 if (!(rx->flags & IEEE80211_TXRXD_RX_AMSDU)) 1293 return TXRX_CONTINUE; 1294 1295 err = ieee80211_data_to_8023(rx); 1296 if (unlikely(err)) 1297 return TXRX_DROP; 1298 1299 skb->dev = dev; 1300 1301 dev->stats.rx_packets++; 1302 dev->stats.rx_bytes += skb->len; 1303 1304 /* skip the wrapping header */ 1305 eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr)); 1306 if (!eth) 1307 return TXRX_DROP; 
1308 1309 while (skb != frame) { 1310 u8 padding; 1311 __be16 len = eth->h_proto; 1312 unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len); 1313 1314 remaining = skb->len; 1315 memcpy(dst, eth->h_dest, ETH_ALEN); 1316 memcpy(src, eth->h_source, ETH_ALEN); 1317 1318 padding = ((4 - subframe_len) & 0x3); 1319 /* the last MSDU has no padding */ 1320 if (subframe_len > remaining) { 1321 printk(KERN_DEBUG "%s: wrong buffer size", dev->name); 1322 return TXRX_DROP; 1323 } 1324 1325 skb_pull(skb, sizeof(struct ethhdr)); 1326 /* if last subframe reuse skb */ 1327 if (remaining <= subframe_len + padding) 1328 frame = skb; 1329 else { 1330 frame = dev_alloc_skb(local->hw.extra_tx_headroom + 1331 subframe_len); 1332 1333 if (frame == NULL) 1334 return TXRX_DROP; 1335 1336 skb_reserve(frame, local->hw.extra_tx_headroom + 1337 sizeof(struct ethhdr)); 1338 memcpy(skb_put(frame, ntohs(len)), skb->data, 1339 ntohs(len)); 1340 1341 eth = (struct ethhdr *) skb_pull(skb, ntohs(len) + 1342 padding); 1343 if (!eth) { 1344 printk(KERN_DEBUG "%s: wrong buffer size ", 1345 dev->name); 1346 dev_kfree_skb(frame); 1347 return TXRX_DROP; 1348 } 1349 } 1350 1351 skb_reset_network_header(frame); 1352 frame->dev = dev; 1353 frame->priority = skb->priority; 1354 rx->skb = frame; 1355 1356 payload = frame->data; 1357 ethertype = (payload[6] << 8) | payload[7]; 1358 1359 if (likely((compare_ether_addr(payload, rfc1042_header) == 0 && 1360 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || 1361 compare_ether_addr(payload, 1362 bridge_tunnel_header) == 0)) { 1363 /* remove RFC1042 or Bridge-Tunnel 1364 * encapsulation and replace EtherType */ 1365 skb_pull(frame, 6); 1366 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); 1367 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); 1368 } else { 1369 memcpy(skb_push(frame, sizeof(__be16)), 1370 &len, sizeof(__be16)); 1371 memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN); 1372 memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN); 1373 } 1374 1375 
if (!ieee80211_frame_allowed(rx)) { 1376 if (skb == frame) /* last frame */ 1377 return TXRX_DROP; 1378 dev_kfree_skb(frame); 1379 continue; 1380 } 1381 1382 ieee80211_deliver_skb(rx); 1383 } 1384 1385 return TXRX_QUEUED; 1386 } 1387 1388 static ieee80211_txrx_result 1389 ieee80211_rx_h_data(struct ieee80211_txrx_data *rx) 1390 { 1391 struct net_device *dev = rx->dev; 1392 u16 fc; 1393 int err; 1394 1395 fc = rx->fc; 1396 if (unlikely((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA)) 1397 return TXRX_CONTINUE; 1398 1399 if (unlikely(!WLAN_FC_DATA_PRESENT(fc))) 1400 return TXRX_DROP; 1401 1402 err = ieee80211_data_to_8023(rx); 1403 if (unlikely(err)) 1404 return TXRX_DROP; 1405 1406 if (!ieee80211_frame_allowed(rx)) 1407 return TXRX_DROP; 1408 1409 rx->skb->dev = dev; 1410 1411 dev->stats.rx_packets++; 1412 dev->stats.rx_bytes += rx->skb->len; 1413 1414 ieee80211_deliver_skb(rx); 1415 1416 return TXRX_QUEUED; 1417 } 1418 1419 static ieee80211_txrx_result 1420 ieee80211_rx_h_ctrl(struct ieee80211_txrx_data *rx) 1421 { 1422 struct ieee80211_local *local = rx->local; 1423 struct ieee80211_hw *hw = &local->hw; 1424 struct sk_buff *skb = rx->skb; 1425 struct ieee80211_bar *bar = (struct ieee80211_bar *) skb->data; 1426 struct tid_ampdu_rx *tid_agg_rx; 1427 u16 start_seq_num; 1428 u16 tid; 1429 1430 if (likely((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_CTL)) 1431 return TXRX_CONTINUE; 1432 1433 if ((rx->fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BACK_REQ) { 1434 if (!rx->sta) 1435 return TXRX_CONTINUE; 1436 tid = le16_to_cpu(bar->control) >> 12; 1437 tid_agg_rx = &(rx->sta->ampdu_mlme.tid_rx[tid]); 1438 if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL) 1439 return TXRX_CONTINUE; 1440 1441 start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4; 1442 1443 /* reset session timer */ 1444 if (tid_agg_rx->timeout) { 1445 unsigned long expires = 1446 jiffies + (tid_agg_rx->timeout / 1000) * HZ; 1447 mod_timer(&tid_agg_rx->session_timer, expires); 1448 } 1449 1450 
		/* manage reordering buffer according to requested */
		/* sequence number */
		rcu_read_lock();
		ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
						 start_seq_num, 1);
		rcu_read_unlock();
		return TXRX_DROP;
	}

	return TXRX_CONTINUE;
}

/*
 * Hand management frames to the in-kernel MLME for STA/IBSS interfaces
 * that do not run userspace MLME; drop them otherwise.
 */
static ieee80211_txrx_result
ieee80211_rx_h_mgmt(struct ieee80211_txrx_data *rx)
{
	struct ieee80211_sub_if_data *sdata;

	if (!(rx->flags & IEEE80211_TXRXD_RXRA_MATCH))
		return TXRX_DROP;

	sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
	if ((sdata->vif.type == IEEE80211_IF_TYPE_STA ||
	     sdata->vif.type == IEEE80211_IF_TYPE_IBSS) &&
	    !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
		ieee80211_sta_rx_mgmt(rx->dev, rx->skb, rx->u.rx.status);
	else
		return TXRX_DROP;

	return TXRX_QUEUED;
}

/*
 * Run an RX handler chain on one frame.  Stops at the first handler
 * that does not return TXRX_CONTINUE; frees the skb itself when the
 * final result is TXRX_DROP.  Returns the last handler's result.
 */
static inline ieee80211_txrx_result __ieee80211_invoke_rx_handlers(
				struct ieee80211_local *local,
				ieee80211_rx_handler *handlers,
				struct ieee80211_txrx_data *rx,
				struct sta_info *sta)
{
	ieee80211_rx_handler *handler;
	ieee80211_txrx_result res = TXRX_DROP;

	for (handler = handlers; *handler != NULL; handler++) {
		res = (*handler)(rx);

		switch (res) {
		case TXRX_CONTINUE:
			continue;
		case TXRX_DROP:
			I802_DEBUG_INC(local->rx_handlers_drop);
			if (sta)
				sta->rx_dropped++;
			break;
		case TXRX_QUEUED:
			I802_DEBUG_INC(local->rx_handlers_queued);
			break;
		}
		break;
	}

	if (res == TXRX_DROP)
		dev_kfree_skb(rx->skb);
	return res;
}

/*
 * Like __ieee80211_invoke_rx_handlers() but also frees the skb when the
 * whole chain ran to completion without any handler consuming it.
 */
static inline void ieee80211_invoke_rx_handlers(struct ieee80211_local *local,
						ieee80211_rx_handler *handlers,
						struct ieee80211_txrx_data *rx,
						struct sta_info *sta)
{
	if (__ieee80211_invoke_rx_handlers(local, handlers, rx, sta) ==
	    TXRX_CONTINUE)
		dev_kfree_skb(rx->skb);
}

/*
 * Handle a hardware-reported Michael MIC failure: filter out reports
 * that cannot possibly be genuine before notifying userspace (which may
 * trigger TKIP countermeasures).  Always consumes rx->skb.
 */
static void
ieee80211_rx_michael_mic_report(struct net_device *dev,
				struct ieee80211_hdr *hdr,
				struct sta_info *sta,
				struct ieee80211_txrx_data *rx)
{
	int keyidx, hdrlen;
	DECLARE_MAC_BUF(mac);
	DECLARE_MAC_BUF(mac2);

	hdrlen = ieee80211_get_hdrlen_from_skb(rx->skb);
	/* key index is in bits 7-6 of the 4th IV octet */
	if (rx->skb->len >= hdrlen + 4)
		keyidx = rx->skb->data[hdrlen + 3] >> 6;
	else
		keyidx = -1;

	if (net_ratelimit())
		printk(KERN_DEBUG "%s: TKIP hwaccel reported Michael MIC "
		       "failure from %s to %s keyidx=%d\n",
		       dev->name, print_mac(mac, hdr->addr2),
		       print_mac(mac2, hdr->addr1), keyidx);

	if (!sta) {
		/*
		 * Some hardware seem to generate incorrect Michael MIC
		 * reports; ignore them to avoid triggering countermeasures.
		 */
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
			       "error for unknown address %s\n",
			       dev->name, print_mac(mac, hdr->addr2));
		goto ignore;
	}

	if (!(rx->fc & IEEE80211_FCTL_PROTECTED)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "%s: ignored spurious Michael MIC "
			       "error for a frame with no PROTECTED flag (src "
			       "%s)\n", dev->name, print_mac(mac, hdr->addr2));
		goto ignore;
	}

	if (rx->sdata->vif.type == IEEE80211_IF_TYPE_AP && keyidx) {
		/*
		 * APs with pairwise keys should never receive Michael MIC
		 * errors for non-zero keyidx because these are reserved for
		 * group keys and only the AP is sending real multicast
		 * frames in the BSS.
1570 */ 1571 if (net_ratelimit()) 1572 printk(KERN_DEBUG "%s: ignored Michael MIC error for " 1573 "a frame with non-zero keyidx (%d)" 1574 " (src %s)\n", dev->name, keyidx, 1575 print_mac(mac, hdr->addr2)); 1576 goto ignore; 1577 } 1578 1579 if ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA && 1580 ((rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_MGMT || 1581 (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_AUTH)) { 1582 if (net_ratelimit()) 1583 printk(KERN_DEBUG "%s: ignored spurious Michael MIC " 1584 "error for a frame that cannot be encrypted " 1585 "(fc=0x%04x) (src %s)\n", 1586 dev->name, rx->fc, print_mac(mac, hdr->addr2)); 1587 goto ignore; 1588 } 1589 1590 mac80211_ev_michael_mic_failure(rx->dev, keyidx, hdr); 1591 ignore: 1592 dev_kfree_skb(rx->skb); 1593 rx->skb = NULL; 1594 } 1595 1596 ieee80211_rx_handler ieee80211_rx_handlers[] = 1597 { 1598 ieee80211_rx_h_if_stats, 1599 ieee80211_rx_h_passive_scan, 1600 ieee80211_rx_h_check, 1601 ieee80211_rx_h_decrypt, 1602 ieee80211_rx_h_sta_process, 1603 ieee80211_rx_h_defragment, 1604 ieee80211_rx_h_ps_poll, 1605 ieee80211_rx_h_michael_mic_verify, 1606 /* this must be after decryption - so header is counted in MPDU mic 1607 * must be before pae and data, so QOS_DATA format frames 1608 * are not passed to user space by these functions 1609 */ 1610 ieee80211_rx_h_remove_qos_control, 1611 ieee80211_rx_h_amsdu, 1612 ieee80211_rx_h_data, 1613 ieee80211_rx_h_ctrl, 1614 ieee80211_rx_h_mgmt, 1615 NULL 1616 }; 1617 1618 /* main receive path */ 1619 1620 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata, 1621 u8 *bssid, struct ieee80211_txrx_data *rx, 1622 struct ieee80211_hdr *hdr) 1623 { 1624 int multicast = is_multicast_ether_addr(hdr->addr1); 1625 1626 switch (sdata->vif.type) { 1627 case IEEE80211_IF_TYPE_STA: 1628 if (!bssid) 1629 return 0; 1630 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1631 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1632 return 0; 1633 rx->flags 
&= ~IEEE80211_TXRXD_RXRA_MATCH; 1634 } else if (!multicast && 1635 compare_ether_addr(sdata->dev->dev_addr, 1636 hdr->addr1) != 0) { 1637 if (!(sdata->dev->flags & IFF_PROMISC)) 1638 return 0; 1639 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1640 } 1641 break; 1642 case IEEE80211_IF_TYPE_IBSS: 1643 if (!bssid) 1644 return 0; 1645 if (!ieee80211_bssid_match(bssid, sdata->u.sta.bssid)) { 1646 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1647 return 0; 1648 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1649 } else if (!multicast && 1650 compare_ether_addr(sdata->dev->dev_addr, 1651 hdr->addr1) != 0) { 1652 if (!(sdata->dev->flags & IFF_PROMISC)) 1653 return 0; 1654 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1655 } else if (!rx->sta) 1656 rx->sta = ieee80211_ibss_add_sta(sdata->dev, rx->skb, 1657 bssid, hdr->addr2); 1658 break; 1659 case IEEE80211_IF_TYPE_VLAN: 1660 case IEEE80211_IF_TYPE_AP: 1661 if (!bssid) { 1662 if (compare_ether_addr(sdata->dev->dev_addr, 1663 hdr->addr1)) 1664 return 0; 1665 } else if (!ieee80211_bssid_match(bssid, 1666 sdata->dev->dev_addr)) { 1667 if (!(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1668 return 0; 1669 rx->flags &= ~IEEE80211_TXRXD_RXRA_MATCH; 1670 } 1671 if (sdata->dev == sdata->local->mdev && 1672 !(rx->flags & IEEE80211_TXRXD_RXIN_SCAN)) 1673 /* do not receive anything via 1674 * master device when not scanning */ 1675 return 0; 1676 break; 1677 case IEEE80211_IF_TYPE_WDS: 1678 if (bssid || 1679 (rx->fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) 1680 return 0; 1681 if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2)) 1682 return 0; 1683 break; 1684 case IEEE80211_IF_TYPE_MNTR: 1685 /* take everything */ 1686 break; 1687 case IEEE80211_IF_TYPE_INVALID: 1688 /* should never get here */ 1689 WARN_ON(1); 1690 break; 1691 } 1692 1693 return 1; 1694 } 1695 1696 /* 1697 * This is the actual Rx frames handler. as it blongs to Rx path it must 1698 * be called with rcu_read_lock protection. 
1699 */ 1700 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 1701 struct sk_buff *skb, 1702 struct ieee80211_rx_status *status, 1703 u32 load) 1704 { 1705 struct ieee80211_local *local = hw_to_local(hw); 1706 struct ieee80211_sub_if_data *sdata; 1707 struct sta_info *sta; 1708 struct ieee80211_hdr *hdr; 1709 struct ieee80211_txrx_data rx; 1710 u16 type; 1711 int prepares; 1712 struct ieee80211_sub_if_data *prev = NULL; 1713 struct sk_buff *skb_new; 1714 u8 *bssid; 1715 1716 hdr = (struct ieee80211_hdr *) skb->data; 1717 memset(&rx, 0, sizeof(rx)); 1718 rx.skb = skb; 1719 rx.local = local; 1720 1721 rx.u.rx.status = status; 1722 rx.u.rx.load = load; 1723 rx.fc = le16_to_cpu(hdr->frame_control); 1724 type = rx.fc & IEEE80211_FCTL_FTYPE; 1725 1726 if (type == IEEE80211_FTYPE_DATA || type == IEEE80211_FTYPE_MGMT) 1727 local->dot11ReceivedFragmentCount++; 1728 1729 sta = rx.sta = sta_info_get(local, hdr->addr2); 1730 if (sta) { 1731 rx.dev = rx.sta->dev; 1732 rx.sdata = IEEE80211_DEV_TO_SUB_IF(rx.dev); 1733 } 1734 1735 if ((status->flag & RX_FLAG_MMIC_ERROR)) { 1736 ieee80211_rx_michael_mic_report(local->mdev, hdr, sta, &rx); 1737 goto end; 1738 } 1739 1740 if (unlikely(local->sta_sw_scanning || local->sta_hw_scanning)) 1741 rx.flags |= IEEE80211_TXRXD_RXIN_SCAN; 1742 1743 if (__ieee80211_invoke_rx_handlers(local, local->rx_pre_handlers, &rx, 1744 sta) != TXRX_CONTINUE) 1745 goto end; 1746 skb = rx.skb; 1747 1748 if (sta && !(sta->flags & (WLAN_STA_WDS | WLAN_STA_ASSOC_AP)) && 1749 !atomic_read(&local->iff_promiscs) && 1750 !is_multicast_ether_addr(hdr->addr1)) { 1751 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; 1752 ieee80211_invoke_rx_handlers(local, local->rx_handlers, &rx, 1753 rx.sta); 1754 sta_info_put(sta); 1755 return; 1756 } 1757 1758 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 1759 if (!netif_running(sdata->dev)) 1760 continue; 1761 1762 if (sdata->vif.type == IEEE80211_IF_TYPE_MNTR) 1763 continue; 1764 1765 bssid = 
ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 1766 rx.flags |= IEEE80211_TXRXD_RXRA_MATCH; 1767 prepares = prepare_for_handlers(sdata, bssid, &rx, hdr); 1768 /* prepare_for_handlers can change sta */ 1769 sta = rx.sta; 1770 1771 if (!prepares) 1772 continue; 1773 1774 /* 1775 * frame is destined for this interface, but if it's not 1776 * also for the previous one we handle that after the 1777 * loop to avoid copying the SKB once too much 1778 */ 1779 1780 if (!prev) { 1781 prev = sdata; 1782 continue; 1783 } 1784 1785 /* 1786 * frame was destined for the previous interface 1787 * so invoke RX handlers for it 1788 */ 1789 1790 skb_new = skb_copy(skb, GFP_ATOMIC); 1791 if (!skb_new) { 1792 if (net_ratelimit()) 1793 printk(KERN_DEBUG "%s: failed to copy " 1794 "multicast frame for %s", 1795 wiphy_name(local->hw.wiphy), 1796 prev->dev->name); 1797 continue; 1798 } 1799 rx.fc = le16_to_cpu(hdr->frame_control); 1800 rx.skb = skb_new; 1801 rx.dev = prev->dev; 1802 rx.sdata = prev; 1803 ieee80211_invoke_rx_handlers(local, local->rx_handlers, 1804 &rx, sta); 1805 prev = sdata; 1806 } 1807 if (prev) { 1808 rx.fc = le16_to_cpu(hdr->frame_control); 1809 rx.skb = skb; 1810 rx.dev = prev->dev; 1811 rx.sdata = prev; 1812 ieee80211_invoke_rx_handlers(local, local->rx_handlers, 1813 &rx, sta); 1814 } else 1815 dev_kfree_skb(skb); 1816 1817 end: 1818 if (sta) 1819 sta_info_put(sta); 1820 } 1821 1822 #define SEQ_MODULO 0x1000 1823 #define SEQ_MASK 0xfff 1824 1825 static inline int seq_less(u16 sq1, u16 sq2) 1826 { 1827 return (((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1)); 1828 } 1829 1830 static inline u16 seq_inc(u16 sq) 1831 { 1832 return ((sq + 1) & SEQ_MASK); 1833 } 1834 1835 static inline u16 seq_sub(u16 sq1, u16 sq2) 1836 { 1837 return ((sq1 - sq2) & SEQ_MASK); 1838 } 1839 1840 1841 /* 1842 * As it function blongs to Rx path it must be called with 1843 * the proper rcu_read_lock protection for its flow. 
 */
/*
 * A-MPDU RX reorder buffer management.  Returns 1 when the MPDU was
 * consumed here (buffered, released to the stack, or freed) and 0 when
 * the caller should process it immediately (it is the in-order head
 * and nothing is buffered).  With bar_req set, skb is NULL and the
 * buffer is flushed up to mpdu_seq_num.
 */
u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
				    struct tid_ampdu_rx *tid_agg_rx,
				    struct sk_buff *skb, u16 mpdu_seq_num,
				    int bar_req)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rx_status status;
	u16 head_seq_num, buf_size;
	int index;
	u32 pkt_load;

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		return 1;
	}

	/* if frame sequence number exceeds our buffering window size or
	 * block Ack Request arrived - release stored frames */
	if ((!seq_less(mpdu_seq_num, head_seq_num + buf_size)) || (bar_req)) {
		/* new head to the ordering buffer */
		if (bar_req)
			head_seq_num = mpdu_seq_num;
		else
			head_seq_num =
				seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
			index = seq_sub(tid_agg_rx->head_seq_num,
				tid_agg_rx->ssn)
				% tid_agg_rx->buf_size;

			if (tid_agg_rx->reorder_buf[index]) {
				/* release the reordered frames to stack */
				memcpy(&status,
					tid_agg_rx->reorder_buf[index]->cb,
					sizeof(status));
				pkt_load = ieee80211_rx_load_stats(local,
					tid_agg_rx->reorder_buf[index],
					&status);
				__ieee80211_rx_handle_packet(hw,
					tid_agg_rx->reorder_buf[index],
					&status, pkt_load);
				tid_agg_rx->stored_mpdu_num--;
				tid_agg_rx->reorder_buf[index] = NULL;
			}
			tid_agg_rx->head_seq_num =
				seq_inc(tid_agg_rx->head_seq_num);
		}
		if (bar_req)
			return 1;
	}

	/* now the new frame is always in the range of the reordering */
	/* buffer window */
	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn)
		% tid_agg_rx->buf_size;
	/* check if we already stored this frame */
	if (tid_agg_rx->reorder_buf[index]) {
		dev_kfree_skb(skb);
		return 1;
	}

	/* if arrived mpdu is in the right order and nothing else stored */
	/* release it immediately */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		tid_agg_rx->head_seq_num =
			seq_inc(tid_agg_rx->head_seq_num);
		return 0;
	}

	/* put the frame in the reordering buffer */
	tid_agg_rx->reorder_buf[index] = skb;
	tid_agg_rx->stored_mpdu_num++;
	/* release the buffer until next missing frame */
	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
		% tid_agg_rx->buf_size;
	while (tid_agg_rx->reorder_buf[index]) {
		/* release the reordered frame back to stack */
		memcpy(&status, tid_agg_rx->reorder_buf[index]->cb,
			sizeof(status));
		pkt_load = ieee80211_rx_load_stats(local,
					tid_agg_rx->reorder_buf[index],
					&status);
		__ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
					     &status, pkt_load);
		tid_agg_rx->stored_mpdu_num--;
		tid_agg_rx->reorder_buf[index] = NULL;
		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
		index = seq_sub(tid_agg_rx->head_seq_num,
			tid_agg_rx->ssn) % tid_agg_rx->buf_size;
	}
	return 1;
}

/*
 * Feed a received QoS data frame through the per-TID reorder buffer
 * when an RX aggregation session is active for its STA/TID.  Returns
 * non-zero when the frame was consumed here, 0 when the caller must
 * continue processing it.
 */
static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
				     struct sk_buff *skb)
{
	struct ieee80211_hw *hw = &local->hw;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 fc, sc;
	u16 mpdu_seq_num;
	u8 ret = 0, *qc;
	int tid;

	sta = sta_info_get(local, hdr->addr2);
	if (!sta)
		return ret;

	fc = le16_to_cpu(hdr->frame_control);

	/* filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation */
	if (!WLAN_FC_IS_QOS_DATA(fc))
		goto end_reorder;

	qc = skb->data + ieee80211_get_hdrlen(fc) - QOS_CONTROL_LEN;
	tid = qc[0] & QOS_CONTROL_TID_MASK;
	tid_agg_rx = &(sta->ampdu_mlme.tid_rx[tid]);

	if (tid_agg_rx->state != HT_AGG_STATE_OPERATIONAL)
		goto end_reorder;

	/* null data frames are excluded */
	if (unlikely(fc & IEEE80211_STYPE_NULLFUNC))
		goto end_reorder;

	/* new un-ordered ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout) {
		unsigned long expires =
			jiffies + (tid_agg_rx->timeout / 1000) * HZ;
		mod_timer(&tid_agg_rx->session_timer, expires);
	}

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		ieee80211_sta_stop_rx_ba_session(sta->dev, sta->addr,
			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
		ret = 1;
		goto end_reorder;
	}

	/* according to mpdu sequence number deal with reordering buffer */
	mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
						mpdu_seq_num, 0);
 end_reorder:
	if (sta)
		sta_info_put(sta);
	return ret;
}

/*
 * This is the receive path handler. It is called by a low level driver when an
 * 802.11 MPDU is received from the hardware.
 */
void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
		    struct ieee80211_rx_status *status)
{
	struct ieee80211_local *local = hw_to_local(hw);
	u32 pkt_load;

	/*
	 * key references and virtual interfaces are protected using RCU
	 * and this requires that we are in a read-side RCU section during
	 * receive processing
	 */
	rcu_read_lock();

	/*
	 * Frames with failed FCS/PLCP checksum are not returned,
	 * all other frames are returned without radiotap header
	 * if it was previously present.
	 * Also, frames with less than 16 bytes are dropped.
	 */
	skb = ieee80211_rx_monitor(local, skb, status);
	if (!skb) {
		/* consumed by a monitor interface (or dropped) */
		rcu_read_unlock();
		return;
	}

	pkt_load = ieee80211_rx_load_stats(local, skb, status);
	local->channel_use_raw += pkt_load;

	/* the reorder code consumes the skb when it returns non-zero */
	if (!ieee80211_rx_reorder_ampdu(local, skb))
		__ieee80211_rx_handle_packet(hw, skb, status, pkt_load);

	rcu_read_unlock();
}
EXPORT_SYMBOL(__ieee80211_rx);

/* This is a version of the rx handler that can be called from hard irq
 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
			  struct ieee80211_rx_status *status)
{
	struct ieee80211_local *local = hw_to_local(hw);

	/* the status must fit into skb->cb to survive the queue */
	BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

	skb->dev = local->mdev;
	/* copy status into skb->cb for use by tasklet */
	memcpy(skb->cb, status, sizeof(*status));
	skb->pkt_type = IEEE80211_RX_MSG;
	skb_queue_tail(&local->skb_queue, skb);
	tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);