1 /* 2 * Copyright 2002-2005, Instant802 Networks, Inc. 3 * Copyright 2005-2006, Devicescape Software, Inc. 4 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 5 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11 12 #include <linux/jiffies.h> 13 #include <linux/slab.h> 14 #include <linux/kernel.h> 15 #include <linux/skbuff.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rcupdate.h> 19 #include <linux/export.h> 20 #include <net/mac80211.h> 21 #include <net/ieee80211_radiotap.h> 22 #include <asm/unaligned.h> 23 24 #include "ieee80211_i.h" 25 #include "driver-ops.h" 26 #include "led.h" 27 #include "mesh.h" 28 #include "wep.h" 29 #include "wpa.h" 30 #include "tkip.h" 31 #include "wme.h" 32 #include "rate.h" 33 34 /* 35 * monitor mode reception 36 * 37 * This function cleans up the SKB, i.e. it removes all the stuff 38 * only useful for monitoring. 39 */ 40 static struct sk_buff *remove_monitor_info(struct ieee80211_local *local, 41 struct sk_buff *skb) 42 { 43 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) { 44 if (likely(skb->len > FCS_LEN)) 45 __pskb_trim(skb, skb->len - FCS_LEN); 46 else { 47 /* driver bug */ 48 WARN_ON(1); 49 dev_kfree_skb(skb); 50 skb = NULL; 51 } 52 } 53 54 return skb; 55 } 56 57 static inline int should_drop_frame(struct sk_buff *skb, 58 int present_fcs_len) 59 { 60 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 61 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 62 63 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 64 return 1; 65 if (unlikely(skb->len < 16 + present_fcs_len)) 66 return 1; 67 if (ieee80211_is_ctl(hdr->frame_control) && 68 !ieee80211_is_pspoll(hdr->frame_control) && 69 !ieee80211_is_back_req(hdr->frame_control)) 70 return 1; 71 return 0; 72 } 73 74 static int 75 ieee80211_rx_radiotap_len(struct ieee80211_local *local, 76 struct ieee80211_rx_status *status) 77 { 78 int len; 79 80 /* always present fields */ 81 len = sizeof(struct ieee80211_radiotap_header) + 9; 82 83 if (status->flag & RX_FLAG_MACTIME_MPDU) 84 len += 8; 85 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 86 len += 1; 87 88 if (len & 1) /* padding for RX_FLAGS if necessary */ 89 len++; 90 91 if (status->flag & RX_FLAG_HT) /* HT info */ 92 len += 3; 93 94 return len; 95 } 96 97 /* 98 * ieee80211_add_rx_radiotap_header - add radiotap header 99 * 100 * add a radiotap header containing all the fields which the hardware provided. 
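 *
 * Roughly, for a frame received with a known legacy rate, a TSF timestamp
 * and a dBm signal value, the variable part built below is laid out as:
 *
 *   TSFT            8 bytes, only with RX_FLAG_MACTIME_MPDU
 *   flags           1 byte (FCS present, bad FCS, short preamble)
 *   rate            1 byte in 500 kbps units, kept as zero padding for HT
 *   channel         2 bytes frequency + 2 bytes channel flags
 *   antenna signal  1 byte (dBm), only with IEEE80211_HW_SIGNAL_DBM
 *   antenna         1 byte
 *   RX flags        2 bytes, aligned to a 2-byte boundary
 *   MCS info        3 bytes, only for HT frames
 *
 * which matches the accounting done in ieee80211_rx_radiotap_len() when the
 * headroom for this header is sized.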
101 */ 102 static void 103 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 104 struct sk_buff *skb, 105 struct ieee80211_rate *rate, 106 int rtap_len) 107 { 108 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 109 struct ieee80211_radiotap_header *rthdr; 110 unsigned char *pos; 111 u16 rx_flags = 0; 112 113 rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len); 114 memset(rthdr, 0, rtap_len); 115 116 /* radiotap header, set always present flags */ 117 rthdr->it_present = 118 cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 119 (1 << IEEE80211_RADIOTAP_CHANNEL) | 120 (1 << IEEE80211_RADIOTAP_ANTENNA) | 121 (1 << IEEE80211_RADIOTAP_RX_FLAGS)); 122 rthdr->it_len = cpu_to_le16(rtap_len); 123 124 pos = (unsigned char *)(rthdr+1); 125 126 /* the order of the following fields is important */ 127 128 /* IEEE80211_RADIOTAP_TSFT */ 129 if (status->flag & RX_FLAG_MACTIME_MPDU) { 130 put_unaligned_le64(status->mactime, pos); 131 rthdr->it_present |= 132 cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT); 133 pos += 8; 134 } 135 136 /* IEEE80211_RADIOTAP_FLAGS */ 137 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 138 *pos |= IEEE80211_RADIOTAP_F_FCS; 139 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 140 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 141 if (status->flag & RX_FLAG_SHORTPRE) 142 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 143 pos++; 144 145 /* IEEE80211_RADIOTAP_RATE */ 146 if (!rate || status->flag & RX_FLAG_HT) { 147 /* 148 * Without rate information don't add it. If we have, 149 * MCS information is a separate field in radiotap, 150 * added below. The byte here is needed as padding 151 * for the channel though, so initialise it to 0. 152 */ 153 *pos = 0; 154 } else { 155 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE); 156 *pos = rate->bitrate / 5; 157 } 158 pos++; 159 160 /* IEEE80211_RADIOTAP_CHANNEL */ 161 put_unaligned_le16(status->freq, pos); 162 pos += 2; 163 if (status->band == IEEE80211_BAND_5GHZ) 164 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, 165 pos); 166 else if (status->flag & RX_FLAG_HT) 167 put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ, 168 pos); 169 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 170 put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, 171 pos); 172 else if (rate) 173 put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, 174 pos); 175 else 176 put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos); 177 pos += 2; 178 179 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 180 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM && 181 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 182 *pos = status->signal; 183 rthdr->it_present |= 184 cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 185 pos++; 186 } 187 188 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 189 190 /* IEEE80211_RADIOTAP_ANTENNA */ 191 *pos = status->antenna; 192 pos++; 193 194 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 195 196 /* IEEE80211_RADIOTAP_RX_FLAGS */ 197 /* ensure 2 byte alignment for the 2 byte field as required */ 198 if ((pos - (u8 *)rthdr) & 1) 199 pos++; 200 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 201 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 202 put_unaligned_le16(rx_flags, pos); 203 pos += 2; 204 205 if (status->flag & RX_FLAG_HT) { 206 rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS); 207 *pos++ = IEEE80211_RADIOTAP_MCS_HAVE_MCS | 208 IEEE80211_RADIOTAP_MCS_HAVE_GI | 209 IEEE80211_RADIOTAP_MCS_HAVE_BW; 210 *pos = 0; 211 if (status->flag & RX_FLAG_SHORT_GI) 212 
*pos |= IEEE80211_RADIOTAP_MCS_SGI; 213 if (status->flag & RX_FLAG_40MHZ) 214 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 215 pos++; 216 *pos++ = status->rate_idx; 217 } 218 } 219 220 /* 221 * This function copies a received frame to all monitor interfaces and 222 * returns a cleaned-up SKB that no longer includes the FCS nor the 223 * radiotap header the driver might have added. 224 */ 225 static struct sk_buff * 226 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 227 struct ieee80211_rate *rate) 228 { 229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 230 struct ieee80211_sub_if_data *sdata; 231 int needed_headroom; 232 struct sk_buff *skb, *skb2; 233 struct net_device *prev_dev = NULL; 234 int present_fcs_len = 0; 235 236 /* 237 * First, we may need to make a copy of the skb because 238 * (1) we need to modify it for radiotap (if not present), and 239 * (2) the other RX handlers will modify the skb we got. 240 * 241 * We don't need to, of course, if we aren't going to return 242 * the SKB because it has a bad FCS/PLCP checksum. 243 */ 244 245 /* room for the radiotap header based on driver features */ 246 needed_headroom = ieee80211_rx_radiotap_len(local, status); 247 248 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 249 present_fcs_len = FCS_LEN; 250 251 /* make sure hdr->frame_control is on the linear part */ 252 if (!pskb_may_pull(origskb, 2)) { 253 dev_kfree_skb(origskb); 254 return NULL; 255 } 256 257 if (!local->monitors) { 258 if (should_drop_frame(origskb, present_fcs_len)) { 259 dev_kfree_skb(origskb); 260 return NULL; 261 } 262 263 return remove_monitor_info(local, origskb); 264 } 265 266 if (should_drop_frame(origskb, present_fcs_len)) { 267 /* only need to expand headroom if necessary */ 268 skb = origskb; 269 origskb = NULL; 270 271 /* 272 * This shouldn't trigger often because most devices have an 273 * RX header they pull before we get here, and that should 274 * be big enough for our radiotap information. We should 275 * probably export the length to drivers so that we can have 276 * them allocate enough headroom to start with. 277 */ 278 if (skb_headroom(skb) < needed_headroom && 279 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 280 dev_kfree_skb(skb); 281 return NULL; 282 } 283 } else { 284 /* 285 * Need to make a copy and possibly remove radiotap header 286 * and FCS from the original. 
287 */ 288 skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC); 289 290 origskb = remove_monitor_info(local, origskb); 291 292 if (!skb) 293 return origskb; 294 } 295 296 /* prepend radiotap information */ 297 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 298 299 skb_reset_mac_header(skb); 300 skb->ip_summed = CHECKSUM_UNNECESSARY; 301 skb->pkt_type = PACKET_OTHERHOST; 302 skb->protocol = htons(ETH_P_802_2); 303 304 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 305 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) 306 continue; 307 308 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) 309 continue; 310 311 if (!ieee80211_sdata_running(sdata)) 312 continue; 313 314 if (prev_dev) { 315 skb2 = skb_clone(skb, GFP_ATOMIC); 316 if (skb2) { 317 skb2->dev = prev_dev; 318 netif_receive_skb(skb2); 319 } 320 } 321 322 prev_dev = sdata->dev; 323 sdata->dev->stats.rx_packets++; 324 sdata->dev->stats.rx_bytes += skb->len; 325 } 326 327 if (prev_dev) { 328 skb->dev = prev_dev; 329 netif_receive_skb(skb); 330 } else 331 dev_kfree_skb(skb); 332 333 return origskb; 334 } 335 336 337 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 338 { 339 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 340 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 341 int tid, seqno_idx, security_idx; 342 343 /* does the frame have a qos control field? */ 344 if (ieee80211_is_data_qos(hdr->frame_control)) { 345 u8 *qc = ieee80211_get_qos_ctl(hdr); 346 /* frame has qos control */ 347 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 348 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 349 status->rx_flags |= IEEE80211_RX_AMSDU; 350 351 seqno_idx = tid; 352 security_idx = tid; 353 } else { 354 /* 355 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 356 * 357 * Sequence numbers for management frames, QoS data 358 * frames with a broadcast/multicast address in the 359 * Address 1 field, and all non-QoS data frames sent 360 * by QoS STAs are assigned using an additional single 361 * modulo-4096 counter, [...] 362 * 363 * We also use that counter for non-QoS STAs. 364 */ 365 seqno_idx = NUM_RX_DATA_QUEUES; 366 security_idx = 0; 367 if (ieee80211_is_mgmt(hdr->frame_control)) 368 security_idx = NUM_RX_DATA_QUEUES; 369 tid = 0; 370 } 371 372 rx->seqno_idx = seqno_idx; 373 rx->security_idx = security_idx; 374 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 375 * For now, set skb->priority to 0 for other cases. */ 376 rx->skb->priority = (tid > 7) ? 0 : tid; 377 } 378 379 /** 380 * DOC: Packet alignment 381 * 382 * Drivers always need to pass packets that are aligned to two-byte boundaries 383 * to the stack. 384 * 385 * Additionally, should, if possible, align the payload data in a way that 386 * guarantees that the contained IP header is aligned to a four-byte 387 * boundary. In the case of regular frames, this simply means aligning the 388 * payload to a four-byte boundary (because either the IP header is directly 389 * contained, or IV/RFC1042 headers that have a length divisible by four are 390 * in front of it). If the payload data is not properly aligned and the 391 * architecture doesn't support efficient unaligned operations, mac80211 392 * will align the data. 393 * 394 * With A-MSDU frames, however, the payload data address must yield two modulo 395 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 396 * push the IP header further back to a multiple of four again. 
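 * (For example, an A-MSDU payload starting at an address that is 2 modulo 4
 * carries a 14-byte subframe header, so the IP header behind it starts at
 * 2 + 14 = 16 bytes, i.e. on a four-byte boundary again.)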
Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like Atheros hardware adds, which is between the 802.11 header and
 * the payload, is not supported; the driver is required to move the 802.11
 * header so that it is directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
        WARN_ONCE((unsigned long)rx->skb->data & 1,
                  "unaligned packet at 0x%p\n", rx->skb->data);
#endif
}


/* rx handlers */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
{
        struct ieee80211_local *local = rx->local;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
        struct sk_buff *skb = rx->skb;

        if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
                   !local->sched_scanning))
                return RX_CONTINUE;

        if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
            test_bit(SCAN_SW_SCANNING, &local->scanning) ||
            local->sched_scanning)
                return ieee80211_scan_rx(rx->sdata, skb);

        /* scanning finished during invoking of handlers */
        I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
        return RX_DROP_UNUSABLE;
}


static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
                return 0;

        return ieee80211_is_robust_mgmt_frame(hdr);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

        if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
                return 0;

        return ieee80211_is_robust_mgmt_frame(hdr);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
        struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
        struct ieee80211_mmie *mmie;

        if (skb->len < 24 + sizeof(*mmie) ||
            !is_multicast_ether_addr(hdr->da))
                return -1;

        if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
                return -1; /* not a robust management frame */

        mmie = (struct ieee80211_mmie *)
                (skb->data + skb->len - sizeof(*mmie));
        if (mmie->element_id != WLAN_EID_MMIE ||
            mmie->length != sizeof(*mmie) - 2)
                return -1;

        return le16_to_cpu(mmie->key_id);
}


static ieee80211_rx_result
ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        char *dev_addr = rx->sdata->vif.addr;

        if (ieee80211_is_data(hdr->frame_control)) {
                if (is_multicast_ether_addr(hdr->addr1)) {
                        if (ieee80211_has_tods(hdr->frame_control) ||
                            !ieee80211_has_fromds(hdr->frame_control))
                                return RX_DROP_MONITOR;
                        if (compare_ether_addr(hdr->addr3, dev_addr) == 0)
                                return RX_DROP_MONITOR;
                } else {
                        if (!ieee80211_has_a4(hdr->frame_control))
                                return RX_DROP_MONITOR;
                        if (compare_ether_addr(hdr->addr4, dev_addr) == 0)
                                return RX_DROP_MONITOR;
                }
        }

        /* If there is no established peer link and this is not a peer link
         * establishment frame, beacon or probe, drop the frame.
505 */ 506 507 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 508 struct ieee80211_mgmt *mgmt; 509 510 if (!ieee80211_is_mgmt(hdr->frame_control)) 511 return RX_DROP_MONITOR; 512 513 if (ieee80211_is_action(hdr->frame_control)) { 514 u8 category; 515 mgmt = (struct ieee80211_mgmt *)hdr; 516 category = mgmt->u.action.category; 517 if (category != WLAN_CATEGORY_MESH_ACTION && 518 category != WLAN_CATEGORY_SELF_PROTECTED) 519 return RX_DROP_MONITOR; 520 return RX_CONTINUE; 521 } 522 523 if (ieee80211_is_probe_req(hdr->frame_control) || 524 ieee80211_is_probe_resp(hdr->frame_control) || 525 ieee80211_is_beacon(hdr->frame_control) || 526 ieee80211_is_auth(hdr->frame_control)) 527 return RX_CONTINUE; 528 529 return RX_DROP_MONITOR; 530 531 } 532 533 return RX_CONTINUE; 534 } 535 536 #define SEQ_MODULO 0x1000 537 #define SEQ_MASK 0xfff 538 539 static inline int seq_less(u16 sq1, u16 sq2) 540 { 541 return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1); 542 } 543 544 static inline u16 seq_inc(u16 sq) 545 { 546 return (sq + 1) & SEQ_MASK; 547 } 548 549 static inline u16 seq_sub(u16 sq1, u16 sq2) 550 { 551 return (sq1 - sq2) & SEQ_MASK; 552 } 553 554 555 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw, 556 struct tid_ampdu_rx *tid_agg_rx, 557 int index) 558 { 559 struct ieee80211_local *local = hw_to_local(hw); 560 struct sk_buff *skb = tid_agg_rx->reorder_buf[index]; 561 struct ieee80211_rx_status *status; 562 563 lockdep_assert_held(&tid_agg_rx->reorder_lock); 564 565 if (!skb) 566 goto no_frame; 567 568 /* release the frame from the reorder ring buffer */ 569 tid_agg_rx->stored_mpdu_num--; 570 tid_agg_rx->reorder_buf[index] = NULL; 571 status = IEEE80211_SKB_RXCB(skb); 572 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 573 skb_queue_tail(&local->rx_skb_queue, skb); 574 575 no_frame: 576 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 577 } 578 579 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw, 580 struct tid_ampdu_rx *tid_agg_rx, 581 u16 head_seq_num) 582 { 583 int index; 584 585 lockdep_assert_held(&tid_agg_rx->reorder_lock); 586 587 while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) { 588 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 589 tid_agg_rx->buf_size; 590 ieee80211_release_reorder_frame(hw, tid_agg_rx, index); 591 } 592 } 593 594 /* 595 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 596 * the skb was added to the buffer longer than this time ago, the earlier 597 * frames that have not yet been received are assumed to be lost and the skb 598 * can be released for processing. This may also release other skb's from the 599 * reorder buffer if there are no additional gaps between the frames. 600 * 601 * Callers must hold tid_agg_rx->reorder_lock. 602 */ 603 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 604 605 static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw, 606 struct tid_ampdu_rx *tid_agg_rx) 607 { 608 int index, j; 609 610 lockdep_assert_held(&tid_agg_rx->reorder_lock); 611 612 /* release the buffer until next missing frame */ 613 index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) % 614 tid_agg_rx->buf_size; 615 if (!tid_agg_rx->reorder_buf[index] && 616 tid_agg_rx->stored_mpdu_num) { 617 /* 618 * No buffers ready to be released, but check whether any 619 * frames in the reorder buffer have timed out. 
                 */
                int skipped = 1;
                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
                     j = (j + 1) % tid_agg_rx->buf_size) {
                        if (!tid_agg_rx->reorder_buf[j]) {
                                skipped++;
                                continue;
                        }
                        if (skipped &&
                            !time_after(jiffies, tid_agg_rx->reorder_time[j] +
                             HT_RX_REORDER_BUF_TIMEOUT))
                                goto set_release_timer;

#ifdef CONFIG_MAC80211_HT_DEBUG
                        if (net_ratelimit())
                                wiphy_debug(hw->wiphy,
                                            "release an RX reorder frame due to timeout on earlier frames\n");
#endif
                        ieee80211_release_reorder_frame(hw, tid_agg_rx, j);

                        /*
                         * Increment the head seq# also for the skipped slots.
                         */
                        tid_agg_rx->head_seq_num =
                                (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
                        skipped = 0;
                }
        } else while (tid_agg_rx->reorder_buf[index]) {
                ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                        tid_agg_rx->buf_size;
        }

        if (tid_agg_rx->stored_mpdu_num) {
                j = index = seq_sub(tid_agg_rx->head_seq_num,
                                    tid_agg_rx->ssn) % tid_agg_rx->buf_size;

                for (; j != (index - 1) % tid_agg_rx->buf_size;
                     j = (j + 1) % tid_agg_rx->buf_size) {
                        if (tid_agg_rx->reorder_buf[j])
                                break;
                }

 set_release_timer:

                mod_timer(&tid_agg_rx->reorder_timer,
                          tid_agg_rx->reorder_time[j] + 1 +
                          HT_RX_REORDER_BUF_TIMEOUT);
        } else {
                del_timer(&tid_agg_rx->reorder_timer);
        }
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
                                             struct tid_ampdu_rx *tid_agg_rx,
                                             struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        u16 sc = le16_to_cpu(hdr->seq_ctrl);
        u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
        u16 head_seq_num, buf_size;
        int index;
        bool ret = true;

        spin_lock(&tid_agg_rx->reorder_lock);

        buf_size = tid_agg_rx->buf_size;
        head_seq_num = tid_agg_rx->head_seq_num;

        /* frame with out of date sequence number */
        if (seq_less(mpdu_seq_num, head_seq_num)) {
                dev_kfree_skb(skb);
                goto out;
        }

        /*
         * If the frame sequence number exceeds our buffering window
         * size, release some previous frames to make room for this one.
         */
        if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
                head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
                /* release stored frames up to new head to stack */
                ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
        }

        /* Now the new frame is always in the range of the reordering buffer */

        index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;

        /* check if we already stored this frame */
        if (tid_agg_rx->reorder_buf[index]) {
                dev_kfree_skb(skb);
                goto out;
        }

        /*
         * If the current MPDU is in the right order and nothing else
         * is stored we can process it directly, no need to buffer it.
         * If it is first but there's something stored, we may be able
         * to release frames after this one.
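         * As a concrete example, with buf_size 64, ssn 0 and head_seq_num 10:
         * an MPDU with sequence number 9 was already dropped above as out of
         * date, number 10 with an empty buffer is handed back for immediate
         * processing, and number 30 is stored at index 30 % 64 = 30 until the
         * gap is filled or the reorder timer releases it.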
725 */ 726 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 727 tid_agg_rx->stored_mpdu_num == 0) { 728 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 729 ret = false; 730 goto out; 731 } 732 733 /* put the frame in the reordering buffer */ 734 tid_agg_rx->reorder_buf[index] = skb; 735 tid_agg_rx->reorder_time[index] = jiffies; 736 tid_agg_rx->stored_mpdu_num++; 737 ieee80211_sta_reorder_release(hw, tid_agg_rx); 738 739 out: 740 spin_unlock(&tid_agg_rx->reorder_lock); 741 return ret; 742 } 743 744 /* 745 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns 746 * true if the MPDU was buffered, false if it should be processed. 747 */ 748 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx) 749 { 750 struct sk_buff *skb = rx->skb; 751 struct ieee80211_local *local = rx->local; 752 struct ieee80211_hw *hw = &local->hw; 753 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 754 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 755 struct sta_info *sta = rx->sta; 756 struct tid_ampdu_rx *tid_agg_rx; 757 u16 sc; 758 u8 tid, ack_policy; 759 760 if (!ieee80211_is_data_qos(hdr->frame_control)) 761 goto dont_reorder; 762 763 /* 764 * filter the QoS data rx stream according to 765 * STA/TID and check if this STA/TID is on aggregation 766 */ 767 768 if (!sta) 769 goto dont_reorder; 770 771 ack_policy = *ieee80211_get_qos_ctl(hdr) & 772 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 773 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 774 775 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 776 if (!tid_agg_rx) 777 goto dont_reorder; 778 779 /* qos null data frames are excluded */ 780 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 781 goto dont_reorder; 782 783 /* not part of a BA session */ 784 if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 785 ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL) 786 goto dont_reorder; 787 788 /* not actually part of this BA session */ 789 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 790 goto dont_reorder; 791 792 /* new, potentially un-ordered, ampdu frame - process it */ 793 794 /* reset session timer */ 795 if (tid_agg_rx->timeout) 796 mod_timer(&tid_agg_rx->session_timer, 797 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 798 799 /* if this mpdu is fragmented - terminate rx aggregation session */ 800 sc = le16_to_cpu(hdr->seq_ctrl); 801 if (sc & IEEE80211_SCTL_FRAG) { 802 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 803 skb_queue_tail(&rx->sdata->skb_queue, skb); 804 ieee80211_queue_work(&local->hw, &rx->sdata->work); 805 return; 806 } 807 808 /* 809 * No locking needed -- we will only ever process one 810 * RX packet at a time, and thus own tid_agg_rx. All 811 * other code manipulating it needs to (and does) make 812 * sure that we cannot get to it any more before doing 813 * anything with it. 814 */ 815 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb)) 816 return; 817 818 dont_reorder: 819 skb_queue_tail(&local->rx_skb_queue, skb); 820 } 821 822 static ieee80211_rx_result debug_noinline 823 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 824 { 825 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 826 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 827 828 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 
9.2.9) */ 829 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 830 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 831 rx->sta->last_seq_ctrl[rx->seqno_idx] == 832 hdr->seq_ctrl)) { 833 if (status->rx_flags & IEEE80211_RX_RA_MATCH) { 834 rx->local->dot11FrameDuplicateCount++; 835 rx->sta->num_duplicates++; 836 } 837 return RX_DROP_UNUSABLE; 838 } else 839 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 840 } 841 842 if (unlikely(rx->skb->len < 16)) { 843 I802_DEBUG_INC(rx->local->rx_handlers_drop_short); 844 return RX_DROP_MONITOR; 845 } 846 847 /* Drop disallowed frame classes based on STA auth/assoc state; 848 * IEEE 802.11, Chap 5.5. 849 * 850 * mac80211 filters only based on association state, i.e. it drops 851 * Class 3 frames from not associated stations. hostapd sends 852 * deauth/disassoc frames when needed. In addition, hostapd is 853 * responsible for filtering on both auth and assoc states. 854 */ 855 856 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 857 return ieee80211_rx_mesh_check(rx); 858 859 if (unlikely((ieee80211_is_data(hdr->frame_control) || 860 ieee80211_is_pspoll(hdr->frame_control)) && 861 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 862 rx->sdata->vif.type != NL80211_IFTYPE_WDS && 863 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { 864 /* 865 * accept port control frames from the AP even when it's not 866 * yet marked ASSOC to prevent a race where we don't set the 867 * assoc bit quickly enough before it sends the first frame 868 */ 869 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && 870 ieee80211_is_data_present(hdr->frame_control)) { 871 u16 ethertype; 872 u8 *payload; 873 874 payload = rx->skb->data + 875 ieee80211_hdrlen(hdr->frame_control); 876 ethertype = (payload[6] << 8) | payload[7]; 877 if (cpu_to_be16(ethertype) == 878 rx->sdata->control_port_protocol) 879 return RX_CONTINUE; 880 } 881 882 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 883 cfg80211_rx_spurious_frame(rx->sdata->dev, 884 hdr->addr2, 885 GFP_ATOMIC)) 886 return RX_DROP_UNUSABLE; 887 888 return RX_DROP_MONITOR; 889 } 890 891 return RX_CONTINUE; 892 } 893 894 895 static ieee80211_rx_result debug_noinline 896 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 897 { 898 struct sk_buff *skb = rx->skb; 899 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 900 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 901 int keyidx; 902 int hdrlen; 903 ieee80211_rx_result result = RX_DROP_UNUSABLE; 904 struct ieee80211_key *sta_ptk = NULL; 905 int mmie_keyidx = -1; 906 __le16 fc; 907 908 /* 909 * Key selection 101 910 * 911 * There are four types of keys: 912 * - GTK (group keys) 913 * - IGTK (group keys for management frames) 914 * - PTK (pairwise keys) 915 * - STK (station-to-station pairwise keys) 916 * 917 * When selecting a key, we have to distinguish between multicast 918 * (including broadcast) and unicast frames, the latter can only 919 * use PTKs and STKs while the former always use GTKs and IGTKs. 920 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then 921 * unicast frames can also use key indices like GTKs. Hence, if we 922 * don't have a PTK/STK we check the key index for a WEP key. 923 * 924 * Note that in a regular BSS, multicast frames are sent by the 925 * AP only, associated stations unicast the frame to the AP first 926 * which then multicasts it on their behalf. 
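         *
         * In short, the code below tries, in order: the station's PTK for
         * unicast frames; for BIP-protected group management frames, the
         * IGTK named by the MMIE key index; and otherwise the GTK or WEP key
         * named by the key index bits in the IV.  For frames that arrived
         * unprotected, any key that could have protected them is noted so
         * that the frame can still be dropped later if protection was
         * expected.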
927 * 928 * There is also a slight problem in IBSS mode: GTKs are negotiated 929 * with each station, that is something we don't currently handle. 930 * The spec seems to expect that one negotiates the same key with 931 * every station but there's no such requirement; VLANs could be 932 * possible. 933 */ 934 935 /* 936 * No point in finding a key and decrypting if the frame is neither 937 * addressed to us nor a multicast frame. 938 */ 939 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 940 return RX_CONTINUE; 941 942 /* start without a key */ 943 rx->key = NULL; 944 945 if (rx->sta) 946 sta_ptk = rcu_dereference(rx->sta->ptk); 947 948 fc = hdr->frame_control; 949 950 if (!ieee80211_has_protected(fc)) 951 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 952 953 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 954 rx->key = sta_ptk; 955 if ((status->flag & RX_FLAG_DECRYPTED) && 956 (status->flag & RX_FLAG_IV_STRIPPED)) 957 return RX_CONTINUE; 958 /* Skip decryption if the frame is not protected. */ 959 if (!ieee80211_has_protected(fc)) 960 return RX_CONTINUE; 961 } else if (mmie_keyidx >= 0) { 962 /* Broadcast/multicast robust management frame / BIP */ 963 if ((status->flag & RX_FLAG_DECRYPTED) && 964 (status->flag & RX_FLAG_IV_STRIPPED)) 965 return RX_CONTINUE; 966 967 if (mmie_keyidx < NUM_DEFAULT_KEYS || 968 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 969 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 970 if (rx->sta) 971 rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]); 972 if (!rx->key) 973 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 974 } else if (!ieee80211_has_protected(fc)) { 975 /* 976 * The frame was not protected, so skip decryption. However, we 977 * need to set rx->key if there is a key that could have been 978 * used so that the frame may be dropped if encryption would 979 * have been expected. 980 */ 981 struct ieee80211_key *key = NULL; 982 struct ieee80211_sub_if_data *sdata = rx->sdata; 983 int i; 984 985 if (ieee80211_is_mgmt(fc) && 986 is_multicast_ether_addr(hdr->addr1) && 987 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 988 rx->key = key; 989 else { 990 if (rx->sta) { 991 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 992 key = rcu_dereference(rx->sta->gtk[i]); 993 if (key) 994 break; 995 } 996 } 997 if (!key) { 998 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 999 key = rcu_dereference(sdata->keys[i]); 1000 if (key) 1001 break; 1002 } 1003 } 1004 if (key) 1005 rx->key = key; 1006 } 1007 return RX_CONTINUE; 1008 } else { 1009 u8 keyid; 1010 /* 1011 * The device doesn't give us the IV so we won't be 1012 * able to look up the key. That's ok though, we 1013 * don't need to decrypt the frame, we just won't 1014 * be able to keep statistics accurate. 1015 * Except for key threshold notifications, should 1016 * we somehow allow the driver to tell us which key 1017 * the hardware used if this flag is set? 1018 */ 1019 if ((status->flag & RX_FLAG_DECRYPTED) && 1020 (status->flag & RX_FLAG_IV_STRIPPED)) 1021 return RX_CONTINUE; 1022 1023 hdrlen = ieee80211_hdrlen(fc); 1024 1025 if (rx->skb->len < 8 + hdrlen) 1026 return RX_DROP_UNUSABLE; /* TODO: count this? 
*/ 1027 1028 /* 1029 * no need to call ieee80211_wep_get_keyidx, 1030 * it verifies a bunch of things we've done already 1031 */ 1032 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 1033 keyidx = keyid >> 6; 1034 1035 /* check per-station GTK first, if multicast packet */ 1036 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 1037 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 1038 1039 /* if not found, try default key */ 1040 if (!rx->key) { 1041 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 1042 1043 /* 1044 * RSNA-protected unicast frames should always be 1045 * sent with pairwise or station-to-station keys, 1046 * but for WEP we allow using a key index as well. 1047 */ 1048 if (rx->key && 1049 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 1050 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 1051 !is_multicast_ether_addr(hdr->addr1)) 1052 rx->key = NULL; 1053 } 1054 } 1055 1056 if (rx->key) { 1057 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 1058 return RX_DROP_MONITOR; 1059 1060 rx->key->tx_rx_count++; 1061 /* TODO: add threshold stuff again */ 1062 } else { 1063 return RX_DROP_MONITOR; 1064 } 1065 1066 switch (rx->key->conf.cipher) { 1067 case WLAN_CIPHER_SUITE_WEP40: 1068 case WLAN_CIPHER_SUITE_WEP104: 1069 result = ieee80211_crypto_wep_decrypt(rx); 1070 break; 1071 case WLAN_CIPHER_SUITE_TKIP: 1072 result = ieee80211_crypto_tkip_decrypt(rx); 1073 break; 1074 case WLAN_CIPHER_SUITE_CCMP: 1075 result = ieee80211_crypto_ccmp_decrypt(rx); 1076 break; 1077 case WLAN_CIPHER_SUITE_AES_CMAC: 1078 result = ieee80211_crypto_aes_cmac_decrypt(rx); 1079 break; 1080 default: 1081 /* 1082 * We can reach here only with HW-only algorithms 1083 * but why didn't it decrypt the frame?! 1084 */ 1085 return RX_DROP_UNUSABLE; 1086 } 1087 1088 /* the hdr variable is invalid after the decrypt handlers */ 1089 1090 /* either the frame has been decrypted or will be dropped */ 1091 status->flag |= RX_FLAG_DECRYPTED; 1092 1093 return result; 1094 } 1095 1096 static ieee80211_rx_result debug_noinline 1097 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) 1098 { 1099 struct ieee80211_local *local; 1100 struct ieee80211_hdr *hdr; 1101 struct sk_buff *skb; 1102 1103 local = rx->local; 1104 skb = rx->skb; 1105 hdr = (struct ieee80211_hdr *) skb->data; 1106 1107 if (!local->pspolling) 1108 return RX_CONTINUE; 1109 1110 if (!ieee80211_has_fromds(hdr->frame_control)) 1111 /* this is not from AP */ 1112 return RX_CONTINUE; 1113 1114 if (!ieee80211_is_data(hdr->frame_control)) 1115 return RX_CONTINUE; 1116 1117 if (!ieee80211_has_moredata(hdr->frame_control)) { 1118 /* AP has no more frames buffered for us */ 1119 local->pspolling = false; 1120 return RX_CONTINUE; 1121 } 1122 1123 /* more data bit is set, let's request a new frame from the AP */ 1124 ieee80211_send_pspoll(local, rx->sdata); 1125 1126 return RX_CONTINUE; 1127 } 1128 1129 static void ap_sta_ps_start(struct sta_info *sta) 1130 { 1131 struct ieee80211_sub_if_data *sdata = sta->sdata; 1132 struct ieee80211_local *local = sdata->local; 1133 1134 atomic_inc(&sdata->bss->num_sta_ps); 1135 set_sta_flag(sta, WLAN_STA_PS_STA); 1136 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 1137 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1138 #ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 1139 printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n", 1140 sdata->name, sta->sta.addr, sta->sta.aid); 1141 #endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */ 1142 } 1143 1144 static void ap_sta_ps_end(struct sta_info *sta) 1145 { 1146 
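        /*
         * The station signalled a doze->wake transition; deliver the frames
         * buffered for it while it slept, unless the driver has delivery
         * blocked (WLAN_STA_PS_DRIVER), in which case the wakeup is finished
         * when the driver unblocks the station.
         */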
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
        printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
               sta->sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */

        if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
                printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
                       sta->sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
                return;
        }

        ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
{
        struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
        bool in_ps;

        WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));

        /* Don't let the same PS state be set twice */
        in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
        if ((start && in_ps) || (!start && !in_ps))
                return -EINVAL;

        if (start)
                ap_sta_ps_start(sta_inf);
        else
                ap_sta_ps_end(sta_inf);

        return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_hdr *hdr = (void *)rx->skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
        int tid, ac;

        if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
                return RX_CONTINUE;

        if (sdata->vif.type != NL80211_IFTYPE_AP &&
            sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
                return RX_CONTINUE;

        /*
         * The device handles station powersave, so don't do anything about
         * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
         * to mac80211 by the device, since it handles them.)
         */
        if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
                return RX_CONTINUE;

        /*
         * Don't do anything if the station isn't already asleep. In
         * the uAPSD case, the station will probably be marked asleep,
         * in the PS-Poll case the station must be confused ...
         */
        if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
                return RX_CONTINUE;

        if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
                if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
                        if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
                                ieee80211_sta_ps_deliver_poll_response(rx->sta);
                        else
                                set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
                }

                /* Free the PS-Poll skb here instead of returning RX_DROP,
                 * which would count it as a dropped frame. */
                dev_kfree_skb(rx->skb);

                return RX_QUEUED;
        } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
                   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
                   ieee80211_has_pm(hdr->frame_control) &&
                   (ieee80211_is_data_qos(hdr->frame_control) ||
                    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
                tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
                ac = ieee802_1d_to_ac[tid & 7];

                /*
                 * If this AC is not trigger-enabled do nothing.
                 *
                 * NB: This could/should check a separate bitmap of trigger-
                 * enabled queues, but for now we only implement uAPSD w/o
                 * TSPEC changes to the ACs, so they're always the same.
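                 * (ieee802_1d_to_ac[] collapses the eight TIDs onto the four
                 * ACs, e.g. TIDs 6 and 7 both map to the voice AC, and only
                 * that AC's bit in sta->sta.uapsd_queues is checked below.)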
1241 */ 1242 if (!(rx->sta->sta.uapsd_queues & BIT(ac))) 1243 return RX_CONTINUE; 1244 1245 /* if we are in a service period, do nothing */ 1246 if (test_sta_flag(rx->sta, WLAN_STA_SP)) 1247 return RX_CONTINUE; 1248 1249 if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER)) 1250 ieee80211_sta_ps_deliver_uapsd(rx->sta); 1251 else 1252 set_sta_flag(rx->sta, WLAN_STA_UAPSD); 1253 } 1254 1255 return RX_CONTINUE; 1256 } 1257 1258 static ieee80211_rx_result debug_noinline 1259 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx) 1260 { 1261 struct sta_info *sta = rx->sta; 1262 struct sk_buff *skb = rx->skb; 1263 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1264 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1265 1266 if (!sta) 1267 return RX_CONTINUE; 1268 1269 /* 1270 * Update last_rx only for IBSS packets which are for the current 1271 * BSSID to avoid keeping the current IBSS network alive in cases 1272 * where other STAs start using different BSSID. 1273 */ 1274 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) { 1275 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len, 1276 NL80211_IFTYPE_ADHOC); 1277 if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0) { 1278 sta->last_rx = jiffies; 1279 if (ieee80211_is_data(hdr->frame_control)) { 1280 sta->last_rx_rate_idx = status->rate_idx; 1281 sta->last_rx_rate_flag = status->flag; 1282 } 1283 } 1284 } else if (!is_multicast_ether_addr(hdr->addr1)) { 1285 /* 1286 * Mesh beacons will update last_rx when if they are found to 1287 * match the current local configuration when processed. 1288 */ 1289 sta->last_rx = jiffies; 1290 if (ieee80211_is_data(hdr->frame_control)) { 1291 sta->last_rx_rate_idx = status->rate_idx; 1292 sta->last_rx_rate_flag = status->flag; 1293 } 1294 } 1295 1296 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1297 return RX_CONTINUE; 1298 1299 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION) 1300 ieee80211_sta_rx_notify(rx->sdata, hdr); 1301 1302 sta->rx_fragments++; 1303 sta->rx_bytes += rx->skb->len; 1304 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 1305 sta->last_signal = status->signal; 1306 ewma_add(&sta->avg_signal, -status->signal); 1307 } 1308 1309 /* 1310 * Change STA power saving mode only at the end of a frame 1311 * exchange sequence. 1312 */ 1313 if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) && 1314 !ieee80211_has_morefrags(hdr->frame_control) && 1315 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1316 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1317 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 1318 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 1319 /* 1320 * Ignore doze->wake transitions that are 1321 * indicated by non-data frames, the standard 1322 * is unclear here, but for example going to 1323 * PS mode and then scanning would cause a 1324 * doze->wake transition for the probe request, 1325 * and that is clearly undesirable. 1326 */ 1327 if (ieee80211_is_data(hdr->frame_control) && 1328 !ieee80211_has_pm(hdr->frame_control)) 1329 ap_sta_ps_end(sta); 1330 } else { 1331 if (ieee80211_has_pm(hdr->frame_control)) 1332 ap_sta_ps_start(sta); 1333 } 1334 } 1335 1336 /* 1337 * Drop (qos-)data::nullfunc frames silently, since they 1338 * are used only to control station power saving mode. 
1339 */ 1340 if (ieee80211_is_nullfunc(hdr->frame_control) || 1341 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 1342 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1343 1344 /* 1345 * If we receive a 4-addr nullfunc frame from a STA 1346 * that was not moved to a 4-addr STA vlan yet send 1347 * the event to userspace and for older hostapd drop 1348 * the frame to the monitor interface. 1349 */ 1350 if (ieee80211_has_a4(hdr->frame_control) && 1351 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1352 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1353 !rx->sdata->u.vlan.sta))) { 1354 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) 1355 cfg80211_rx_unexpected_4addr_frame( 1356 rx->sdata->dev, sta->sta.addr, 1357 GFP_ATOMIC); 1358 return RX_DROP_MONITOR; 1359 } 1360 /* 1361 * Update counter and free packet here to avoid 1362 * counting this as a dropped packed. 1363 */ 1364 sta->rx_packets++; 1365 dev_kfree_skb(rx->skb); 1366 return RX_QUEUED; 1367 } 1368 1369 return RX_CONTINUE; 1370 } /* ieee80211_rx_h_sta_process */ 1371 1372 static inline struct ieee80211_fragment_entry * 1373 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 1374 unsigned int frag, unsigned int seq, int rx_queue, 1375 struct sk_buff **skb) 1376 { 1377 struct ieee80211_fragment_entry *entry; 1378 int idx; 1379 1380 idx = sdata->fragment_next; 1381 entry = &sdata->fragments[sdata->fragment_next++]; 1382 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 1383 sdata->fragment_next = 0; 1384 1385 if (!skb_queue_empty(&entry->skb_list)) { 1386 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1387 struct ieee80211_hdr *hdr = 1388 (struct ieee80211_hdr *) entry->skb_list.next->data; 1389 printk(KERN_DEBUG "%s: RX reassembly removed oldest " 1390 "fragment entry (idx=%d age=%lu seq=%d last_frag=%d " 1391 "addr1=%pM addr2=%pM\n", 1392 sdata->name, idx, 1393 jiffies - entry->first_frag_time, entry->seq, 1394 entry->last_frag, hdr->addr1, hdr->addr2); 1395 #endif 1396 __skb_queue_purge(&entry->skb_list); 1397 } 1398 1399 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 1400 *skb = NULL; 1401 entry->first_frag_time = jiffies; 1402 entry->seq = seq; 1403 entry->rx_queue = rx_queue; 1404 entry->last_frag = frag; 1405 entry->ccmp = 0; 1406 entry->extra_len = 0; 1407 1408 return entry; 1409 } 1410 1411 static inline struct ieee80211_fragment_entry * 1412 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 1413 unsigned int frag, unsigned int seq, 1414 int rx_queue, struct ieee80211_hdr *hdr) 1415 { 1416 struct ieee80211_fragment_entry *entry; 1417 int i, idx; 1418 1419 idx = sdata->fragment_next; 1420 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 1421 struct ieee80211_hdr *f_hdr; 1422 1423 idx--; 1424 if (idx < 0) 1425 idx = IEEE80211_FRAGMENT_MAX - 1; 1426 1427 entry = &sdata->fragments[idx]; 1428 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 1429 entry->rx_queue != rx_queue || 1430 entry->last_frag + 1 != frag) 1431 continue; 1432 1433 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; 1434 1435 /* 1436 * Check ftype and addresses are equal, else check next fragment 1437 */ 1438 if (((hdr->frame_control ^ f_hdr->frame_control) & 1439 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 1440 compare_ether_addr(hdr->addr1, f_hdr->addr1) != 0 || 1441 compare_ether_addr(hdr->addr2, f_hdr->addr2) != 0) 1442 continue; 1443 1444 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 1445 __skb_queue_purge(&entry->skb_list); 1446 continue; 1447 } 1448 return entry; 1449 } 1450 
1451 return NULL; 1452 } 1453 1454 static ieee80211_rx_result debug_noinline 1455 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 1456 { 1457 struct ieee80211_hdr *hdr; 1458 u16 sc; 1459 __le16 fc; 1460 unsigned int frag, seq; 1461 struct ieee80211_fragment_entry *entry; 1462 struct sk_buff *skb; 1463 struct ieee80211_rx_status *status; 1464 1465 hdr = (struct ieee80211_hdr *)rx->skb->data; 1466 fc = hdr->frame_control; 1467 sc = le16_to_cpu(hdr->seq_ctrl); 1468 frag = sc & IEEE80211_SCTL_FRAG; 1469 1470 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1471 (rx->skb)->len < 24 || 1472 is_multicast_ether_addr(hdr->addr1))) { 1473 /* not fragmented */ 1474 goto out; 1475 } 1476 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1477 1478 if (skb_linearize(rx->skb)) 1479 return RX_DROP_UNUSABLE; 1480 1481 /* 1482 * skb_linearize() might change the skb->data and 1483 * previously cached variables (in this case, hdr) need to 1484 * be refreshed with the new data. 1485 */ 1486 hdr = (struct ieee80211_hdr *)rx->skb->data; 1487 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 1488 1489 if (frag == 0) { 1490 /* This is the first fragment of a new frame. */ 1491 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1492 rx->seqno_idx, &(rx->skb)); 1493 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP && 1494 ieee80211_has_protected(fc)) { 1495 int queue = rx->security_idx; 1496 /* Store CCMP PN so that we can verify that the next 1497 * fragment has a sequential PN value. */ 1498 entry->ccmp = 1; 1499 memcpy(entry->last_pn, 1500 rx->key->u.ccmp.rx_pn[queue], 1501 CCMP_PN_LEN); 1502 } 1503 return RX_QUEUED; 1504 } 1505 1506 /* This is a fragment for a frame that should already be pending in 1507 * fragment cache. Add this fragment to the end of the pending entry. 1508 */ 1509 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 1510 rx->seqno_idx, hdr); 1511 if (!entry) { 1512 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1513 return RX_DROP_MONITOR; 1514 } 1515 1516 /* Verify that MPDUs within one MSDU have sequential PN values. 
1517 * (IEEE 802.11i, 8.3.3.4.5) */ 1518 if (entry->ccmp) { 1519 int i; 1520 u8 pn[CCMP_PN_LEN], *rpn; 1521 int queue; 1522 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP) 1523 return RX_DROP_UNUSABLE; 1524 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1525 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 1526 pn[i]++; 1527 if (pn[i]) 1528 break; 1529 } 1530 queue = rx->security_idx; 1531 rpn = rx->key->u.ccmp.rx_pn[queue]; 1532 if (memcmp(pn, rpn, CCMP_PN_LEN)) 1533 return RX_DROP_UNUSABLE; 1534 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 1535 } 1536 1537 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1538 __skb_queue_tail(&entry->skb_list, rx->skb); 1539 entry->last_frag = frag; 1540 entry->extra_len += rx->skb->len; 1541 if (ieee80211_has_morefrags(fc)) { 1542 rx->skb = NULL; 1543 return RX_QUEUED; 1544 } 1545 1546 rx->skb = __skb_dequeue(&entry->skb_list); 1547 if (skb_tailroom(rx->skb) < entry->extra_len) { 1548 I802_DEBUG_INC(rx->local->rx_expand_skb_head2); 1549 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1550 GFP_ATOMIC))) { 1551 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1552 __skb_queue_purge(&entry->skb_list); 1553 return RX_DROP_UNUSABLE; 1554 } 1555 } 1556 while ((skb = __skb_dequeue(&entry->skb_list))) { 1557 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1558 dev_kfree_skb(skb); 1559 } 1560 1561 /* Complete frame has been reassembled - process it now */ 1562 status = IEEE80211_SKB_RXCB(rx->skb); 1563 status->rx_flags |= IEEE80211_RX_FRAGMENTED; 1564 1565 out: 1566 if (rx->sta) 1567 rx->sta->rx_packets++; 1568 if (is_multicast_ether_addr(hdr->addr1)) 1569 rx->local->dot11MulticastReceivedFrameCount++; 1570 else 1571 ieee80211_led_rx(rx->local); 1572 return RX_CONTINUE; 1573 } 1574 1575 static int 1576 ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1577 { 1578 if (unlikely(!rx->sta || 1579 !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 1580 return -EACCES; 1581 1582 return 0; 1583 } 1584 1585 static int 1586 ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1587 { 1588 struct sk_buff *skb = rx->skb; 1589 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1590 1591 /* 1592 * Pass through unencrypted frames if the hardware has 1593 * decrypted them already. 1594 */ 1595 if (status->flag & RX_FLAG_DECRYPTED) 1596 return 0; 1597 1598 /* Drop unencrypted frames if key is set. */ 1599 if (unlikely(!ieee80211_has_protected(fc) && 1600 !ieee80211_is_nullfunc(fc) && 1601 ieee80211_is_data(fc) && 1602 (rx->key || rx->sdata->drop_unencrypted))) 1603 return -EACCES; 1604 1605 return 0; 1606 } 1607 1608 static int 1609 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 1610 { 1611 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1612 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1613 __le16 fc = hdr->frame_control; 1614 1615 /* 1616 * Pass through unencrypted frames if the hardware has 1617 * decrypted them already. 
1618 */ 1619 if (status->flag & RX_FLAG_DECRYPTED) 1620 return 0; 1621 1622 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 1623 if (unlikely(!ieee80211_has_protected(fc) && 1624 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 1625 rx->key)) { 1626 if (ieee80211_is_deauth(fc)) 1627 cfg80211_send_unprot_deauth(rx->sdata->dev, 1628 rx->skb->data, 1629 rx->skb->len); 1630 else if (ieee80211_is_disassoc(fc)) 1631 cfg80211_send_unprot_disassoc(rx->sdata->dev, 1632 rx->skb->data, 1633 rx->skb->len); 1634 return -EACCES; 1635 } 1636 /* BIP does not use Protected field, so need to check MMIE */ 1637 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 1638 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 1639 if (ieee80211_is_deauth(fc)) 1640 cfg80211_send_unprot_deauth(rx->sdata->dev, 1641 rx->skb->data, 1642 rx->skb->len); 1643 else if (ieee80211_is_disassoc(fc)) 1644 cfg80211_send_unprot_disassoc(rx->sdata->dev, 1645 rx->skb->data, 1646 rx->skb->len); 1647 return -EACCES; 1648 } 1649 /* 1650 * When using MFP, Action frames are not allowed prior to 1651 * having configured keys. 1652 */ 1653 if (unlikely(ieee80211_is_action(fc) && !rx->key && 1654 ieee80211_is_robust_mgmt_frame( 1655 (struct ieee80211_hdr *) rx->skb->data))) 1656 return -EACCES; 1657 } 1658 1659 return 0; 1660 } 1661 1662 static int 1663 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 1664 { 1665 struct ieee80211_sub_if_data *sdata = rx->sdata; 1666 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1667 bool check_port_control = false; 1668 struct ethhdr *ehdr; 1669 int ret; 1670 1671 *port_control = false; 1672 if (ieee80211_has_a4(hdr->frame_control) && 1673 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1674 return -1; 1675 1676 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1677 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 1678 1679 if (!sdata->u.mgd.use_4addr) 1680 return -1; 1681 else 1682 check_port_control = true; 1683 } 1684 1685 if (is_multicast_ether_addr(hdr->addr1) && 1686 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 1687 return -1; 1688 1689 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1690 if (ret < 0) 1691 return ret; 1692 1693 ehdr = (struct ethhdr *) rx->skb->data; 1694 if (ehdr->h_proto == rx->sdata->control_port_protocol) 1695 *port_control = true; 1696 else if (check_port_control) 1697 return -1; 1698 1699 return 0; 1700 } 1701 1702 /* 1703 * requires that rx->skb is a frame with ethernet header 1704 */ 1705 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 1706 { 1707 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 1708 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 1709 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1710 1711 /* 1712 * Allow EAPOL frames to us/the PAE group address regardless 1713 * of whether the frame was encrypted or not. 
1714 */ 1715 if (ehdr->h_proto == rx->sdata->control_port_protocol && 1716 (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 || 1717 compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0)) 1718 return true; 1719 1720 if (ieee80211_802_1x_port_control(rx) || 1721 ieee80211_drop_unencrypted(rx, fc)) 1722 return false; 1723 1724 return true; 1725 } 1726 1727 /* 1728 * requires that rx->skb is a frame with ethernet header 1729 */ 1730 static void 1731 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 1732 { 1733 struct ieee80211_sub_if_data *sdata = rx->sdata; 1734 struct net_device *dev = sdata->dev; 1735 struct sk_buff *skb, *xmit_skb; 1736 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 1737 struct sta_info *dsta; 1738 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1739 1740 skb = rx->skb; 1741 xmit_skb = NULL; 1742 1743 if ((sdata->vif.type == NL80211_IFTYPE_AP || 1744 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 1745 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 1746 (status->rx_flags & IEEE80211_RX_RA_MATCH) && 1747 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 1748 if (is_multicast_ether_addr(ehdr->h_dest)) { 1749 /* 1750 * send multicast frames both to higher layers in 1751 * local net stack and back to the wireless medium 1752 */ 1753 xmit_skb = skb_copy(skb, GFP_ATOMIC); 1754 if (!xmit_skb && net_ratelimit()) 1755 printk(KERN_DEBUG "%s: failed to clone " 1756 "multicast frame\n", dev->name); 1757 } else { 1758 dsta = sta_info_get(sdata, skb->data); 1759 if (dsta) { 1760 /* 1761 * The destination station is associated to 1762 * this AP (in this VLAN), so send the frame 1763 * directly to it and do not pass it to local 1764 * net stack. 1765 */ 1766 xmit_skb = skb; 1767 skb = NULL; 1768 } 1769 } 1770 } 1771 1772 if (skb) { 1773 int align __maybe_unused; 1774 1775 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1776 /* 1777 * 'align' will only take the values 0 or 2 here 1778 * since all frames are required to be aligned 1779 * to 2-byte boundaries when being passed to 1780 * mac80211. That also explains the __skb_push() 1781 * below. 1782 */ 1783 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; 1784 if (align) { 1785 if (WARN_ON(skb_headroom(skb) < 3)) { 1786 dev_kfree_skb(skb); 1787 skb = NULL; 1788 } else { 1789 u8 *data = skb->data; 1790 size_t len = skb_headlen(skb); 1791 skb->data -= align; 1792 memmove(skb->data, data, len); 1793 skb_set_tail_pointer(skb, len); 1794 } 1795 } 1796 #endif 1797 1798 if (skb) { 1799 /* deliver to local stack */ 1800 skb->protocol = eth_type_trans(skb, dev); 1801 memset(skb->cb, 0, sizeof(skb->cb)); 1802 netif_receive_skb(skb); 1803 } 1804 } 1805 1806 if (xmit_skb) { 1807 /* 1808 * Send to wireless media and increase priority by 256 to 1809 * keep the received priority instead of reclassifying 1810 * the frame (see cfg80211_classify8021d). 
1811 */ 1812 xmit_skb->priority += 256; 1813 xmit_skb->protocol = htons(ETH_P_802_3); 1814 skb_reset_network_header(xmit_skb); 1815 skb_reset_mac_header(xmit_skb); 1816 dev_queue_xmit(xmit_skb); 1817 } 1818 } 1819 1820 static ieee80211_rx_result debug_noinline 1821 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 1822 { 1823 struct net_device *dev = rx->sdata->dev; 1824 struct sk_buff *skb = rx->skb; 1825 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1826 __le16 fc = hdr->frame_control; 1827 struct sk_buff_head frame_list; 1828 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1829 1830 if (unlikely(!ieee80211_is_data(fc))) 1831 return RX_CONTINUE; 1832 1833 if (unlikely(!ieee80211_is_data_present(fc))) 1834 return RX_DROP_MONITOR; 1835 1836 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 1837 return RX_CONTINUE; 1838 1839 if (ieee80211_has_a4(hdr->frame_control) && 1840 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1841 !rx->sdata->u.vlan.sta) 1842 return RX_DROP_UNUSABLE; 1843 1844 if (is_multicast_ether_addr(hdr->addr1) && 1845 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1846 rx->sdata->u.vlan.sta) || 1847 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1848 rx->sdata->u.mgd.use_4addr))) 1849 return RX_DROP_UNUSABLE; 1850 1851 skb->dev = dev; 1852 __skb_queue_head_init(&frame_list); 1853 1854 if (skb_linearize(skb)) 1855 return RX_DROP_UNUSABLE; 1856 1857 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1858 rx->sdata->vif.type, 1859 rx->local->hw.extra_tx_headroom, true); 1860 1861 while (!skb_queue_empty(&frame_list)) { 1862 rx->skb = __skb_dequeue(&frame_list); 1863 1864 if (!ieee80211_frame_allowed(rx, fc)) { 1865 dev_kfree_skb(rx->skb); 1866 continue; 1867 } 1868 dev->stats.rx_packets++; 1869 dev->stats.rx_bytes += rx->skb->len; 1870 1871 ieee80211_deliver_skb(rx); 1872 } 1873 1874 return RX_QUEUED; 1875 } 1876 1877 #ifdef CONFIG_MAC80211_MESH 1878 static ieee80211_rx_result 1879 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 1880 { 1881 struct ieee80211_hdr *fwd_hdr, *hdr; 1882 struct ieee80211_tx_info *info; 1883 struct ieee80211s_hdr *mesh_hdr; 1884 struct sk_buff *skb = rx->skb, *fwd_skb; 1885 struct ieee80211_local *local = rx->local; 1886 struct ieee80211_sub_if_data *sdata = rx->sdata; 1887 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1888 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 1889 __le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD); 1890 u16 q, hdrlen; 1891 1892 hdr = (struct ieee80211_hdr *) skb->data; 1893 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1894 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 1895 1896 /* frame is in RMC, don't forward */ 1897 if (ieee80211_is_data(hdr->frame_control) && 1898 is_multicast_ether_addr(hdr->addr1) && 1899 mesh_rmc_check(hdr->addr3, mesh_hdr, rx->sdata)) 1900 return RX_DROP_MONITOR; 1901 1902 if (!ieee80211_is_data(hdr->frame_control)) 1903 return RX_CONTINUE; 1904 1905 if (!mesh_hdr->ttl) 1906 return RX_DROP_MONITOR; 1907 1908 if (mesh_hdr->flags & MESH_FLAGS_AE) { 1909 struct mesh_path *mppath; 1910 char *proxied_addr; 1911 char *mpp_addr; 1912 1913 if (is_multicast_ether_addr(hdr->addr1)) { 1914 mpp_addr = hdr->addr3; 1915 proxied_addr = mesh_hdr->eaddr1; 1916 } else { 1917 mpp_addr = hdr->addr4; 1918 proxied_addr = mesh_hdr->eaddr2; 1919 } 1920 1921 rcu_read_lock(); 1922 mppath = mpp_path_lookup(proxied_addr, sdata); 1923 if (!mppath) { 1924 mpp_path_add(proxied_addr, mpp_addr, sdata); 1925 } else { 1926 
spin_lock_bh(&mppath->state_lock); 1927 if (compare_ether_addr(mppath->mpp, mpp_addr) != 0) 1928 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 1929 spin_unlock_bh(&mppath->state_lock); 1930 } 1931 rcu_read_unlock(); 1932 } 1933 1934 /* Frame has reached destination. Don't forward */ 1935 if (!is_multicast_ether_addr(hdr->addr1) && 1936 compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0) 1937 return RX_CONTINUE; 1938 1939 q = ieee80211_select_queue_80211(local, skb, hdr); 1940 if (ieee80211_queue_stopped(&local->hw, q)) { 1941 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 1942 return RX_DROP_MONITOR; 1943 } 1944 skb_set_queue_mapping(skb, q); 1945 1946 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 1947 goto out; 1948 1949 if (!--mesh_hdr->ttl) { 1950 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 1951 return RX_DROP_MONITOR; 1952 } 1953 1954 if (!ifmsh->mshcfg.dot11MeshForwarding) 1955 goto out; 1956 1957 fwd_skb = skb_copy(skb, GFP_ATOMIC); 1958 if (!fwd_skb) { 1959 if (net_ratelimit()) 1960 printk(KERN_DEBUG "%s: failed to clone mesh frame\n", 1961 sdata->name); 1962 goto out; 1963 } 1964 1965 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 1966 info = IEEE80211_SKB_CB(fwd_skb); 1967 memset(info, 0, sizeof(*info)); 1968 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1969 info->control.vif = &rx->sdata->vif; 1970 info->control.jiffies = jiffies; 1971 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 1972 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 1973 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 1974 } else if (!mesh_nexthop_lookup(fwd_skb, sdata)) { 1975 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 1976 } else { 1977 /* unable to resolve next hop */ 1978 mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3, 1979 0, reason, fwd_hdr->addr2, sdata); 1980 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 1981 kfree_skb(fwd_skb); 1982 return RX_DROP_MONITOR; 1983 } 1984 1985 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 1986 ieee80211_add_pending_skb(local, fwd_skb); 1987 out: 1988 if (is_multicast_ether_addr(hdr->addr1) || 1989 sdata->dev->flags & IFF_PROMISC) 1990 return RX_CONTINUE; 1991 else 1992 return RX_DROP_MONITOR; 1993 } 1994 #endif 1995 1996 static ieee80211_rx_result debug_noinline 1997 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 1998 { 1999 struct ieee80211_sub_if_data *sdata = rx->sdata; 2000 struct ieee80211_local *local = rx->local; 2001 struct net_device *dev = sdata->dev; 2002 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2003 __le16 fc = hdr->frame_control; 2004 bool port_control; 2005 int err; 2006 2007 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2008 return RX_CONTINUE; 2009 2010 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2011 return RX_DROP_MONITOR; 2012 2013 /* 2014 * Send unexpected-4addr-frame event to hostapd. For older versions, 2015 * also drop the frame to cooked monitor interfaces. 
2016 */ 2017 if (ieee80211_has_a4(hdr->frame_control) && 2018 sdata->vif.type == NL80211_IFTYPE_AP) { 2019 if (rx->sta && 2020 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2021 cfg80211_rx_unexpected_4addr_frame( 2022 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2023 return RX_DROP_MONITOR; 2024 } 2025 2026 err = __ieee80211_data_to_8023(rx, &port_control); 2027 if (unlikely(err)) 2028 return RX_DROP_UNUSABLE; 2029 2030 if (!ieee80211_frame_allowed(rx, fc)) 2031 return RX_DROP_MONITOR; 2032 2033 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2034 unlikely(port_control) && sdata->bss) { 2035 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2036 u.ap); 2037 dev = sdata->dev; 2038 rx->sdata = sdata; 2039 } 2040 2041 rx->skb->dev = dev; 2042 2043 dev->stats.rx_packets++; 2044 dev->stats.rx_bytes += rx->skb->len; 2045 2046 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2047 !is_multicast_ether_addr( 2048 ((struct ethhdr *)rx->skb->data)->h_dest) && 2049 (!local->scanning && 2050 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2051 mod_timer(&local->dynamic_ps_timer, jiffies + 2052 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2053 } 2054 2055 ieee80211_deliver_skb(rx); 2056 2057 return RX_QUEUED; 2058 } 2059 2060 static ieee80211_rx_result debug_noinline 2061 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx) 2062 { 2063 struct ieee80211_local *local = rx->local; 2064 struct ieee80211_hw *hw = &local->hw; 2065 struct sk_buff *skb = rx->skb; 2066 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2067 struct tid_ampdu_rx *tid_agg_rx; 2068 u16 start_seq_num; 2069 u16 tid; 2070 2071 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2072 return RX_CONTINUE; 2073 2074 if (ieee80211_is_back_req(bar->frame_control)) { 2075 struct { 2076 __le16 control, start_seq_num; 2077 } __packed bar_data; 2078 2079 if (!rx->sta) 2080 return RX_DROP_MONITOR; 2081 2082 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2083 &bar_data, sizeof(bar_data))) 2084 return RX_DROP_MONITOR; 2085 2086 tid = le16_to_cpu(bar_data.control) >> 12; 2087 2088 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2089 if (!tid_agg_rx) 2090 return RX_DROP_MONITOR; 2091 2092 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2093 2094 /* reset session timer */ 2095 if (tid_agg_rx->timeout) 2096 mod_timer(&tid_agg_rx->session_timer, 2097 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2098 2099 spin_lock(&tid_agg_rx->reorder_lock); 2100 /* release stored frames up to start of BAR */ 2101 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num); 2102 spin_unlock(&tid_agg_rx->reorder_lock); 2103 2104 kfree_skb(skb); 2105 return RX_QUEUED; 2106 } 2107 2108 /* 2109 * After this point, we only want management frames, 2110 * so we can drop all remaining control frames to 2111 * cooked monitor interfaces. 2112 */ 2113 return RX_DROP_MONITOR; 2114 } 2115 2116 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2117 struct ieee80211_mgmt *mgmt, 2118 size_t len) 2119 { 2120 struct ieee80211_local *local = sdata->local; 2121 struct sk_buff *skb; 2122 struct ieee80211_mgmt *resp; 2123 2124 if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) { 2125 /* Not to own unicast address */ 2126 return; 2127 } 2128 2129 if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 || 2130 compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) { 2131 /* Not from the current AP or not associated yet. 
*/ 2132 return; 2133 } 2134 2135 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2136 /* Too short SA Query request frame */ 2137 return; 2138 } 2139 2140 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2141 if (skb == NULL) 2142 return; 2143 2144 skb_reserve(skb, local->hw.extra_tx_headroom); 2145 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2146 memset(resp, 0, 24); 2147 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2148 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2149 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2150 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2151 IEEE80211_STYPE_ACTION); 2152 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2153 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2154 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2155 memcpy(resp->u.action.u.sa_query.trans_id, 2156 mgmt->u.action.u.sa_query.trans_id, 2157 WLAN_SA_QUERY_TR_ID_LEN); 2158 2159 ieee80211_tx_skb(sdata, skb); 2160 } 2161 2162 static ieee80211_rx_result debug_noinline 2163 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2164 { 2165 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2166 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2167 2168 /* 2169 * From here on, look only at management frames. 2170 * Data and control frames are already handled, 2171 * and unknown (reserved) frames are useless. 2172 */ 2173 if (rx->skb->len < 24) 2174 return RX_DROP_MONITOR; 2175 2176 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2177 return RX_DROP_MONITOR; 2178 2179 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2180 ieee80211_is_beacon(mgmt->frame_control) && 2181 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2182 int sig = 0; 2183 2184 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2185 sig = status->signal; 2186 2187 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2188 rx->skb->data, rx->skb->len, 2189 status->freq, sig, GFP_ATOMIC); 2190 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2191 } 2192 2193 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2194 return RX_DROP_MONITOR; 2195 2196 if (ieee80211_drop_unencrypted_mgmt(rx)) 2197 return RX_DROP_UNUSABLE; 2198 2199 return RX_CONTINUE; 2200 } 2201 2202 static ieee80211_rx_result debug_noinline 2203 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2204 { 2205 struct ieee80211_local *local = rx->local; 2206 struct ieee80211_sub_if_data *sdata = rx->sdata; 2207 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2208 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2209 int len = rx->skb->len; 2210 2211 if (!ieee80211_is_action(mgmt->frame_control)) 2212 return RX_CONTINUE; 2213 2214 /* drop too small frames */ 2215 if (len < IEEE80211_MIN_ACTION_SIZE) 2216 return RX_DROP_UNUSABLE; 2217 2218 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) 2219 return RX_DROP_UNUSABLE; 2220 2221 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 2222 return RX_DROP_UNUSABLE; 2223 2224 switch (mgmt->u.action.category) { 2225 case WLAN_CATEGORY_HT: 2226 /* reject HT action frames from stations not supporting HT */ 2227 if (!rx->sta->sta.ht_cap.ht_supported) 2228 goto invalid; 2229 2230 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2231 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2232 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2233 sdata->vif.type != NL80211_IFTYPE_AP && 2234 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2235 break; 2236 2237 /* verify action & smps_control are present */ 2238 if (len < 
IEEE80211_MIN_ACTION_SIZE + 2) 2239 goto invalid; 2240 2241 switch (mgmt->u.action.u.ht_smps.action) { 2242 case WLAN_HT_ACTION_SMPS: { 2243 struct ieee80211_supported_band *sband; 2244 u8 smps; 2245 2246 /* convert to HT capability */ 2247 switch (mgmt->u.action.u.ht_smps.smps_control) { 2248 case WLAN_HT_SMPS_CONTROL_DISABLED: 2249 smps = WLAN_HT_CAP_SM_PS_DISABLED; 2250 break; 2251 case WLAN_HT_SMPS_CONTROL_STATIC: 2252 smps = WLAN_HT_CAP_SM_PS_STATIC; 2253 break; 2254 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2255 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 2256 break; 2257 default: 2258 goto invalid; 2259 } 2260 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 2261 2262 /* if no change do nothing */ 2263 if ((rx->sta->sta.ht_cap.cap & 2264 IEEE80211_HT_CAP_SM_PS) == smps) 2265 goto handled; 2266 2267 rx->sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SM_PS; 2268 rx->sta->sta.ht_cap.cap |= smps; 2269 2270 sband = rx->local->hw.wiphy->bands[status->band]; 2271 2272 rate_control_rate_update( 2273 local, sband, rx->sta, 2274 IEEE80211_RC_SMPS_CHANGED, 2275 ieee80211_get_tx_channel_type( 2276 local, local->_oper_channel_type)); 2277 goto handled; 2278 } 2279 default: 2280 goto invalid; 2281 } 2282 2283 break; 2284 case WLAN_CATEGORY_BACK: 2285 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2286 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2287 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2288 sdata->vif.type != NL80211_IFTYPE_AP && 2289 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2290 break; 2291 2292 /* verify action_code is present */ 2293 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2294 break; 2295 2296 switch (mgmt->u.action.u.addba_req.action_code) { 2297 case WLAN_ACTION_ADDBA_REQ: 2298 if (len < (IEEE80211_MIN_ACTION_SIZE + 2299 sizeof(mgmt->u.action.u.addba_req))) 2300 goto invalid; 2301 break; 2302 case WLAN_ACTION_ADDBA_RESP: 2303 if (len < (IEEE80211_MIN_ACTION_SIZE + 2304 sizeof(mgmt->u.action.u.addba_resp))) 2305 goto invalid; 2306 break; 2307 case WLAN_ACTION_DELBA: 2308 if (len < (IEEE80211_MIN_ACTION_SIZE + 2309 sizeof(mgmt->u.action.u.delba))) 2310 goto invalid; 2311 break; 2312 default: 2313 goto invalid; 2314 } 2315 2316 goto queue; 2317 case WLAN_CATEGORY_SPECTRUM_MGMT: 2318 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 2319 break; 2320 2321 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2322 break; 2323 2324 /* verify action_code is present */ 2325 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2326 break; 2327 2328 switch (mgmt->u.action.u.measurement.action_code) { 2329 case WLAN_ACTION_SPCT_MSR_REQ: 2330 if (len < (IEEE80211_MIN_ACTION_SIZE + 2331 sizeof(mgmt->u.action.u.measurement))) 2332 break; 2333 ieee80211_process_measurement_req(sdata, mgmt, len); 2334 goto handled; 2335 case WLAN_ACTION_SPCT_CHL_SWITCH: 2336 if (len < (IEEE80211_MIN_ACTION_SIZE + 2337 sizeof(mgmt->u.action.u.chan_switch))) 2338 break; 2339 2340 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2341 break; 2342 2343 if (compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid)) 2344 break; 2345 2346 goto queue; 2347 } 2348 break; 2349 case WLAN_CATEGORY_SA_QUERY: 2350 if (len < (IEEE80211_MIN_ACTION_SIZE + 2351 sizeof(mgmt->u.action.u.sa_query))) 2352 break; 2353 2354 switch (mgmt->u.action.u.sa_query.action) { 2355 case WLAN_ACTION_SA_QUERY_REQUEST: 2356 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2357 break; 2358 ieee80211_process_sa_query_req(sdata, mgmt, len); 2359 goto handled; 2360 } 2361 break; 2362 case WLAN_CATEGORY_SELF_PROTECTED: 2363 switch (mgmt->u.action.u.self_prot.action_code) { 2364 case 
WLAN_SP_MESH_PEERING_OPEN: 2365 case WLAN_SP_MESH_PEERING_CLOSE: 2366 case WLAN_SP_MESH_PEERING_CONFIRM: 2367 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2368 goto invalid; 2369 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) 2370 /* userspace handles this frame */ 2371 break; 2372 goto queue; 2373 case WLAN_SP_MGK_INFORM: 2374 case WLAN_SP_MGK_ACK: 2375 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2376 goto invalid; 2377 break; 2378 } 2379 break; 2380 case WLAN_CATEGORY_MESH_ACTION: 2381 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2382 break; 2383 if (mesh_action_is_path_sel(mgmt) && 2384 (!mesh_path_sel_is_hwmp(sdata))) 2385 break; 2386 goto queue; 2387 } 2388 2389 return RX_CONTINUE; 2390 2391 invalid: 2392 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2393 /* will return in the next handlers */ 2394 return RX_CONTINUE; 2395 2396 handled: 2397 if (rx->sta) 2398 rx->sta->rx_packets++; 2399 dev_kfree_skb(rx->skb); 2400 return RX_QUEUED; 2401 2402 queue: 2403 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2404 skb_queue_tail(&sdata->skb_queue, rx->skb); 2405 ieee80211_queue_work(&local->hw, &sdata->work); 2406 if (rx->sta) 2407 rx->sta->rx_packets++; 2408 return RX_QUEUED; 2409 } 2410 2411 static ieee80211_rx_result debug_noinline 2412 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2413 { 2414 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2415 int sig = 0; 2416 2417 /* skip known-bad action frames and return them in the next handler */ 2418 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2419 return RX_CONTINUE; 2420 2421 /* 2422 * Getting here means the kernel doesn't know how to handle 2423 * it, but maybe userspace does ... include returned frames 2424 * so userspace can register for those to know whether ones 2425 * it transmitted were processed or returned. 2426 */ 2427 2428 if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) 2429 sig = status->signal; 2430 2431 if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig, 2432 rx->skb->data, rx->skb->len, 2433 GFP_ATOMIC)) { 2434 if (rx->sta) 2435 rx->sta->rx_packets++; 2436 dev_kfree_skb(rx->skb); 2437 return RX_QUEUED; 2438 } 2439 2440 2441 return RX_CONTINUE; 2442 } 2443 2444 static ieee80211_rx_result debug_noinline 2445 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2446 { 2447 struct ieee80211_local *local = rx->local; 2448 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2449 struct sk_buff *nskb; 2450 struct ieee80211_sub_if_data *sdata = rx->sdata; 2451 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2452 2453 if (!ieee80211_is_action(mgmt->frame_control)) 2454 return RX_CONTINUE; 2455 2456 /* 2457 * For AP mode, hostapd is responsible for handling any action 2458 * frames that we didn't handle, including returning unknown 2459 * ones. For all other modes we will return them to the sender, 2460 * setting the 0x80 bit in the action category, as required by 2461 * 802.11-2007 7.3.1.11. 2462 * Newer versions of hostapd shall also use the management frame 2463 * registration mechanisms, but older ones still use cooked 2464 * monitor interfaces so push all frames there. 
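 * (The copy transmitted below has the MSB of the category code set, the
 * original sender as its destination and our own address as the source.)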
2465 */ 2466 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2467 (sdata->vif.type == NL80211_IFTYPE_AP || 2468 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2469 return RX_DROP_MONITOR; 2470 2471 /* do not return rejected action frames */ 2472 if (mgmt->u.action.category & 0x80) 2473 return RX_DROP_UNUSABLE; 2474 2475 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2476 GFP_ATOMIC); 2477 if (nskb) { 2478 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 2479 2480 nmgmt->u.action.category |= 0x80; 2481 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 2482 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2483 2484 memset(nskb->cb, 0, sizeof(nskb->cb)); 2485 2486 ieee80211_tx_skb(rx->sdata, nskb); 2487 } 2488 dev_kfree_skb(rx->skb); 2489 return RX_QUEUED; 2490 } 2491 2492 static ieee80211_rx_result debug_noinline 2493 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 2494 { 2495 struct ieee80211_sub_if_data *sdata = rx->sdata; 2496 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2497 __le16 stype; 2498 2499 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 2500 2501 if (!ieee80211_vif_is_mesh(&sdata->vif) && 2502 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2503 sdata->vif.type != NL80211_IFTYPE_STATION) 2504 return RX_DROP_MONITOR; 2505 2506 switch (stype) { 2507 case cpu_to_le16(IEEE80211_STYPE_AUTH): 2508 case cpu_to_le16(IEEE80211_STYPE_BEACON): 2509 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 2510 /* process for all: mesh, mlme, ibss */ 2511 break; 2512 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 2513 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 2514 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 2515 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 2516 if (is_multicast_ether_addr(mgmt->da) && 2517 !is_broadcast_ether_addr(mgmt->da)) 2518 return RX_DROP_MONITOR; 2519 2520 /* process only for station */ 2521 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2522 return RX_DROP_MONITOR; 2523 break; 2524 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 2525 /* process only for ibss */ 2526 if (sdata->vif.type != NL80211_IFTYPE_ADHOC) 2527 return RX_DROP_MONITOR; 2528 break; 2529 default: 2530 return RX_DROP_MONITOR; 2531 } 2532 2533 /* queue up frame and kick off work to process it */ 2534 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2535 skb_queue_tail(&sdata->skb_queue, rx->skb); 2536 ieee80211_queue_work(&rx->local->hw, &sdata->work); 2537 if (rx->sta) 2538 rx->sta->rx_packets++; 2539 2540 return RX_QUEUED; 2541 } 2542 2543 /* TODO: use IEEE80211_RX_FRAGMENTED */ 2544 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2545 struct ieee80211_rate *rate) 2546 { 2547 struct ieee80211_sub_if_data *sdata; 2548 struct ieee80211_local *local = rx->local; 2549 struct sk_buff *skb = rx->skb, *skb2; 2550 struct net_device *prev_dev = NULL; 2551 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2552 int needed_headroom; 2553 2554 /* 2555 * If cooked monitor has been processed already, then 2556 * don't do it again. If not, set the flag. 
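 * (The same frame can be run through the rx handlers more than once, e.g.
 * once per matching interface, so the flag prevents duplicate delivery to
 * cooked monitor interfaces.)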
2557 */ 2558 if (rx->flags & IEEE80211_RX_CMNTR) 2559 goto out_free_skb; 2560 rx->flags |= IEEE80211_RX_CMNTR; 2561 2562 /* If there are no cooked monitor interfaces, just free the SKB */ 2563 if (!local->cooked_mntrs) 2564 goto out_free_skb; 2565 2566 /* room for the radiotap header based on driver features */ 2567 needed_headroom = ieee80211_rx_radiotap_len(local, status); 2568 2569 if (skb_headroom(skb) < needed_headroom && 2570 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 2571 goto out_free_skb; 2572 2573 /* prepend radiotap information */ 2574 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); 2575 2576 skb_set_mac_header(skb, 0); 2577 skb->ip_summed = CHECKSUM_UNNECESSARY; 2578 skb->pkt_type = PACKET_OTHERHOST; 2579 skb->protocol = htons(ETH_P_802_2); 2580 2581 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2582 if (!ieee80211_sdata_running(sdata)) 2583 continue; 2584 2585 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 2586 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 2587 continue; 2588 2589 if (prev_dev) { 2590 skb2 = skb_clone(skb, GFP_ATOMIC); 2591 if (skb2) { 2592 skb2->dev = prev_dev; 2593 netif_receive_skb(skb2); 2594 } 2595 } 2596 2597 prev_dev = sdata->dev; 2598 sdata->dev->stats.rx_packets++; 2599 sdata->dev->stats.rx_bytes += skb->len; 2600 } 2601 2602 if (prev_dev) { 2603 skb->dev = prev_dev; 2604 netif_receive_skb(skb); 2605 return; 2606 } 2607 2608 out_free_skb: 2609 dev_kfree_skb(skb); 2610 } 2611 2612 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 2613 ieee80211_rx_result res) 2614 { 2615 switch (res) { 2616 case RX_DROP_MONITOR: 2617 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2618 if (rx->sta) 2619 rx->sta->rx_dropped++; 2620 /* fall through */ 2621 case RX_CONTINUE: { 2622 struct ieee80211_rate *rate = NULL; 2623 struct ieee80211_supported_band *sband; 2624 struct ieee80211_rx_status *status; 2625 2626 status = IEEE80211_SKB_RXCB((rx->skb)); 2627 2628 sband = rx->local->hw.wiphy->bands[status->band]; 2629 if (!(status->flag & RX_FLAG_HT)) 2630 rate = &sband->bitrates[status->rate_idx]; 2631 2632 ieee80211_rx_cooked_monitor(rx, rate); 2633 break; 2634 } 2635 case RX_DROP_UNUSABLE: 2636 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 2637 if (rx->sta) 2638 rx->sta->rx_dropped++; 2639 dev_kfree_skb(rx->skb); 2640 break; 2641 case RX_QUEUED: 2642 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 2643 break; 2644 } 2645 } 2646 2647 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx) 2648 { 2649 ieee80211_rx_result res = RX_DROP_MONITOR; 2650 struct sk_buff *skb; 2651 2652 #define CALL_RXH(rxh) \ 2653 do { \ 2654 res = rxh(rx); \ 2655 if (res != RX_CONTINUE) \ 2656 goto rxh_next; \ 2657 } while (0); 2658 2659 spin_lock(&rx->local->rx_skb_queue.lock); 2660 if (rx->local->running_rx_handler) 2661 goto unlock; 2662 2663 rx->local->running_rx_handler = true; 2664 2665 while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) { 2666 spin_unlock(&rx->local->rx_skb_queue.lock); 2667 2668 /* 2669 * all the other fields are valid across frames 2670 * that belong to an aMPDU since they are on the 2671 * same TID from the same station 2672 */ 2673 rx->skb = skb; 2674 2675 CALL_RXH(ieee80211_rx_h_decrypt) 2676 CALL_RXH(ieee80211_rx_h_check_more_data) 2677 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 2678 CALL_RXH(ieee80211_rx_h_sta_process) 2679 CALL_RXH(ieee80211_rx_h_defragment) 2680 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 2681 /* must be after MMIC verify so header is counted in 
MPDU mic */ 2682 #ifdef CONFIG_MAC80211_MESH 2683 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 2684 CALL_RXH(ieee80211_rx_h_mesh_fwding); 2685 #endif 2686 CALL_RXH(ieee80211_rx_h_amsdu) 2687 CALL_RXH(ieee80211_rx_h_data) 2688 CALL_RXH(ieee80211_rx_h_ctrl); 2689 CALL_RXH(ieee80211_rx_h_mgmt_check) 2690 CALL_RXH(ieee80211_rx_h_action) 2691 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 2692 CALL_RXH(ieee80211_rx_h_action_return) 2693 CALL_RXH(ieee80211_rx_h_mgmt) 2694 2695 rxh_next: 2696 ieee80211_rx_handlers_result(rx, res); 2697 spin_lock(&rx->local->rx_skb_queue.lock); 2698 #undef CALL_RXH 2699 } 2700 2701 rx->local->running_rx_handler = false; 2702 2703 unlock: 2704 spin_unlock(&rx->local->rx_skb_queue.lock); 2705 } 2706 2707 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 2708 { 2709 ieee80211_rx_result res = RX_DROP_MONITOR; 2710 2711 #define CALL_RXH(rxh) \ 2712 do { \ 2713 res = rxh(rx); \ 2714 if (res != RX_CONTINUE) \ 2715 goto rxh_next; \ 2716 } while (0); 2717 2718 CALL_RXH(ieee80211_rx_h_passive_scan) 2719 CALL_RXH(ieee80211_rx_h_check) 2720 2721 ieee80211_rx_reorder_ampdu(rx); 2722 2723 ieee80211_rx_handlers(rx); 2724 return; 2725 2726 rxh_next: 2727 ieee80211_rx_handlers_result(rx, res); 2728 2729 #undef CALL_RXH 2730 } 2731 2732 /* 2733 * This function makes calls into the RX path, therefore 2734 * it has to be invoked under RCU read lock. 2735 */ 2736 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 2737 { 2738 struct ieee80211_rx_data rx = { 2739 .sta = sta, 2740 .sdata = sta->sdata, 2741 .local = sta->local, 2742 /* This is OK -- must be QoS data frame */ 2743 .security_idx = tid, 2744 .seqno_idx = tid, 2745 .flags = 0, 2746 }; 2747 struct tid_ampdu_rx *tid_agg_rx; 2748 2749 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 2750 if (!tid_agg_rx) 2751 return; 2752 2753 spin_lock(&tid_agg_rx->reorder_lock); 2754 ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx); 2755 spin_unlock(&tid_agg_rx->reorder_lock); 2756 2757 ieee80211_rx_handlers(&rx); 2758 } 2759 2760 /* main receive path */ 2761 2762 static int prepare_for_handlers(struct ieee80211_rx_data *rx, 2763 struct ieee80211_hdr *hdr) 2764 { 2765 struct ieee80211_sub_if_data *sdata = rx->sdata; 2766 struct sk_buff *skb = rx->skb; 2767 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2768 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 2769 int multicast = is_multicast_ether_addr(hdr->addr1); 2770 2771 switch (sdata->vif.type) { 2772 case NL80211_IFTYPE_STATION: 2773 if (!bssid && !sdata->u.mgd.use_4addr) 2774 return 0; 2775 if (!multicast && 2776 compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) { 2777 if (!(sdata->dev->flags & IFF_PROMISC) || 2778 sdata->u.mgd.use_4addr) 2779 return 0; 2780 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2781 } 2782 break; 2783 case NL80211_IFTYPE_ADHOC: 2784 if (!bssid) 2785 return 0; 2786 if (ieee80211_is_beacon(hdr->frame_control)) { 2787 return 1; 2788 } 2789 else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) { 2790 if (!(status->rx_flags & IEEE80211_RX_IN_SCAN)) 2791 return 0; 2792 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2793 } else if (!multicast && 2794 compare_ether_addr(sdata->vif.addr, 2795 hdr->addr1) != 0) { 2796 if (!(sdata->dev->flags & IFF_PROMISC)) 2797 return 0; 2798 status->rx_flags &= ~IEEE80211_RX_RA_MATCH; 2799 } else if (!rx->sta) { 2800 int rate_idx; 2801 if (status->flag & RX_FLAG_HT) 2802 rate_idx = 0; /* TODO: HT rates */ 2803 else 2804 rate_idx = status->rate_idx; 
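/* unknown sender in our IBSS: let the IBSS code create a station entry for it,
 * seeding its rate set with the rate this frame was received at */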
2805 			ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
2806 						 BIT(rate_idx));
2807 		}
2808 		break;
2809 	case NL80211_IFTYPE_MESH_POINT:
2810 		if (!multicast &&
2811 		    compare_ether_addr(sdata->vif.addr,
2812 				       hdr->addr1) != 0) {
2813 			if (!(sdata->dev->flags & IFF_PROMISC))
2814 				return 0;
2815 
2816 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2817 		}
2818 		break;
2819 	case NL80211_IFTYPE_AP_VLAN:
2820 	case NL80211_IFTYPE_AP:
2821 		if (!bssid) {
2822 			if (compare_ether_addr(sdata->vif.addr,
2823 					       hdr->addr1))
2824 				return 0;
2825 		} else if (!ieee80211_bssid_match(bssid,
2826 						  sdata->vif.addr)) {
2827 			/*
2828 			 * Accept public action frames even when the
2829 			 * BSSID doesn't match, this is used for P2P
2830 			 * and location updates. Note that mac80211
2831 			 * itself never looks at these frames.
2832 			 */
2833 			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2834 			    ieee80211_is_public_action(hdr, skb->len))
2835 				return 1;
2836 			if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
2837 			    !ieee80211_is_beacon(hdr->frame_control))
2838 				return 0;
2839 			status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
2840 		}
2841 		break;
2842 	case NL80211_IFTYPE_WDS:
2843 		if (bssid || !ieee80211_is_data(hdr->frame_control))
2844 			return 0;
2845 		if (compare_ether_addr(sdata->u.wds.remote_addr, hdr->addr2))
2846 			return 0;
2847 		break;
2848 	default:
2849 		/* should never get here */
2850 		WARN_ON(1);
2851 		break;
2852 	}
2853 
2854 	return 1;
2855 }
2856 
2857 /*
2858  * This function returns whether or not the SKB was
2859  * destined for RX processing, which, if consume is
2860  * true, is equivalent to whether or not the skb was
2861  * consumed.
2862  */
2863 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2864 					    struct sk_buff *skb, bool consume)
2865 {
2866 	struct ieee80211_local *local = rx->local;
2867 	struct ieee80211_sub_if_data *sdata = rx->sdata;
2868 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2869 	struct ieee80211_hdr *hdr = (void *)skb->data;
2870 	int prepares;
2871 
2872 	rx->skb = skb;
2873 	status->rx_flags |= IEEE80211_RX_RA_MATCH;
2874 	prepares = prepare_for_handlers(rx, hdr);
2875 
2876 	if (!prepares)
2877 		return false;
2878 
2879 	if (!consume) {
2880 		skb = skb_copy(skb, GFP_ATOMIC);
2881 		if (!skb) {
2882 			if (net_ratelimit())
2883 				wiphy_debug(local->hw.wiphy,
2884 					"failed to copy skb for %s\n",
2885 					sdata->name);
2886 			return true;
2887 		}
2888 
2889 		rx->skb = skb;
2890 	}
2891 
2892 	ieee80211_invoke_rx_handlers(rx);
2893 	return true;
2894 }
2895 
2896 /*
2897  * This is the actual Rx frame handler. As it belongs to the Rx path it
2898  * must be called with rcu_read_lock protection.
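 * (ieee80211_rx() acquires the RCU read lock before calling into this path.)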
2899 */ 2900 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 2901 struct sk_buff *skb) 2902 { 2903 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2904 struct ieee80211_local *local = hw_to_local(hw); 2905 struct ieee80211_sub_if_data *sdata; 2906 struct ieee80211_hdr *hdr; 2907 __le16 fc; 2908 struct ieee80211_rx_data rx; 2909 struct ieee80211_sub_if_data *prev; 2910 struct sta_info *sta, *tmp, *prev_sta; 2911 int err = 0; 2912 2913 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 2914 memset(&rx, 0, sizeof(rx)); 2915 rx.skb = skb; 2916 rx.local = local; 2917 2918 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 2919 local->dot11ReceivedFragmentCount++; 2920 2921 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2922 test_bit(SCAN_SW_SCANNING, &local->scanning))) 2923 status->rx_flags |= IEEE80211_RX_IN_SCAN; 2924 2925 if (ieee80211_is_mgmt(fc)) 2926 err = skb_linearize(skb); 2927 else 2928 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 2929 2930 if (err) { 2931 dev_kfree_skb(skb); 2932 return; 2933 } 2934 2935 hdr = (struct ieee80211_hdr *)skb->data; 2936 ieee80211_parse_qos(&rx); 2937 ieee80211_verify_alignment(&rx); 2938 2939 if (ieee80211_is_data(fc)) { 2940 prev_sta = NULL; 2941 2942 for_each_sta_info(local, hdr->addr2, sta, tmp) { 2943 if (!prev_sta) { 2944 prev_sta = sta; 2945 continue; 2946 } 2947 2948 rx.sta = prev_sta; 2949 rx.sdata = prev_sta->sdata; 2950 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2951 2952 prev_sta = sta; 2953 } 2954 2955 if (prev_sta) { 2956 rx.sta = prev_sta; 2957 rx.sdata = prev_sta->sdata; 2958 2959 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2960 return; 2961 goto out; 2962 } 2963 } 2964 2965 prev = NULL; 2966 2967 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 2968 if (!ieee80211_sdata_running(sdata)) 2969 continue; 2970 2971 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 2972 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 2973 continue; 2974 2975 /* 2976 * frame is destined for this interface, but if it's 2977 * not also for the previous one we handle that after 2978 * the loop to avoid copying the SKB once too much 2979 */ 2980 2981 if (!prev) { 2982 prev = sdata; 2983 continue; 2984 } 2985 2986 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2987 rx.sdata = prev; 2988 ieee80211_prepare_and_rx_handle(&rx, skb, false); 2989 2990 prev = sdata; 2991 } 2992 2993 if (prev) { 2994 rx.sta = sta_info_get_bss(prev, hdr->addr2); 2995 rx.sdata = prev; 2996 2997 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 2998 return; 2999 } 3000 3001 out: 3002 dev_kfree_skb(skb); 3003 } 3004 3005 /* 3006 * This is the receive path handler. It is called by a low level driver when an 3007 * 802.11 MPDU is received from the hardware. 3008 */ 3009 void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb) 3010 { 3011 struct ieee80211_local *local = hw_to_local(hw); 3012 struct ieee80211_rate *rate = NULL; 3013 struct ieee80211_supported_band *sband; 3014 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3015 3016 WARN_ON_ONCE(softirq_count() == 0); 3017 3018 if (WARN_ON(status->band < 0 || 3019 status->band >= IEEE80211_NUM_BANDS)) 3020 goto drop; 3021 3022 sband = local->hw.wiphy->bands[status->band]; 3023 if (WARN_ON(!sband)) 3024 goto drop; 3025 3026 /* 3027 * If we're suspending, it is possible although not too likely 3028 * that we'd be receiving frames after having already partially 3029 * quiesced the stack. 
We can't process such frames then since 3030 * that might, for example, cause stations to be added or other 3031 * driver callbacks be invoked. 3032 */ 3033 if (unlikely(local->quiescing || local->suspended)) 3034 goto drop; 3035 3036 /* 3037 * The same happens when we're not even started, 3038 * but that's worth a warning. 3039 */ 3040 if (WARN_ON(!local->started)) 3041 goto drop; 3042 3043 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3044 /* 3045 * Validate the rate, unless a PLCP error means that 3046 * we probably can't have a valid rate here anyway. 3047 */ 3048 3049 if (status->flag & RX_FLAG_HT) { 3050 /* 3051 * rate_idx is MCS index, which can be [0-76] 3052 * as documented on: 3053 * 3054 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3055 * 3056 * Anything else would be some sort of driver or 3057 * hardware error. The driver should catch hardware 3058 * errors. 3059 */ 3060 if (WARN((status->rate_idx < 0 || 3061 status->rate_idx > 76), 3062 "Rate marked as an HT rate but passed " 3063 "status->rate_idx is not " 3064 "an MCS index [0-76]: %d (0x%02x)\n", 3065 status->rate_idx, 3066 status->rate_idx)) 3067 goto drop; 3068 } else { 3069 if (WARN_ON(status->rate_idx < 0 || 3070 status->rate_idx >= sband->n_bitrates)) 3071 goto drop; 3072 rate = &sband->bitrates[status->rate_idx]; 3073 } 3074 } 3075 3076 status->rx_flags = 0; 3077 3078 /* 3079 * key references and virtual interfaces are protected using RCU 3080 * and this requires that we are in a read-side RCU section during 3081 * receive processing 3082 */ 3083 rcu_read_lock(); 3084 3085 /* 3086 * Frames with failed FCS/PLCP checksum are not returned, 3087 * all other frames are returned without radiotap header 3088 * if it was previously present. 3089 * Also, frames with less than 16 bytes are dropped. 3090 */ 3091 skb = ieee80211_rx_monitor(local, skb, rate); 3092 if (!skb) { 3093 rcu_read_unlock(); 3094 return; 3095 } 3096 3097 ieee80211_tpt_led_trig_rx(local, 3098 ((struct ieee80211_hdr *)skb->data)->frame_control, 3099 skb->len); 3100 __ieee80211_rx_handle_packet(hw, skb); 3101 3102 rcu_read_unlock(); 3103 3104 return; 3105 drop: 3106 kfree_skb(skb); 3107 } 3108 EXPORT_SYMBOL(ieee80211_rx); 3109 3110 /* This is a version of the rx handler that can be called from hard irq 3111 * context. Post the skb on the queue and schedule the tasklet */ 3112 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 3113 { 3114 struct ieee80211_local *local = hw_to_local(hw); 3115 3116 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 3117 3118 skb->pkt_type = IEEE80211_RX_MSG; 3119 skb_queue_tail(&local->skb_queue, skb); 3120 tasklet_schedule(&local->tasklet); 3121 } 3122 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 3123
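/*
 * Illustrative sketch only (not part of mac80211): roughly how a low-level
 * driver might hand a received MPDU to mac80211 from its interrupt handler.
 * The function and parameter names here are hypothetical; the rx_status
 * fields shown are the minimum a driver typically fills in before calling
 * ieee80211_rx_irqsafe(). From process or softirq context a driver would
 * call ieee80211_rx() directly instead.
 */
#if 0
static void example_driver_rx_irq(struct ieee80211_hw *hw,
				  struct sk_buff *skb,
				  int signal_dbm, int freq_mhz,
				  enum ieee80211_band band, int rate_idx)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->band = band;		/* e.g. IEEE80211_BAND_2GHZ */
	status->freq = freq_mhz;	/* channel centre frequency in MHz */
	status->signal = signal_dbm;	/* valid with IEEE80211_HW_SIGNAL_DBM */
	status->rate_idx = rate_idx;	/* index into the band's bitrate table */

	/* safe from hard-IRQ context: queues the skb and schedules the tasklet */
	ieee80211_rx_irqsafe(hw, skb);
}
#endif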