/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			       enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
					   struct sk_buff *skb,
					   unsigned int rtap_vendor_space)
{
	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
		if (likely(skb->len > FCS_LEN))
			__pskb_trim(skb, skb->len - FCS_LEN);
		else {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	__pskb_pull(skb, rtap_vendor_space);

	return skb;
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_vendor_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_vendor_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->flag & RX_FLAG_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->flag & RX_FLAG_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;

		/* vendor presence bitmap */
		len += 4;
		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

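/*
 * Example: for a legacy frame with an RX timestamp, a dBm signal value and
 * no per-chain information, the length computed above works out to
 * 8 (radiotap header) + 8 (flags, rate, channel, RX flags) + 8 (TSFT) +
 * 1 (signal) + 1 (antenna) = 26 bytes, before any HT/VHT/A-MPDU or vendor
 * additions.
 */
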
/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->flag & RX_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

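	/*
	 * Note on units: rate->bitrate is given in 100 kbps units while the
	 * radiotap rate field is in 500 kbps units, hence the division by 5
	 * below; e.g. a 54 Mbps rate (bitrate == 540) is reported as 108.
	 * The extra shift halves/quarters the value on 10/5 MHz channels.
	 */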
	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->flag & RX_FLAG_10MHZ)
			shift = 1;
		else if (status->flag & RX_FLAG_5MHZ)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->flag & RX_FLAG_10MHZ)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->flag & RX_FLAG_5MHZ)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == IEEE80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->flag & RX_FLAG_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->flag & RX_FLAG_40MHZ)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->flag & RX_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

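	/*
	 * Note: the VHT bandwidth values written below (0, 1, 4, 11) are the
	 * radiotap codes for a total bandwidth of 20, 40, 80 and 160 MHz,
	 * respectively.
	 */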
	if (status->flag & RX_FLAG_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->flag & RX_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->flag & RX_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->vht_flag & RX_VHT_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		if (status->vht_flag & RX_VHT_FLAG_80MHZ)
			*pos++ = 4;
		else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
			*pos++ = 11;
		else if (status->flag & RX_FLAG_40MHZ)
			*pos++ = 1;
		else /* 20 MHz */
			*pos++ = 0;
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->vht_nss;
		pos += 4;
		/* coding field */
		if (status->flag & RX_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb, *skb2;
	struct net_device *prev_dev = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_vendor_space = 0;

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;

		rtap_vendor_space = sizeof(*rtap) + rtap->len + rtap->pad;
	}

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		present_fcs_len = FCS_LEN;

	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
	if (!pskb_may_pull(origskb, 2 + rtap_vendor_space)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (should_drop_frame(origskb, present_fcs_len,
				      rtap_vendor_space)) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return remove_monitor_info(local, origskb, rtap_vendor_space);
	}

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, origskb);
	needed_headroom = rt_hdrlen - rtap_vendor_space;

	if (should_drop_frame(origskb, present_fcs_len, rtap_vendor_space)) {
		/* only need to expand headroom if necessary */
		skb = origskb;
		origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);

		origskb = remove_monitor_info(local, origskb,
					      rtap_vendor_space);

		if (!skb)
			return origskb;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
			continue;

		if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
			continue;

		if (!ieee80211_sdata_running(sdata))
			continue;

		if (prev_dev) {
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2) {
				skb2->dev = prev_dev;
				netif_receive_skb(skb2);
			}
		}

		prev_dev = sdata->dev;
		ieee80211_rx_stats(sdata->dev, skb->len);
	}

	if (prev_dev) {
		skb->dev = prev_dev;
		netif_receive_skb(skb);
	} else
		dev_kfree_skb(skb);

	return origskb;
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like Atheros hardware adds which is between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
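
/*
 * Worked example of the alignment rules above: with a four-byte aligned
 * payload, an 8-byte IV/RFC 1042 header still leaves the IP header
 * four-byte aligned. Inside an A-MSDU, a payload address of 2 modulo 4
 * plus the 14-byte subframe header puts the IP header at 16 modulo 4,
 * i.e. aligned again.
 */
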
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
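/* (HZ / 10) jiffies is roughly 100 ms regardless of the configured HZ. */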

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

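/*
 * Example of the window handling above: with head_seq_num == 10 and
 * buf_size == 64 the window covers sequence numbers 10..73; an MPDU with
 * sequence number 80 first moves the head up to 80 - 64 + 1 == 17
 * (releasing buffered frames below that), and is then stored at slot
 * 80 % 64 == 16.
 */
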
/*
 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Frames that can
 * be processed right away are added to the frames queue instead.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx)
		goto dont_reorder;

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < ARRAY_SIZE(sta->sta.txq); tid++) {
		struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);

		if (!skb_queue_len(&txqi->queue))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, ac;

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
			if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
				ieee80211_sta_ps_deliver_poll_response(rx->sta);
			else
				set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
		}

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
		ac = ieee802_1d_to_ac[tid & 7];

		/*
		 * If this AC is not trigger-enabled do nothing.
		 *
		 * NB: This could/should check a separate bitmap of trigger-
		 * enabled queues, but for now we only implement uAPSD w/o
		 * TSPEC changes to the ACs, so they're always the same.
		 */
		if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
			return RX_CONTINUE;

		/* if we are in a service period, do nothing */
		if (test_sta_flag(rx->sta, WLAN_STA_SP))
			return RX_CONTINUE;

		if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
			ieee80211_sta_ps_deliver_uapsd(rx->sta);
		else
			set_sta_flag(rx->sta, WLAN_STA_UAPSD);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for stations already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1)) {
				sta->rx_stats.last_rate_idx =
					status->rate_idx;
				sta->rx_stats.last_rate_flag =
					status->flag;
				sta->rx_stats.last_rate_vht_flag =
					status->vht_flag;
				sta->rx_stats.last_rate_vht_nss =
					status->vht_nss;
			}
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control)) {
			sta->rx_stats.last_rate_idx = status->rate_idx;
			sta->rx_stats.last_rate_flag = status->flag;
			sta->rx_stats.last_rate_vht_flag = status->vht_flag;
			sta->rx_stats.last_rate_vht_nss = status->vht_nss;
		}
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;
	sta->rx_stats.bytes += rx->skb->len;
	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats.avg_signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats.chain_signal_avg[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence.
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
	    /* PM bit is only checked in frames where it isn't reserved,
	     * in AP mode it's reserved in non-bufferable management frames
	     * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
	     */
	    (!ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
		 */
		sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;
	const struct ieee80211_cipher_scheme *cs = NULL;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

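	/*
	 * In short, the selection below prefers the pairwise key (PTK) for
	 * unicast frames; BIP-protected group management frames pick the
	 * IGTK via the key ID in the MMIE; for unprotected frames rx->key is
	 * only set so that the later "drop unencrypted" checks can tell that
	 * encryption was expected; otherwise the key index from the IV is
	 * used (per-station GTK first, then the interface's default keys).
	 */
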
	/* start without a key */
	rx->key = NULL;
	fc = hdr->frame_control;

	if (rx->sta) {
		int keyid = rx->sta->ptk_idx;

		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
			cs = rx->sta->cipher_scheme;
			keyid = iwl80211_get_cs_keyid(cs, rx->skb);
			if (unlikely(keyid < 0))
				return RX_DROP_UNUSABLE;
		}
		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
	}

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
		 * The frame was not protected, so skip decryption. However, we
		 * need to set rx->key if there is a key that could have been
		 * used so that the frame may be dropped if encryption would
		 * have been expected.
		 */
		struct ieee80211_key *key = NULL;
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		int i;

		if (ieee80211_is_mgmt(fc) &&
		    is_multicast_ether_addr(hdr->addr1) &&
		    (key = rcu_dereference(rx->sdata->default_mgmt_key)))
			rx->key = key;
		else {
			if (rx->sta) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(rx->sta->gtk[i]);
					if (key)
						break;
				}
			}
			if (!key) {
				for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
					key = rcu_dereference(sdata->keys[i]);
					if (key)
						break;
				}
			}
			if (key)
				rx->key = key;
		}
		return RX_CONTINUE;
	} else {
		u8 keyid;

		/*
		 * The device doesn't give us the IV so we won't be
		 * able to look up the key. That's ok though, we
		 * don't need to decrypt the frame, we just won't
		 * be able to keep statistics accurate.
		 * Except for key threshold notifications, should
		 * we somehow allow the driver to tell us which key
		 * the hardware used if this flag is set?
		 */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		hdrlen = ieee80211_hdrlen(fc);

		if (cs) {
			keyidx = iwl80211_get_cs_keyid(cs, rx->skb);

			if (unlikely(keyidx < 0))
				return RX_DROP_UNUSABLE;
		} else {
			if (rx->skb->len < 8 + hdrlen)
				return RX_DROP_UNUSABLE; /* TODO: count this? */
			/*
			 * no need to call ieee80211_wep_get_keyidx,
			 * it verifies a bunch of things we've done already
			 */
			skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
			keyidx = keyid >> 6;
		}

		/* check per-station GTK first, if multicast packet */
		if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
			rx->key = rcu_dereference(rx->sta->gtk[keyidx]);

		/* if not found, try default key */
		if (!rx->key) {
			rx->key = rcu_dereference(rx->sdata->keys[keyidx]);

			/*
			 * RSNA-protected unicast frames should always be
			 * sent with pairwise or station-to-station keys,
			 * but for WEP we allow using a key index as well.
			 */
			if (rx->key &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
			    rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
			    !is_multicast_ether_addr(hdr->addr1))
				rx->key = NULL;
		}
	}

	if (rx->key) {
		if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
			return RX_DROP_MONITOR;

		/* TODO: add threshold stuff again */
	} else {
		return RX_DROP_MONITOR;
	}

	switch (rx->key->conf.cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		result = ieee80211_crypto_wep_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		result = ieee80211_crypto_tkip_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		result = ieee80211_crypto_ccmp_decrypt(
			rx, IEEE80211_CCMP_256_MIC_LEN);
		break;
	case WLAN_CIPHER_SUITE_AES_CMAC:
		result = ieee80211_crypto_aes_cmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
		result = ieee80211_crypto_aes_cmac_256_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
		result = ieee80211_crypto_aes_gmac_decrypt(rx);
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		result = ieee80211_crypto_gcmp_decrypt(rx);
		break;
	default:
		result = ieee80211_crypto_hw_decrypt(rx);
	}

	/* the hdr variable is invalid after the decrypt handlers */

	/* either the frame has been decrypted or will be dropped */
	status->flag |= RX_FLAG_DECRYPTED;

	return result;
}

static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
			 unsigned int frag, unsigned int seq, int rx_queue,
			 struct sk_buff **skb)
{
	struct ieee80211_fragment_entry *entry;

	entry = &sdata->fragments[sdata->fragment_next++];
	if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
		sdata->fragment_next = 0;

	if (!skb_queue_empty(&entry->skb_list))
		__skb_queue_purge(&entry->skb_list);

	__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
	*skb = NULL;
	entry->first_frag_time = jiffies;
	entry->seq = seq;
	entry->rx_queue = rx_queue;
	entry->last_frag = frag;
	entry->check_sequential_pn = false;
	entry->extra_len = 0;

	return entry;
}

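/*
 * The fragment cache is a small per-interface ring of IEEE80211_FRAGMENT_MAX
 * entries: ieee80211_reassemble_add() above reuses slots round-robin, and
 * ieee80211_reassemble_find() below walks the ring backwards from the most
 * recently used slot looking for a matching in-progress entry.
 */
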
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
			  unsigned int frag, unsigned int seq,
			  int rx_queue, struct ieee80211_hdr *hdr)
{
	struct ieee80211_fragment_entry *entry;
	int i, idx;

	idx = sdata->fragment_next;
	for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
		struct ieee80211_hdr *f_hdr;

		idx--;
		if (idx < 0)
			idx = IEEE80211_FRAGMENT_MAX - 1;

		entry = &sdata->fragments[idx];
		if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
		    entry->rx_queue != rx_queue ||
		    entry->last_frag + 1 != frag)
			continue;

		f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;

		/*
		 * Check ftype and addresses are equal, else check next fragment
		 */
		if (((hdr->frame_control ^ f_hdr->frame_control) &
		     cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
		    !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
		    !ether_addr_equal(hdr->addr2, f_hdr->addr2))
			continue;

		if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
			__skb_queue_purge(&entry->skb_list);
			continue;
		}
		return entry;
	}

	return NULL;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr;
	u16 sc;
	__le16 fc;
	unsigned int frag, seq;
	struct ieee80211_fragment_entry *entry;
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	hdr = (struct ieee80211_hdr *)rx->skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_ctl(fc))
		return RX_CONTINUE;

	sc = le16_to_cpu(hdr->seq_ctrl);
	frag = sc & IEEE80211_SCTL_FRAG;

	if (is_multicast_ether_addr(hdr->addr1)) {
		I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
		goto out_no_led;
	}

	if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
		goto out;

	I802_DEBUG_INC(rx->local->rx_handlers_fragments);

	if (skb_linearize(rx->skb))
		return RX_DROP_UNUSABLE;

	/*
	 * skb_linearize() might change the skb->data and
	 * previously cached variables (in this case, hdr) need to
	 * be refreshed with the new data.
	 */
	hdr = (struct ieee80211_hdr *)rx->skb->data;
	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;

	if (frag == 0) {
		/* This is the first fragment of a new frame. */
		entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
						 rx->seqno_idx, &(rx->skb));
		if (rx->key &&
		    (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
		     rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
		    ieee80211_has_protected(fc)) {
			int queue = rx->security_idx;

			/* Store CCMP/GCMP PN so that we can verify that the
			 * next fragment has a sequential PN value.
			 */
			entry->check_sequential_pn = true;
			memcpy(entry->last_pn,
			       rx->key->u.ccmp.rx_pn[queue],
			       IEEE80211_CCMP_PN_LEN);
			BUILD_BUG_ON(offsetof(struct ieee80211_key,
					      u.ccmp.rx_pn) !=
				     offsetof(struct ieee80211_key,
					      u.gcmp.rx_pn));
			BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
				     sizeof(rx->key->u.gcmp.rx_pn[queue]));
			BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
				     IEEE80211_GCMP_PN_LEN);
		}
		return RX_QUEUED;
	}

	/* This is a fragment for a frame that should already be pending in
	 * fragment cache. Add this fragment to the end of the pending entry.
1903 */ 1904 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 1905 rx->seqno_idx, hdr); 1906 if (!entry) { 1907 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1908 return RX_DROP_MONITOR; 1909 } 1910 1911 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 1912 * MPDU PN values are not incrementing in steps of 1." 1913 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 1914 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 1915 */ 1916 if (entry->check_sequential_pn) { 1917 int i; 1918 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 1919 int queue; 1920 1921 if (!rx->key || 1922 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 1923 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 1924 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 1925 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 1926 return RX_DROP_UNUSABLE; 1927 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 1928 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 1929 pn[i]++; 1930 if (pn[i]) 1931 break; 1932 } 1933 queue = rx->security_idx; 1934 rpn = rx->key->u.ccmp.rx_pn[queue]; 1935 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 1936 return RX_DROP_UNUSABLE; 1937 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 1938 } 1939 1940 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 1941 __skb_queue_tail(&entry->skb_list, rx->skb); 1942 entry->last_frag = frag; 1943 entry->extra_len += rx->skb->len; 1944 if (ieee80211_has_morefrags(fc)) { 1945 rx->skb = NULL; 1946 return RX_QUEUED; 1947 } 1948 1949 rx->skb = __skb_dequeue(&entry->skb_list); 1950 if (skb_tailroom(rx->skb) < entry->extra_len) { 1951 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 1952 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 1953 GFP_ATOMIC))) { 1954 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1955 __skb_queue_purge(&entry->skb_list); 1956 return RX_DROP_UNUSABLE; 1957 } 1958 } 1959 while ((skb = __skb_dequeue(&entry->skb_list))) { 1960 memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); 1961 dev_kfree_skb(skb); 1962 } 1963 1964 /* Complete frame has been reassembled - process it now */ 1965 status = IEEE80211_SKB_RXCB(rx->skb); 1966 1967 out: 1968 ieee80211_led_rx(rx->local); 1969 out_no_led: 1970 if (rx->sta) 1971 rx->sta->rx_stats.packets++; 1972 return RX_CONTINUE; 1973 } 1974 1975 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 1976 { 1977 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 1978 return -EACCES; 1979 1980 return 0; 1981 } 1982 1983 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 1984 { 1985 struct sk_buff *skb = rx->skb; 1986 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1987 1988 /* 1989 * Pass through unencrypted frames if the hardware has 1990 * decrypted them already. 1991 */ 1992 if (status->flag & RX_FLAG_DECRYPTED) 1993 return 0; 1994 1995 /* Drop unencrypted frames if key is set. */ 1996 if (unlikely(!ieee80211_has_protected(fc) && 1997 !ieee80211_is_nullfunc(fc) && 1998 ieee80211_is_data(fc) && rx->key)) 1999 return -EACCES; 2000 2001 return 0; 2002 } 2003 2004 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2005 { 2006 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2007 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2008 __le16 fc = hdr->frame_control; 2009 2010 /* 2011 * Pass through unencrypted frames if the hardware has 2012 * decrypted them already. 
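 * Drivers indicate hardware decryption by setting RX_FLAG_DECRYPTED
 * in the rx status.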
2013 */ 2014 if (status->flag & RX_FLAG_DECRYPTED) 2015 return 0; 2016 2017 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2018 if (unlikely(!ieee80211_has_protected(fc) && 2019 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2020 rx->key)) { 2021 if (ieee80211_is_deauth(fc) || 2022 ieee80211_is_disassoc(fc)) 2023 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2024 rx->skb->data, 2025 rx->skb->len); 2026 return -EACCES; 2027 } 2028 /* BIP does not use Protected field, so need to check MMIE */ 2029 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2030 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2031 if (ieee80211_is_deauth(fc) || 2032 ieee80211_is_disassoc(fc)) 2033 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2034 rx->skb->data, 2035 rx->skb->len); 2036 return -EACCES; 2037 } 2038 /* 2039 * When using MFP, Action frames are not allowed prior to 2040 * having configured keys. 2041 */ 2042 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2043 ieee80211_is_robust_mgmt_frame(rx->skb))) 2044 return -EACCES; 2045 } 2046 2047 return 0; 2048 } 2049 2050 static int 2051 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2052 { 2053 struct ieee80211_sub_if_data *sdata = rx->sdata; 2054 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2055 bool check_port_control = false; 2056 struct ethhdr *ehdr; 2057 int ret; 2058 2059 *port_control = false; 2060 if (ieee80211_has_a4(hdr->frame_control) && 2061 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2062 return -1; 2063 2064 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2065 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2066 2067 if (!sdata->u.mgd.use_4addr) 2068 return -1; 2069 else 2070 check_port_control = true; 2071 } 2072 2073 if (is_multicast_ether_addr(hdr->addr1) && 2074 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2075 return -1; 2076 2077 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2078 if (ret < 0) 2079 return ret; 2080 2081 ehdr = (struct ethhdr *) rx->skb->data; 2082 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2083 *port_control = true; 2084 else if (check_port_control) 2085 return -1; 2086 2087 return 0; 2088 } 2089 2090 /* 2091 * requires that rx->skb is a frame with ethernet header 2092 */ 2093 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2094 { 2095 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2096 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2097 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2098 2099 /* 2100 * Allow EAPOL frames to us/the PAE group address regardless 2101 * of whether the frame was encrypted or not. 
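 * This is needed so that the initial 802.1X/EAPOL handshake can
 * complete before any pairwise key has been installed.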
2102 */ 2103 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2104 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2105 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2106 return true; 2107 2108 if (ieee80211_802_1x_port_control(rx) || 2109 ieee80211_drop_unencrypted(rx, fc)) 2110 return false; 2111 2112 return true; 2113 } 2114 2115 /* 2116 * requires that rx->skb is a frame with ethernet header 2117 */ 2118 static void 2119 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2120 { 2121 struct ieee80211_sub_if_data *sdata = rx->sdata; 2122 struct net_device *dev = sdata->dev; 2123 struct sk_buff *skb, *xmit_skb; 2124 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2125 struct sta_info *dsta; 2126 2127 skb = rx->skb; 2128 xmit_skb = NULL; 2129 2130 ieee80211_rx_stats(dev, skb->len); 2131 2132 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2133 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2134 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2135 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2136 if (is_multicast_ether_addr(ehdr->h_dest)) { 2137 /* 2138 * send multicast frames both to higher layers in 2139 * local net stack and back to the wireless medium 2140 */ 2141 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2142 if (!xmit_skb) 2143 net_info_ratelimited("%s: failed to clone multicast frame\n", 2144 dev->name); 2145 } else { 2146 dsta = sta_info_get(sdata, skb->data); 2147 if (dsta) { 2148 /* 2149 * The destination station is associated to 2150 * this AP (in this VLAN), so send the frame 2151 * directly to it and do not pass it to local 2152 * net stack. 2153 */ 2154 xmit_skb = skb; 2155 skb = NULL; 2156 } 2157 } 2158 } 2159 2160 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2161 if (skb) { 2162 /* 'align' will only take the values 0 or 2 here since all 2163 * frames are required to be aligned to 2-byte boundaries 2164 * when being passed to mac80211; the code here works just 2165 * as well if that isn't true, but mac80211 assumes it can 2166 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2167 */ 2168 int align; 2169 2170 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2171 if (align) { 2172 if (WARN_ON(skb_headroom(skb) < 3)) { 2173 dev_kfree_skb(skb); 2174 skb = NULL; 2175 } else { 2176 u8 *data = skb->data; 2177 size_t len = skb_headlen(skb); 2178 skb->data -= align; 2179 memmove(skb->data, data, len); 2180 skb_set_tail_pointer(skb, len); 2181 } 2182 } 2183 } 2184 #endif 2185 2186 if (skb) { 2187 /* deliver to local stack */ 2188 skb->protocol = eth_type_trans(skb, dev); 2189 memset(skb->cb, 0, sizeof(skb->cb)); 2190 if (rx->napi) 2191 napi_gro_receive(rx->napi, skb); 2192 else 2193 netif_receive_skb(skb); 2194 } 2195 2196 if (xmit_skb) { 2197 /* 2198 * Send to wireless media and increase priority by 256 to 2199 * keep the received priority instead of reclassifying 2200 * the frame (see cfg80211_classify8021d). 
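 * (cfg80211_classify8021d() maps priorities in the 256..263 range
 * directly back to a TID instead of re-reading the DSCP field.)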
2201 */ 2202 xmit_skb->priority += 256; 2203 xmit_skb->protocol = htons(ETH_P_802_3); 2204 skb_reset_network_header(xmit_skb); 2205 skb_reset_mac_header(xmit_skb); 2206 dev_queue_xmit(xmit_skb); 2207 } 2208 } 2209 2210 static ieee80211_rx_result debug_noinline 2211 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2212 { 2213 struct net_device *dev = rx->sdata->dev; 2214 struct sk_buff *skb = rx->skb; 2215 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2216 __le16 fc = hdr->frame_control; 2217 struct sk_buff_head frame_list; 2218 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2219 2220 if (unlikely(!ieee80211_is_data(fc))) 2221 return RX_CONTINUE; 2222 2223 if (unlikely(!ieee80211_is_data_present(fc))) 2224 return RX_DROP_MONITOR; 2225 2226 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2227 return RX_CONTINUE; 2228 2229 if (ieee80211_has_a4(hdr->frame_control) && 2230 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2231 !rx->sdata->u.vlan.sta) 2232 return RX_DROP_UNUSABLE; 2233 2234 if (is_multicast_ether_addr(hdr->addr1) && 2235 ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2236 rx->sdata->u.vlan.sta) || 2237 (rx->sdata->vif.type == NL80211_IFTYPE_STATION && 2238 rx->sdata->u.mgd.use_4addr))) 2239 return RX_DROP_UNUSABLE; 2240 2241 skb->dev = dev; 2242 __skb_queue_head_init(&frame_list); 2243 2244 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2245 rx->sdata->vif.type, 2246 rx->local->hw.extra_tx_headroom, true); 2247 2248 while (!skb_queue_empty(&frame_list)) { 2249 rx->skb = __skb_dequeue(&frame_list); 2250 2251 if (!ieee80211_frame_allowed(rx, fc)) { 2252 dev_kfree_skb(rx->skb); 2253 continue; 2254 } 2255 2256 ieee80211_deliver_skb(rx); 2257 } 2258 2259 return RX_QUEUED; 2260 } 2261 2262 #ifdef CONFIG_MAC80211_MESH 2263 static ieee80211_rx_result 2264 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2265 { 2266 struct ieee80211_hdr *fwd_hdr, *hdr; 2267 struct ieee80211_tx_info *info; 2268 struct ieee80211s_hdr *mesh_hdr; 2269 struct sk_buff *skb = rx->skb, *fwd_skb; 2270 struct ieee80211_local *local = rx->local; 2271 struct ieee80211_sub_if_data *sdata = rx->sdata; 2272 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2273 u16 ac, q, hdrlen; 2274 2275 hdr = (struct ieee80211_hdr *) skb->data; 2276 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2277 2278 /* make sure fixed part of mesh header is there, also checks skb len */ 2279 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2280 return RX_DROP_MONITOR; 2281 2282 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2283 2284 /* make sure full mesh header is there, also checks skb len */ 2285 if (!pskb_may_pull(rx->skb, 2286 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2287 return RX_DROP_MONITOR; 2288 2289 /* reload pointers */ 2290 hdr = (struct ieee80211_hdr *) skb->data; 2291 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2292 2293 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2294 return RX_DROP_MONITOR; 2295 2296 /* frame is in RMC, don't forward */ 2297 if (ieee80211_is_data(hdr->frame_control) && 2298 is_multicast_ether_addr(hdr->addr1) && 2299 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2300 return RX_DROP_MONITOR; 2301 2302 if (!ieee80211_is_data(hdr->frame_control)) 2303 return RX_CONTINUE; 2304 2305 if (!mesh_hdr->ttl) 2306 return RX_DROP_MONITOR; 2307 2308 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2309 struct mesh_path *mppath; 2310 char *proxied_addr; 2311 char *mpp_addr; 2312 2313 if (is_multicast_ether_addr(hdr->addr1)) { 2314 
mpp_addr = hdr->addr3; 2315 proxied_addr = mesh_hdr->eaddr1; 2316 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2317 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2318 mpp_addr = hdr->addr4; 2319 proxied_addr = mesh_hdr->eaddr2; 2320 } else { 2321 return RX_DROP_MONITOR; 2322 } 2323 2324 rcu_read_lock(); 2325 mppath = mpp_path_lookup(sdata, proxied_addr); 2326 if (!mppath) { 2327 mpp_path_add(sdata, proxied_addr, mpp_addr); 2328 } else { 2329 spin_lock_bh(&mppath->state_lock); 2330 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2331 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2332 mppath->exp_time = jiffies; 2333 spin_unlock_bh(&mppath->state_lock); 2334 } 2335 rcu_read_unlock(); 2336 } 2337 2338 /* Frame has reached destination. Don't forward */ 2339 if (!is_multicast_ether_addr(hdr->addr1) && 2340 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2341 return RX_CONTINUE; 2342 2343 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2344 q = sdata->vif.hw_queue[ac]; 2345 if (ieee80211_queue_stopped(&local->hw, q)) { 2346 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2347 return RX_DROP_MONITOR; 2348 } 2349 skb_set_queue_mapping(skb, q); 2350 2351 if (!--mesh_hdr->ttl) { 2352 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2353 goto out; 2354 } 2355 2356 if (!ifmsh->mshcfg.dot11MeshForwarding) 2357 goto out; 2358 2359 fwd_skb = skb_copy(skb, GFP_ATOMIC); 2360 if (!fwd_skb) { 2361 net_info_ratelimited("%s: failed to clone mesh frame\n", 2362 sdata->name); 2363 goto out; 2364 } 2365 2366 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2367 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2368 info = IEEE80211_SKB_CB(fwd_skb); 2369 memset(info, 0, sizeof(*info)); 2370 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2371 info->control.vif = &rx->sdata->vif; 2372 info->control.jiffies = jiffies; 2373 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2374 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2375 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2376 /* update power mode indication when forwarding */ 2377 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2378 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2379 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2380 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2381 } else { 2382 /* unable to resolve next hop */ 2383 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2384 fwd_hdr->addr3, 0, 2385 WLAN_REASON_MESH_PATH_NOFORWARD, 2386 fwd_hdr->addr2); 2387 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2388 kfree_skb(fwd_skb); 2389 return RX_DROP_MONITOR; 2390 } 2391 2392 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2393 ieee80211_add_pending_skb(local, fwd_skb); 2394 out: 2395 if (is_multicast_ether_addr(hdr->addr1)) 2396 return RX_CONTINUE; 2397 return RX_DROP_MONITOR; 2398 } 2399 #endif 2400 2401 static ieee80211_rx_result debug_noinline 2402 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2403 { 2404 struct ieee80211_sub_if_data *sdata = rx->sdata; 2405 struct ieee80211_local *local = rx->local; 2406 struct net_device *dev = sdata->dev; 2407 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2408 __le16 fc = hdr->frame_control; 2409 bool port_control; 2410 int err; 2411 2412 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2413 return RX_CONTINUE; 2414 2415 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2416 return RX_DROP_MONITOR; 2417 2418 if (rx->sta) { 2419 /* The seqno index has the same 
property as needed 2420 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2421 * for non-QoS-data frames. Here we know it's a data 2422 * frame, so count MSDUs. 2423 */ 2424 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2425 } 2426 2427 /* 2428 * Send unexpected-4addr-frame event to hostapd. For older versions, 2429 * also drop the frame to cooked monitor interfaces. 2430 */ 2431 if (ieee80211_has_a4(hdr->frame_control) && 2432 sdata->vif.type == NL80211_IFTYPE_AP) { 2433 if (rx->sta && 2434 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2435 cfg80211_rx_unexpected_4addr_frame( 2436 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2437 return RX_DROP_MONITOR; 2438 } 2439 2440 err = __ieee80211_data_to_8023(rx, &port_control); 2441 if (unlikely(err)) 2442 return RX_DROP_UNUSABLE; 2443 2444 if (!ieee80211_frame_allowed(rx, fc)) 2445 return RX_DROP_MONITOR; 2446 2447 /* directly handle TDLS channel switch requests/responses */ 2448 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2449 cpu_to_be16(ETH_P_TDLS))) { 2450 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2451 2452 if (pskb_may_pull(rx->skb, 2453 offsetof(struct ieee80211_tdls_data, u)) && 2454 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2455 tf->category == WLAN_CATEGORY_TDLS && 2456 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2457 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2458 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2459 schedule_work(&local->tdls_chsw_work); 2460 if (rx->sta) 2461 rx->sta->rx_stats.packets++; 2462 2463 return RX_QUEUED; 2464 } 2465 } 2466 2467 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2468 unlikely(port_control) && sdata->bss) { 2469 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2470 u.ap); 2471 dev = sdata->dev; 2472 rx->sdata = sdata; 2473 } 2474 2475 rx->skb->dev = dev; 2476 2477 if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2478 !is_multicast_ether_addr( 2479 ((struct ethhdr *)rx->skb->data)->h_dest) && 2480 (!local->scanning && 2481 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) { 2482 mod_timer(&local->dynamic_ps_timer, jiffies + 2483 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2484 } 2485 2486 ieee80211_deliver_skb(rx); 2487 2488 return RX_QUEUED; 2489 } 2490 2491 static ieee80211_rx_result debug_noinline 2492 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2493 { 2494 struct sk_buff *skb = rx->skb; 2495 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2496 struct tid_ampdu_rx *tid_agg_rx; 2497 u16 start_seq_num; 2498 u16 tid; 2499 2500 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2501 return RX_CONTINUE; 2502 2503 if (ieee80211_is_back_req(bar->frame_control)) { 2504 struct { 2505 __le16 control, start_seq_num; 2506 } __packed bar_data; 2507 struct ieee80211_event event = { 2508 .type = BAR_RX_EVENT, 2509 }; 2510 2511 if (!rx->sta) 2512 return RX_DROP_MONITOR; 2513 2514 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2515 &bar_data, sizeof(bar_data))) 2516 return RX_DROP_MONITOR; 2517 2518 tid = le16_to_cpu(bar_data.control) >> 12; 2519 2520 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2521 if (!tid_agg_rx) 2522 return RX_DROP_MONITOR; 2523 2524 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2525 event.u.ba.tid = tid; 2526 event.u.ba.ssn = start_seq_num; 2527 event.u.ba.sta = &rx->sta->sta; 2528 2529 /* reset session timer */ 2530 if (tid_agg_rx->timeout) 2531 
mod_timer(&tid_agg_rx->session_timer, 2532 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2533 2534 spin_lock(&tid_agg_rx->reorder_lock); 2535 /* release stored frames up to start of BAR */ 2536 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2537 start_seq_num, frames); 2538 spin_unlock(&tid_agg_rx->reorder_lock); 2539 2540 drv_event_callback(rx->local, rx->sdata, &event); 2541 2542 kfree_skb(skb); 2543 return RX_QUEUED; 2544 } 2545 2546 /* 2547 * After this point, we only want management frames, 2548 * so we can drop all remaining control frames to 2549 * cooked monitor interfaces. 2550 */ 2551 return RX_DROP_MONITOR; 2552 } 2553 2554 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2555 struct ieee80211_mgmt *mgmt, 2556 size_t len) 2557 { 2558 struct ieee80211_local *local = sdata->local; 2559 struct sk_buff *skb; 2560 struct ieee80211_mgmt *resp; 2561 2562 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2563 /* Not to own unicast address */ 2564 return; 2565 } 2566 2567 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2568 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2569 /* Not from the current AP or not associated yet. */ 2570 return; 2571 } 2572 2573 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2574 /* Too short SA Query request frame */ 2575 return; 2576 } 2577 2578 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2579 if (skb == NULL) 2580 return; 2581 2582 skb_reserve(skb, local->hw.extra_tx_headroom); 2583 resp = (struct ieee80211_mgmt *) skb_put(skb, 24); 2584 memset(resp, 0, 24); 2585 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2586 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2587 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2588 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2589 IEEE80211_STYPE_ACTION); 2590 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2591 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2592 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2593 memcpy(resp->u.action.u.sa_query.trans_id, 2594 mgmt->u.action.u.sa_query.trans_id, 2595 WLAN_SA_QUERY_TR_ID_LEN); 2596 2597 ieee80211_tx_skb(sdata, skb); 2598 } 2599 2600 static ieee80211_rx_result debug_noinline 2601 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2602 { 2603 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2604 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2605 2606 /* 2607 * From here on, look only at management frames. 2608 * Data and control frames are already handled, 2609 * and unknown (reserved) frames are useless. 
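 * Management frames shorter than the 24-byte header are dropped
 * here as well, since the later handlers could not parse them.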
2610 */ 2611 if (rx->skb->len < 24) 2612 return RX_DROP_MONITOR; 2613 2614 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2615 return RX_DROP_MONITOR; 2616 2617 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2618 ieee80211_is_beacon(mgmt->frame_control) && 2619 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2620 int sig = 0; 2621 2622 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2623 sig = status->signal; 2624 2625 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2626 rx->skb->data, rx->skb->len, 2627 status->freq, sig); 2628 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2629 } 2630 2631 if (ieee80211_drop_unencrypted_mgmt(rx)) 2632 return RX_DROP_UNUSABLE; 2633 2634 return RX_CONTINUE; 2635 } 2636 2637 static ieee80211_rx_result debug_noinline 2638 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2639 { 2640 struct ieee80211_local *local = rx->local; 2641 struct ieee80211_sub_if_data *sdata = rx->sdata; 2642 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2643 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2644 int len = rx->skb->len; 2645 2646 if (!ieee80211_is_action(mgmt->frame_control)) 2647 return RX_CONTINUE; 2648 2649 /* drop too small frames */ 2650 if (len < IEEE80211_MIN_ACTION_SIZE) 2651 return RX_DROP_UNUSABLE; 2652 2653 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 2654 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 2655 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 2656 return RX_DROP_UNUSABLE; 2657 2658 switch (mgmt->u.action.category) { 2659 case WLAN_CATEGORY_HT: 2660 /* reject HT action frames from stations not supporting HT */ 2661 if (!rx->sta->sta.ht_cap.ht_supported) 2662 goto invalid; 2663 2664 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2665 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2666 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2667 sdata->vif.type != NL80211_IFTYPE_AP && 2668 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2669 break; 2670 2671 /* verify action & smps_control/chanwidth are present */ 2672 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2673 goto invalid; 2674 2675 switch (mgmt->u.action.u.ht_smps.action) { 2676 case WLAN_HT_ACTION_SMPS: { 2677 struct ieee80211_supported_band *sband; 2678 enum ieee80211_smps_mode smps_mode; 2679 2680 /* convert to HT capability */ 2681 switch (mgmt->u.action.u.ht_smps.smps_control) { 2682 case WLAN_HT_SMPS_CONTROL_DISABLED: 2683 smps_mode = IEEE80211_SMPS_OFF; 2684 break; 2685 case WLAN_HT_SMPS_CONTROL_STATIC: 2686 smps_mode = IEEE80211_SMPS_STATIC; 2687 break; 2688 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 2689 smps_mode = IEEE80211_SMPS_DYNAMIC; 2690 break; 2691 default: 2692 goto invalid; 2693 } 2694 2695 /* if no change do nothing */ 2696 if (rx->sta->sta.smps_mode == smps_mode) 2697 goto handled; 2698 rx->sta->sta.smps_mode = smps_mode; 2699 2700 sband = rx->local->hw.wiphy->bands[status->band]; 2701 2702 rate_control_rate_update(local, sband, rx->sta, 2703 IEEE80211_RC_SMPS_CHANGED); 2704 goto handled; 2705 } 2706 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 2707 struct ieee80211_supported_band *sband; 2708 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 2709 enum ieee80211_sta_rx_bandwidth max_bw, new_bw; 2710 2711 /* If it doesn't support 40 MHz it can't change ... 
*/ 2712 if (!(rx->sta->sta.ht_cap.cap & 2713 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 2714 goto handled; 2715 2716 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 2717 max_bw = IEEE80211_STA_RX_BW_20; 2718 else 2719 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 2720 2721 /* set cur_max_bandwidth and recalc sta bw */ 2722 rx->sta->cur_max_bandwidth = max_bw; 2723 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 2724 2725 if (rx->sta->sta.bandwidth == new_bw) 2726 goto handled; 2727 2728 rx->sta->sta.bandwidth = new_bw; 2729 sband = rx->local->hw.wiphy->bands[status->band]; 2730 2731 rate_control_rate_update(local, sband, rx->sta, 2732 IEEE80211_RC_BW_CHANGED); 2733 goto handled; 2734 } 2735 default: 2736 goto invalid; 2737 } 2738 2739 break; 2740 case WLAN_CATEGORY_PUBLIC: 2741 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2742 goto invalid; 2743 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2744 break; 2745 if (!rx->sta) 2746 break; 2747 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 2748 break; 2749 if (mgmt->u.action.u.ext_chan_switch.action_code != 2750 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 2751 break; 2752 if (len < offsetof(struct ieee80211_mgmt, 2753 u.action.u.ext_chan_switch.variable)) 2754 goto invalid; 2755 goto queue; 2756 case WLAN_CATEGORY_VHT: 2757 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2758 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2759 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2760 sdata->vif.type != NL80211_IFTYPE_AP && 2761 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2762 break; 2763 2764 /* verify action code is present */ 2765 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2766 goto invalid; 2767 2768 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 2769 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 2770 u8 opmode; 2771 2772 /* verify opmode is present */ 2773 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 2774 goto invalid; 2775 2776 opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode; 2777 2778 ieee80211_vht_handle_opmode(rx->sdata, rx->sta, 2779 opmode, status->band); 2780 goto handled; 2781 } 2782 case WLAN_VHT_ACTION_GROUPID_MGMT: { 2783 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 2784 goto invalid; 2785 goto queue; 2786 } 2787 default: 2788 break; 2789 } 2790 break; 2791 case WLAN_CATEGORY_BACK: 2792 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2793 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 2794 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 2795 sdata->vif.type != NL80211_IFTYPE_AP && 2796 sdata->vif.type != NL80211_IFTYPE_ADHOC) 2797 break; 2798 2799 /* verify action_code is present */ 2800 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2801 break; 2802 2803 switch (mgmt->u.action.u.addba_req.action_code) { 2804 case WLAN_ACTION_ADDBA_REQ: 2805 if (len < (IEEE80211_MIN_ACTION_SIZE + 2806 sizeof(mgmt->u.action.u.addba_req))) 2807 goto invalid; 2808 break; 2809 case WLAN_ACTION_ADDBA_RESP: 2810 if (len < (IEEE80211_MIN_ACTION_SIZE + 2811 sizeof(mgmt->u.action.u.addba_resp))) 2812 goto invalid; 2813 break; 2814 case WLAN_ACTION_DELBA: 2815 if (len < (IEEE80211_MIN_ACTION_SIZE + 2816 sizeof(mgmt->u.action.u.delba))) 2817 goto invalid; 2818 break; 2819 default: 2820 goto invalid; 2821 } 2822 2823 goto queue; 2824 case WLAN_CATEGORY_SPECTRUM_MGMT: 2825 /* verify action_code is present */ 2826 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 2827 break; 2828 2829 switch (mgmt->u.action.u.measurement.action_code) { 2830 case WLAN_ACTION_SPCT_MSR_REQ: 2831 if (status->band != IEEE80211_BAND_5GHZ) 2832 break; 2833 2834 if (len < (IEEE80211_MIN_ACTION_SIZE + 2835 
sizeof(mgmt->u.action.u.measurement))) 2836 break; 2837 2838 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2839 break; 2840 2841 ieee80211_process_measurement_req(sdata, mgmt, len); 2842 goto handled; 2843 case WLAN_ACTION_SPCT_CHL_SWITCH: { 2844 u8 *bssid; 2845 if (len < (IEEE80211_MIN_ACTION_SIZE + 2846 sizeof(mgmt->u.action.u.chan_switch))) 2847 break; 2848 2849 if (sdata->vif.type != NL80211_IFTYPE_STATION && 2850 sdata->vif.type != NL80211_IFTYPE_ADHOC && 2851 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 2852 break; 2853 2854 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2855 bssid = sdata->u.mgd.bssid; 2856 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 2857 bssid = sdata->u.ibss.bssid; 2858 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 2859 bssid = mgmt->sa; 2860 else 2861 break; 2862 2863 if (!ether_addr_equal(mgmt->bssid, bssid)) 2864 break; 2865 2866 goto queue; 2867 } 2868 } 2869 break; 2870 case WLAN_CATEGORY_SA_QUERY: 2871 if (len < (IEEE80211_MIN_ACTION_SIZE + 2872 sizeof(mgmt->u.action.u.sa_query))) 2873 break; 2874 2875 switch (mgmt->u.action.u.sa_query.action) { 2876 case WLAN_ACTION_SA_QUERY_REQUEST: 2877 if (sdata->vif.type != NL80211_IFTYPE_STATION) 2878 break; 2879 ieee80211_process_sa_query_req(sdata, mgmt, len); 2880 goto handled; 2881 } 2882 break; 2883 case WLAN_CATEGORY_SELF_PROTECTED: 2884 if (len < (IEEE80211_MIN_ACTION_SIZE + 2885 sizeof(mgmt->u.action.u.self_prot.action_code))) 2886 break; 2887 2888 switch (mgmt->u.action.u.self_prot.action_code) { 2889 case WLAN_SP_MESH_PEERING_OPEN: 2890 case WLAN_SP_MESH_PEERING_CLOSE: 2891 case WLAN_SP_MESH_PEERING_CONFIRM: 2892 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2893 goto invalid; 2894 if (sdata->u.mesh.user_mpm) 2895 /* userspace handles this frame */ 2896 break; 2897 goto queue; 2898 case WLAN_SP_MGK_INFORM: 2899 case WLAN_SP_MGK_ACK: 2900 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2901 goto invalid; 2902 break; 2903 } 2904 break; 2905 case WLAN_CATEGORY_MESH_ACTION: 2906 if (len < (IEEE80211_MIN_ACTION_SIZE + 2907 sizeof(mgmt->u.action.u.mesh_action.action_code))) 2908 break; 2909 2910 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2911 break; 2912 if (mesh_action_is_path_sel(mgmt) && 2913 !mesh_path_sel_is_hwmp(sdata)) 2914 break; 2915 goto queue; 2916 } 2917 2918 return RX_CONTINUE; 2919 2920 invalid: 2921 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 2922 /* will return in the next handlers */ 2923 return RX_CONTINUE; 2924 2925 handled: 2926 if (rx->sta) 2927 rx->sta->rx_stats.packets++; 2928 dev_kfree_skb(rx->skb); 2929 return RX_QUEUED; 2930 2931 queue: 2932 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 2933 skb_queue_tail(&sdata->skb_queue, rx->skb); 2934 ieee80211_queue_work(&local->hw, &sdata->work); 2935 if (rx->sta) 2936 rx->sta->rx_stats.packets++; 2937 return RX_QUEUED; 2938 } 2939 2940 static ieee80211_rx_result debug_noinline 2941 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 2942 { 2943 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2944 int sig = 0; 2945 2946 /* skip known-bad action frames and return them in the next handler */ 2947 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 2948 return RX_CONTINUE; 2949 2950 /* 2951 * Getting here means the kernel doesn't know how to handle 2952 * it, but maybe userspace does ... include returned frames 2953 * so userspace can register for those to know whether ones 2954 * it transmitted were processed or returned. 
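 * cfg80211_rx_mgmt() returns true if some userspace registration
 * matched the frame, in which case it is consumed here.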
2955 */ 2956 2957 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM)) 2958 sig = status->signal; 2959 2960 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 2961 rx->skb->data, rx->skb->len, 0)) { 2962 if (rx->sta) 2963 rx->sta->rx_stats.packets++; 2964 dev_kfree_skb(rx->skb); 2965 return RX_QUEUED; 2966 } 2967 2968 return RX_CONTINUE; 2969 } 2970 2971 static ieee80211_rx_result debug_noinline 2972 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 2973 { 2974 struct ieee80211_local *local = rx->local; 2975 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2976 struct sk_buff *nskb; 2977 struct ieee80211_sub_if_data *sdata = rx->sdata; 2978 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2979 2980 if (!ieee80211_is_action(mgmt->frame_control)) 2981 return RX_CONTINUE; 2982 2983 /* 2984 * For AP mode, hostapd is responsible for handling any action 2985 * frames that we didn't handle, including returning unknown 2986 * ones. For all other modes we will return them to the sender, 2987 * setting the 0x80 bit in the action category, as required by 2988 * 802.11-2012 9.24.4. 2989 * Newer versions of hostapd shall also use the management frame 2990 * registration mechanisms, but older ones still use cooked 2991 * monitor interfaces so push all frames there. 2992 */ 2993 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 2994 (sdata->vif.type == NL80211_IFTYPE_AP || 2995 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 2996 return RX_DROP_MONITOR; 2997 2998 if (is_multicast_ether_addr(mgmt->da)) 2999 return RX_DROP_MONITOR; 3000 3001 /* do not return rejected action frames */ 3002 if (mgmt->u.action.category & 0x80) 3003 return RX_DROP_UNUSABLE; 3004 3005 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3006 GFP_ATOMIC); 3007 if (nskb) { 3008 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3009 3010 nmgmt->u.action.category |= 0x80; 3011 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3012 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3013 3014 memset(nskb->cb, 0, sizeof(nskb->cb)); 3015 3016 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3017 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3018 3019 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3020 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3021 IEEE80211_TX_CTL_NO_CCK_RATE; 3022 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3023 info->hw_queue = 3024 local->hw.offchannel_tx_hw_queue; 3025 } 3026 3027 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3028 status->band); 3029 } 3030 dev_kfree_skb(rx->skb); 3031 return RX_QUEUED; 3032 } 3033 3034 static ieee80211_rx_result debug_noinline 3035 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3036 { 3037 struct ieee80211_sub_if_data *sdata = rx->sdata; 3038 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3039 __le16 stype; 3040 3041 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3042 3043 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3044 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3045 sdata->vif.type != NL80211_IFTYPE_OCB && 3046 sdata->vif.type != NL80211_IFTYPE_STATION) 3047 return RX_DROP_MONITOR; 3048 3049 switch (stype) { 3050 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3051 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3052 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3053 /* process for all: mesh, mlme, ibss */ 3054 break; 3055 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3056 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3057 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3058 case 
cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3059 if (is_multicast_ether_addr(mgmt->da) && 3060 !is_broadcast_ether_addr(mgmt->da)) 3061 return RX_DROP_MONITOR; 3062 3063 /* process only for station */ 3064 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3065 return RX_DROP_MONITOR; 3066 break; 3067 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3068 /* process only for ibss and mesh */ 3069 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3070 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3071 return RX_DROP_MONITOR; 3072 break; 3073 default: 3074 return RX_DROP_MONITOR; 3075 } 3076 3077 /* queue up frame and kick off work to process it */ 3078 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; 3079 skb_queue_tail(&sdata->skb_queue, rx->skb); 3080 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3081 if (rx->sta) 3082 rx->sta->rx_stats.packets++; 3083 3084 return RX_QUEUED; 3085 } 3086 3087 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3088 struct ieee80211_rate *rate) 3089 { 3090 struct ieee80211_sub_if_data *sdata; 3091 struct ieee80211_local *local = rx->local; 3092 struct sk_buff *skb = rx->skb, *skb2; 3093 struct net_device *prev_dev = NULL; 3094 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3095 int needed_headroom; 3096 3097 /* 3098 * If cooked monitor has been processed already, then 3099 * don't do it again. If not, set the flag. 3100 */ 3101 if (rx->flags & IEEE80211_RX_CMNTR) 3102 goto out_free_skb; 3103 rx->flags |= IEEE80211_RX_CMNTR; 3104 3105 /* If there are no cooked monitor interfaces, just free the SKB */ 3106 if (!local->cooked_mntrs) 3107 goto out_free_skb; 3108 3109 /* vendor data is long removed here */ 3110 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3111 /* room for the radiotap header based on driver features */ 3112 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3113 3114 if (skb_headroom(skb) < needed_headroom && 3115 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3116 goto out_free_skb; 3117 3118 /* prepend radiotap information */ 3119 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3120 false); 3121 3122 skb_reset_mac_header(skb); 3123 skb->ip_summed = CHECKSUM_UNNECESSARY; 3124 skb->pkt_type = PACKET_OTHERHOST; 3125 skb->protocol = htons(ETH_P_802_2); 3126 3127 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3128 if (!ieee80211_sdata_running(sdata)) 3129 continue; 3130 3131 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3132 !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) 3133 continue; 3134 3135 if (prev_dev) { 3136 skb2 = skb_clone(skb, GFP_ATOMIC); 3137 if (skb2) { 3138 skb2->dev = prev_dev; 3139 netif_receive_skb(skb2); 3140 } 3141 } 3142 3143 prev_dev = sdata->dev; 3144 ieee80211_rx_stats(sdata->dev, skb->len); 3145 } 3146 3147 if (prev_dev) { 3148 skb->dev = prev_dev; 3149 netif_receive_skb(skb); 3150 return; 3151 } 3152 3153 out_free_skb: 3154 dev_kfree_skb(skb); 3155 } 3156 3157 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3158 ieee80211_rx_result res) 3159 { 3160 switch (res) { 3161 case RX_DROP_MONITOR: 3162 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3163 if (rx->sta) 3164 rx->sta->rx_stats.dropped++; 3165 /* fall through */ 3166 case RX_CONTINUE: { 3167 struct ieee80211_rate *rate = NULL; 3168 struct ieee80211_supported_band *sband; 3169 struct ieee80211_rx_status *status; 3170 3171 status = IEEE80211_SKB_RXCB((rx->skb)); 3172 3173 sband = rx->local->hw.wiphy->bands[status->band]; 3174 if (!(status->flag & 
RX_FLAG_HT) && 3175 !(status->flag & RX_FLAG_VHT)) 3176 rate = &sband->bitrates[status->rate_idx]; 3177 3178 ieee80211_rx_cooked_monitor(rx, rate); 3179 break; 3180 } 3181 case RX_DROP_UNUSABLE: 3182 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3183 if (rx->sta) 3184 rx->sta->rx_stats.dropped++; 3185 dev_kfree_skb(rx->skb); 3186 break; 3187 case RX_QUEUED: 3188 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3189 break; 3190 } 3191 } 3192 3193 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3194 struct sk_buff_head *frames) 3195 { 3196 ieee80211_rx_result res = RX_DROP_MONITOR; 3197 struct sk_buff *skb; 3198 3199 #define CALL_RXH(rxh) \ 3200 do { \ 3201 res = rxh(rx); \ 3202 if (res != RX_CONTINUE) \ 3203 goto rxh_next; \ 3204 } while (0); 3205 3206 /* Lock here to avoid hitting all of the data used in the RX 3207 * path (e.g. key data, station data, ...) concurrently when 3208 * a frame is released from the reorder buffer due to timeout 3209 * from the timer, potentially concurrently with RX from the 3210 * driver. 3211 */ 3212 spin_lock_bh(&rx->local->rx_path_lock); 3213 3214 while ((skb = __skb_dequeue(frames))) { 3215 /* 3216 * all the other fields are valid across frames 3217 * that belong to an aMPDU since they are on the 3218 * same TID from the same station 3219 */ 3220 rx->skb = skb; 3221 3222 CALL_RXH(ieee80211_rx_h_check_more_data) 3223 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll) 3224 CALL_RXH(ieee80211_rx_h_sta_process) 3225 CALL_RXH(ieee80211_rx_h_decrypt) 3226 CALL_RXH(ieee80211_rx_h_defragment) 3227 CALL_RXH(ieee80211_rx_h_michael_mic_verify) 3228 /* must be after MMIC verify so header is counted in MPDU mic */ 3229 #ifdef CONFIG_MAC80211_MESH 3230 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3231 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3232 #endif 3233 CALL_RXH(ieee80211_rx_h_amsdu) 3234 CALL_RXH(ieee80211_rx_h_data) 3235 3236 /* special treatment -- needs the queue */ 3237 res = ieee80211_rx_h_ctrl(rx, frames); 3238 if (res != RX_CONTINUE) 3239 goto rxh_next; 3240 3241 CALL_RXH(ieee80211_rx_h_mgmt_check) 3242 CALL_RXH(ieee80211_rx_h_action) 3243 CALL_RXH(ieee80211_rx_h_userspace_mgmt) 3244 CALL_RXH(ieee80211_rx_h_action_return) 3245 CALL_RXH(ieee80211_rx_h_mgmt) 3246 3247 rxh_next: 3248 ieee80211_rx_handlers_result(rx, res); 3249 3250 #undef CALL_RXH 3251 } 3252 3253 spin_unlock_bh(&rx->local->rx_path_lock); 3254 } 3255 3256 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3257 { 3258 struct sk_buff_head reorder_release; 3259 ieee80211_rx_result res = RX_DROP_MONITOR; 3260 3261 __skb_queue_head_init(&reorder_release); 3262 3263 #define CALL_RXH(rxh) \ 3264 do { \ 3265 res = rxh(rx); \ 3266 if (res != RX_CONTINUE) \ 3267 goto rxh_next; \ 3268 } while (0); 3269 3270 CALL_RXH(ieee80211_rx_h_check_dup) 3271 CALL_RXH(ieee80211_rx_h_check) 3272 3273 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3274 3275 ieee80211_rx_handlers(rx, &reorder_release); 3276 return; 3277 3278 rxh_next: 3279 ieee80211_rx_handlers_result(rx, res); 3280 3281 #undef CALL_RXH 3282 } 3283 3284 /* 3285 * This function makes calls into the RX path, therefore 3286 * it has to be invoked under RCU read lock. 
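 * It is typically invoked from the reorder timeout timer to release
 * frames that have been held in the reorder buffer for too long.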
3287 */ 3288 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3289 { 3290 struct sk_buff_head frames; 3291 struct ieee80211_rx_data rx = { 3292 .sta = sta, 3293 .sdata = sta->sdata, 3294 .local = sta->local, 3295 /* This is OK -- must be QoS data frame */ 3296 .security_idx = tid, 3297 .seqno_idx = tid, 3298 .napi = NULL, /* must be NULL to not have races */ 3299 }; 3300 struct tid_ampdu_rx *tid_agg_rx; 3301 3302 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3303 if (!tid_agg_rx) 3304 return; 3305 3306 __skb_queue_head_init(&frames); 3307 3308 spin_lock(&tid_agg_rx->reorder_lock); 3309 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3310 spin_unlock(&tid_agg_rx->reorder_lock); 3311 3312 if (!skb_queue_empty(&frames)) { 3313 struct ieee80211_event event = { 3314 .type = BA_FRAME_TIMEOUT, 3315 .u.ba.tid = tid, 3316 .u.ba.sta = &sta->sta, 3317 }; 3318 drv_event_callback(rx.local, rx.sdata, &event); 3319 } 3320 3321 ieee80211_rx_handlers(&rx, &frames); 3322 } 3323 3324 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3325 u16 ssn, u64 filtered, 3326 u16 received_mpdus) 3327 { 3328 struct sta_info *sta; 3329 struct tid_ampdu_rx *tid_agg_rx; 3330 struct sk_buff_head frames; 3331 struct ieee80211_rx_data rx = { 3332 /* This is OK -- must be QoS data frame */ 3333 .security_idx = tid, 3334 .seqno_idx = tid, 3335 }; 3336 int i, diff; 3337 3338 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3339 return; 3340 3341 __skb_queue_head_init(&frames); 3342 3343 sta = container_of(pubsta, struct sta_info, sta); 3344 3345 rx.sta = sta; 3346 rx.sdata = sta->sdata; 3347 rx.local = sta->local; 3348 3349 rcu_read_lock(); 3350 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3351 if (!tid_agg_rx) 3352 goto out; 3353 3354 spin_lock_bh(&tid_agg_rx->reorder_lock); 3355 3356 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3357 int release; 3358 3359 /* release all frames in the reorder buffer */ 3360 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3361 IEEE80211_SN_MODULO; 3362 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3363 release, &frames); 3364 /* update ssn to match received ssn */ 3365 tid_agg_rx->head_seq_num = ssn; 3366 } else { 3367 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 3368 &frames); 3369 } 3370 3371 /* handle the case that received ssn is behind the mac ssn. 
3372 * it can be tid_agg_rx->buf_size behind and still be valid */ 3373 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 3374 if (diff >= tid_agg_rx->buf_size) { 3375 tid_agg_rx->reorder_buf_filtered = 0; 3376 goto release; 3377 } 3378 filtered = filtered >> diff; 3379 ssn += diff; 3380 3381 /* update bitmap */ 3382 for (i = 0; i < tid_agg_rx->buf_size; i++) { 3383 int index = (ssn + i) % tid_agg_rx->buf_size; 3384 3385 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 3386 if (filtered & BIT_ULL(i)) 3387 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 3388 } 3389 3390 /* now process also frames that the filter marking released */ 3391 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3392 3393 release: 3394 spin_unlock_bh(&tid_agg_rx->reorder_lock); 3395 3396 ieee80211_rx_handlers(&rx, &frames); 3397 3398 out: 3399 rcu_read_unlock(); 3400 } 3401 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 3402 3403 /* main receive path */ 3404 3405 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3406 { 3407 struct ieee80211_sub_if_data *sdata = rx->sdata; 3408 struct sk_buff *skb = rx->skb; 3409 struct ieee80211_hdr *hdr = (void *)skb->data; 3410 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3411 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3412 int multicast = is_multicast_ether_addr(hdr->addr1); 3413 3414 switch (sdata->vif.type) { 3415 case NL80211_IFTYPE_STATION: 3416 if (!bssid && !sdata->u.mgd.use_4addr) 3417 return false; 3418 if (multicast) 3419 return true; 3420 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3421 case NL80211_IFTYPE_ADHOC: 3422 if (!bssid) 3423 return false; 3424 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3425 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3426 return false; 3427 if (ieee80211_is_beacon(hdr->frame_control)) 3428 return true; 3429 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3430 return false; 3431 if (!multicast && 3432 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3433 return false; 3434 if (!rx->sta) { 3435 int rate_idx; 3436 if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) 3437 rate_idx = 0; /* TODO: HT/VHT rates */ 3438 else 3439 rate_idx = status->rate_idx; 3440 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3441 BIT(rate_idx)); 3442 } 3443 return true; 3444 case NL80211_IFTYPE_OCB: 3445 if (!bssid) 3446 return false; 3447 if (!ieee80211_is_data_present(hdr->frame_control)) 3448 return false; 3449 if (!is_broadcast_ether_addr(bssid)) 3450 return false; 3451 if (!multicast && 3452 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 3453 return false; 3454 if (!rx->sta) { 3455 int rate_idx; 3456 if (status->flag & RX_FLAG_HT) 3457 rate_idx = 0; /* TODO: HT rates */ 3458 else 3459 rate_idx = status->rate_idx; 3460 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3461 BIT(rate_idx)); 3462 } 3463 return true; 3464 case NL80211_IFTYPE_MESH_POINT: 3465 if (multicast) 3466 return true; 3467 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3468 case NL80211_IFTYPE_AP_VLAN: 3469 case NL80211_IFTYPE_AP: 3470 if (!bssid) 3471 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3472 3473 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3474 /* 3475 * Accept public action frames even when the 3476 * BSSID doesn't match, this is used for P2P 3477 * and location updates. Note that mac80211 3478 * itself never looks at these frames. 
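 * Beacons from foreign BSSes are accepted as well, e.g. so that they
 * can be reported via cfg80211_report_obss_beacon().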
3479 */ 3480 if (!multicast && 3481 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3482 return false; 3483 if (ieee80211_is_public_action(hdr, skb->len)) 3484 return true; 3485 return ieee80211_is_beacon(hdr->frame_control); 3486 } 3487 3488 if (!ieee80211_has_tods(hdr->frame_control)) { 3489 /* ignore data frames to TDLS-peers */ 3490 if (ieee80211_is_data(hdr->frame_control)) 3491 return false; 3492 /* ignore action frames to TDLS-peers */ 3493 if (ieee80211_is_action(hdr->frame_control) && 3494 !is_broadcast_ether_addr(bssid) && 3495 !ether_addr_equal(bssid, hdr->addr1)) 3496 return false; 3497 } 3498 return true; 3499 case NL80211_IFTYPE_WDS: 3500 if (bssid || !ieee80211_is_data(hdr->frame_control)) 3501 return false; 3502 return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2); 3503 case NL80211_IFTYPE_P2P_DEVICE: 3504 return ieee80211_is_public_action(hdr, skb->len) || 3505 ieee80211_is_probe_req(hdr->frame_control) || 3506 ieee80211_is_probe_resp(hdr->frame_control) || 3507 ieee80211_is_beacon(hdr->frame_control); 3508 default: 3509 break; 3510 } 3511 3512 WARN_ON_ONCE(1); 3513 return false; 3514 } 3515 3516 /* 3517 * This function returns whether or not the SKB 3518 * was destined for RX processing or not, which, 3519 * if consume is true, is equivalent to whether 3520 * or not the skb was consumed. 3521 */ 3522 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 3523 struct sk_buff *skb, bool consume) 3524 { 3525 struct ieee80211_local *local = rx->local; 3526 struct ieee80211_sub_if_data *sdata = rx->sdata; 3527 3528 rx->skb = skb; 3529 3530 if (!ieee80211_accept_frame(rx)) 3531 return false; 3532 3533 if (!consume) { 3534 skb = skb_copy(skb, GFP_ATOMIC); 3535 if (!skb) { 3536 if (net_ratelimit()) 3537 wiphy_debug(local->hw.wiphy, 3538 "failed to copy skb for %s\n", 3539 sdata->name); 3540 return true; 3541 } 3542 3543 rx->skb = skb; 3544 } 3545 3546 ieee80211_invoke_rx_handlers(rx); 3547 return true; 3548 } 3549 3550 /* 3551 * This is the actual Rx frames handler. as it belongs to Rx path it must 3552 * be called with rcu_read_lock protection. 
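 * A frame may match several stations/interfaces; every candidate but
 * the last is handed a copy of the skb, the last one consumes it.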
3553 */ 3554 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 3555 struct sk_buff *skb, 3556 struct napi_struct *napi) 3557 { 3558 struct ieee80211_local *local = hw_to_local(hw); 3559 struct ieee80211_sub_if_data *sdata; 3560 struct ieee80211_hdr *hdr; 3561 __le16 fc; 3562 struct ieee80211_rx_data rx; 3563 struct ieee80211_sub_if_data *prev; 3564 struct sta_info *sta, *prev_sta; 3565 struct rhash_head *tmp; 3566 int err = 0; 3567 3568 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 3569 memset(&rx, 0, sizeof(rx)); 3570 rx.skb = skb; 3571 rx.local = local; 3572 rx.napi = napi; 3573 3574 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 3575 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 3576 3577 if (ieee80211_is_mgmt(fc)) { 3578 /* drop frame if too short for header */ 3579 if (skb->len < ieee80211_hdrlen(fc)) 3580 err = -ENOBUFS; 3581 else 3582 err = skb_linearize(skb); 3583 } else { 3584 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 3585 } 3586 3587 if (err) { 3588 dev_kfree_skb(skb); 3589 return; 3590 } 3591 3592 hdr = (struct ieee80211_hdr *)skb->data; 3593 ieee80211_parse_qos(&rx); 3594 ieee80211_verify_alignment(&rx); 3595 3596 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 3597 ieee80211_is_beacon(hdr->frame_control))) 3598 ieee80211_scan_rx(local, skb); 3599 3600 if (ieee80211_is_data(fc)) { 3601 const struct bucket_table *tbl; 3602 3603 prev_sta = NULL; 3604 3605 tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash); 3606 3607 for_each_sta_info(local, tbl, hdr->addr2, sta, tmp) { 3608 if (!prev_sta) { 3609 prev_sta = sta; 3610 continue; 3611 } 3612 3613 rx.sta = prev_sta; 3614 rx.sdata = prev_sta->sdata; 3615 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3616 3617 prev_sta = sta; 3618 } 3619 3620 if (prev_sta) { 3621 rx.sta = prev_sta; 3622 rx.sdata = prev_sta->sdata; 3623 3624 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3625 return; 3626 goto out; 3627 } 3628 } 3629 3630 prev = NULL; 3631 3632 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3633 if (!ieee80211_sdata_running(sdata)) 3634 continue; 3635 3636 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 3637 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 3638 continue; 3639 3640 /* 3641 * frame is destined for this interface, but if it's 3642 * not also for the previous one we handle that after 3643 * the loop to avoid copying the SKB once too much 3644 */ 3645 3646 if (!prev) { 3647 prev = sdata; 3648 continue; 3649 } 3650 3651 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3652 rx.sdata = prev; 3653 ieee80211_prepare_and_rx_handle(&rx, skb, false); 3654 3655 prev = sdata; 3656 } 3657 3658 if (prev) { 3659 rx.sta = sta_info_get_bss(prev, hdr->addr2); 3660 rx.sdata = prev; 3661 3662 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 3663 return; 3664 } 3665 3666 out: 3667 dev_kfree_skb(skb); 3668 } 3669 3670 /* 3671 * This is the receive path handler. It is called by a low level driver when an 3672 * 802.11 MPDU is received from the hardware. 
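 * It must be called from BH/softirq context; drivers running in
 * hard-IRQ context use ieee80211_rx_irqsafe() instead.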
3673 */ 3674 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct sk_buff *skb, 3675 struct napi_struct *napi) 3676 { 3677 struct ieee80211_local *local = hw_to_local(hw); 3678 struct ieee80211_rate *rate = NULL; 3679 struct ieee80211_supported_band *sband; 3680 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3681 3682 WARN_ON_ONCE(softirq_count() == 0); 3683 3684 if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) 3685 goto drop; 3686 3687 sband = local->hw.wiphy->bands[status->band]; 3688 if (WARN_ON(!sband)) 3689 goto drop; 3690 3691 /* 3692 * If we're suspending, it is possible although not too likely 3693 * that we'd be receiving frames after having already partially 3694 * quiesced the stack. We can't process such frames then since 3695 * that might, for example, cause stations to be added or other 3696 * driver callbacks be invoked. 3697 */ 3698 if (unlikely(local->quiescing || local->suspended)) 3699 goto drop; 3700 3701 /* We might be during a HW reconfig, prevent Rx for the same reason */ 3702 if (unlikely(local->in_reconfig)) 3703 goto drop; 3704 3705 /* 3706 * The same happens when we're not even started, 3707 * but that's worth a warning. 3708 */ 3709 if (WARN_ON(!local->started)) 3710 goto drop; 3711 3712 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) { 3713 /* 3714 * Validate the rate, unless a PLCP error means that 3715 * we probably can't have a valid rate here anyway. 3716 */ 3717 3718 if (status->flag & RX_FLAG_HT) { 3719 /* 3720 * rate_idx is MCS index, which can be [0-76] 3721 * as documented on: 3722 * 3723 * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n 3724 * 3725 * Anything else would be some sort of driver or 3726 * hardware error. The driver should catch hardware 3727 * errors. 3728 */ 3729 if (WARN(status->rate_idx > 76, 3730 "Rate marked as an HT rate but passed " 3731 "status->rate_idx is not " 3732 "an MCS index [0-76]: %d (0x%02x)\n", 3733 status->rate_idx, 3734 status->rate_idx)) 3735 goto drop; 3736 } else if (status->flag & RX_FLAG_VHT) { 3737 if (WARN_ONCE(status->rate_idx > 9 || 3738 !status->vht_nss || 3739 status->vht_nss > 8, 3740 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 3741 status->rate_idx, status->vht_nss)) 3742 goto drop; 3743 } else { 3744 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 3745 goto drop; 3746 rate = &sband->bitrates[status->rate_idx]; 3747 } 3748 } 3749 3750 status->rx_flags = 0; 3751 3752 /* 3753 * key references and virtual interfaces are protected using RCU 3754 * and this requires that we are in a read-side RCU section during 3755 * receive processing 3756 */ 3757 rcu_read_lock(); 3758 3759 /* 3760 * Frames with failed FCS/PLCP checksum are not returned, 3761 * all other frames are returned without radiotap header 3762 * if it was previously present. 3763 * Also, frames with less than 16 bytes are dropped. 3764 */ 3765 skb = ieee80211_rx_monitor(local, skb, rate); 3766 if (!skb) { 3767 rcu_read_unlock(); 3768 return; 3769 } 3770 3771 ieee80211_tpt_led_trig_rx(local, 3772 ((struct ieee80211_hdr *)skb->data)->frame_control, 3773 skb->len); 3774 __ieee80211_rx_handle_packet(hw, skb, napi); 3775 3776 rcu_read_unlock(); 3777 3778 return; 3779 drop: 3780 kfree_skb(skb); 3781 } 3782 EXPORT_SYMBOL(ieee80211_rx_napi); 3783 3784 /* This is a version of the rx handler that can be called from hard irq 3785 * context. 
Post the skb on the queue and schedule the tasklet. */ 3786 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 3787 { 3788 struct ieee80211_local *local = hw_to_local(hw); 3789 3790 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 3791 3792 skb->pkt_type = IEEE80211_RX_MSG; 3793 skb_queue_tail(&local->skb_queue, skb); 3794 tasklet_schedule(&local->tasklet); 3795 } 3796 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 3797
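/*
 * Illustrative sketch (not part of mac80211): a driver is expected to
 * fill in the ieee80211_rx_status stored in skb->cb before handing the
 * frame over; the values below are examples only and depend entirely
 * on the hardware:
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	status->band = IEEE80211_BAND_2GHZ;	// example values only
 *	status->freq = 2412;
 *	status->signal = -55;			// if SIGNAL_DBM is set
 *	status->rate_idx = 0;
 *
 *	ieee80211_rx_irqsafe(hw, skb);		// from hard-IRQ context
 *
 * From BH/NAPI context the driver would instead call ieee80211_rx_napi()
 * or ieee80211_rx() directly.
 */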