/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}

static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
			       enum nl80211_iftype type)
{
	__le16 fc = hdr->frame_control;

	if (ieee80211_is_data(fc)) {
		if (len < 24) /* drop incorrect hdr len (data) */
			return NULL;

		if (ieee80211_has_a4(fc))
			return NULL;
		if (ieee80211_has_tods(fc))
			return hdr->addr1;
		if (ieee80211_has_fromds(fc))
			return hdr->addr2;

		return hdr->addr3;
	}

	if (ieee80211_is_mgmt(fc)) {
		if (len < 24) /* drop incorrect hdr len (mgmt) */
			return NULL;
		return hdr->addr3;
	}

	if (ieee80211_is_ctl(fc)) {
		if (ieee80211_is_pspoll(fc))
			return hdr->addr1;

		if (ieee80211_is_back_req(fc)) {
			switch (type) {
			case NL80211_IFTYPE_STATION:
				return hdr->addr2;
			case NL80211_IFTYPE_AP:
			case NL80211_IFTYPE_AP_VLAN:
				return hdr->addr1;
			default:
				break; /* fall through to the return */
			}
		}
	}

	return NULL;
}

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static void remove_monitor_info(struct sk_buff *skb,
				unsigned int present_fcs_len,
				unsigned int rtap_space)
{
	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	__pskb_pull(skb, rtap_space);
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		len = ALIGN(len, 2);
		len += 12;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;

		/* vendor presence bitmap */
		len += 4;
		/* alignment for fixed 6-byte vendor data header */
		len = ALIGN(len, 2);
		/* vendor data header */
		len += 6;
		if (WARN_ON(rtap->align == 0))
			rtap->align = 1;
		len = ALIGN(len, rtap->align);
		len += rtap->len + rtap->pad;
	}

	return len;
}

static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

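	/*
	 * The frame must be long enough for the action frame header plus
	 * the Group ID Management fields (membership status and user
	 * position arrays) covered by VHT_MUMIMO_GROUPS_DATA_LEN.
	 */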
	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_vendor_radiotap rtap = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		rtap = *(struct ieee80211_vendor_radiotap *)skb->data;
		/* rtap.len and rtap.pad are undone immediately */
		skb_pull(skb, sizeof(rtap) + rtap.len + rtap.pad);
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len);
	memset(rthdr, 0, rtap_len - rtap.len - rtap.pad);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
				  BIT(IEEE80211_RADIOTAP_EXT);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = rtap.present;
	}

	put_unaligned_le32(it_present_val, it_present);

	pos = (void *)(it_present + 1);

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
		*pos++ = local->hw.radiotap_mcs_details;
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
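		/* number of STBC streams, as defined for the radiotap MCS field */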
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
			*pos++ = status->ampdu_delimiter_crc;
		else
			*pos++ = 0;
		*pos++ = 0;
	}

	if (status->encoding == RX_ENC_VHT) {
		u16 known = local->hw.radiotap_vht_details;

		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
		put_unaligned_le16(known, pos);
		pos += 2;
		/* flags */
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (status->enc_flags & RX_ENC_FLAG_BF)
			*pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
		pos++;
		/* bandwidth */
		switch (status->bw) {
		case RATE_INFO_BW_80:
			*pos++ = 4;
			break;
		case RATE_INFO_BW_160:
			*pos++ = 11;
			break;
		case RATE_INFO_BW_40:
			*pos++ = 1;
			break;
		default:
			*pos++ = 0;
		}
		/* MCS/NSS */
		*pos = (status->rate_idx << 4) | status->nss;
		pos += 4;
		/* coding field */
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		pos++;
		/* group ID */
		pos++;
		/* partial_aid */
		pos += 2;
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;

		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_TIMESTAMP);

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		put_unaligned_le64(status->device_timestamp, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE);
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_HE_MU);
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(1 << IEEE80211_RADIOTAP_ZERO_LEN_PSDU);
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_LSIG);
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
		/* ensure 2 byte alignment for the vendor field as required */
		if ((pos - (u8 *)rthdr) & 1)
			*pos++ = 0;
		*pos++ = rtap.oui[0];
		*pos++ = rtap.oui[1];
		*pos++ = rtap.oui[2];
		*pos++ = rtap.subns;
		put_unaligned_le16(rtap.len, pos);
		pos += 2;
		/* align the actual payload as requested */
		while ((pos - (u8 *)rthdr) & (rtap.align - 1))
			*pos++ = 0;
		/* data (and possible padding) already follows */
	}
}

static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(*origskb, needed_headroom, 0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (unlikely(status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)) {
		struct ieee80211_vendor_radiotap *rtap = (void *)origskb->data;

		rtap_space += sizeof(*rtap) + rtap->len + rtap->pad;
	}

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
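	 * (Such a frame is then only delivered to monitor interfaces, if
	 * any, and never returned for normal processing.)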
	 */

	if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
		if (unlikely(origskb->len <= FCS_LEN)) {
			/* driver bug */
			WARN_ON(1);
			dev_kfree_skb(origskb);
			return NULL;
		}
		present_fcs_len = FCS_LEN;
	}

	/* ensure hdr->frame_control and vendor radiotap data are in skb head */
	if (!pskb_may_pull(origskb, 2 + rtap_space)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (only_monitor) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		remove_monitor_info(origskb, present_fcs_len, rtap_space);
		return origskb;
	}

	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);

	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
		bool last_monitor = list_is_last(&sdata->u.mntr.list,
						 &local->mon_list);

		if (!monskb)
			monskb = ieee80211_make_monitor_skb(local, &origskb,
							    rate, rtap_space,
							    only_monitor &&
							    last_monitor);

		if (monskb) {
			struct sk_buff *skb;

			if (last_monitor) {
				skb = monskb;
				monskb = NULL;
			} else {
				skb = skb_clone(monskb, GFP_ATOMIC);
			}

			if (skb) {
				skb->dev = sdata->dev;
				ieee80211_rx_stats(skb->dev, skb->len);
				netif_receive_skb(skb);
			}
		}

		if (last_monitor)
			break;
	}

	/* this happens if last_monitor was erroneously false */
	dev_kfree_skb(monskb);

	/* ditto */
	if (!origskb)
		return NULL;

	remove_monitor_info(origskb, present_fcs_len, rtap_space);
	return origskb;
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 * Sequence numbers for management frames, QoS data
		 * frames with a broadcast/multicast address in the
		 * Address 1 field, and all non-QoS data frames sent
		 * by QoS STAs are assigned using an additional single
		 * modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, drivers should, if possible, align the payload data in a way
 * that guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like that which Atheros hardware adds between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc;
	int hdrlen;
	u8 keyid;

	fc = hdr->frame_control;
	hdrlen = ieee80211_hdrlen(fc);

	if (skb->len < hdrlen + cs->hdr_len)
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
	keyid &= cs->key_idx_mask;
	keyid >>= cs->key_idx_shift;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

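	/*
	 * Basic addressing checks for mesh data frames: group-addressed
	 * frames must be From-DS only, individually addressed frames must
	 * use the 4-address format, and anything carrying our own address
	 * as source/transmitter is dropped.
	 */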
	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_MONITOR;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_MONITOR;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_MONITOR;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP_MONITOR;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP_MONITOR;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP_MONITOR;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
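		/* releasing advances tid_agg_rx->head_seq_num by one slot */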
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		del_timer(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
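 * (consumed meaning it was either buffered for reordering or freed)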
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 sc = le16_to_cpu(hdr->seq_ctrl);
	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
 * true if the MPDU was buffered, false if it should be processed.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_local *local = rx->local;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = ieee80211_get_tid(hdr);

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
	    ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		skb_queue_tail(&rx->sdata->skb_queue, skb);
		ieee80211_queue_work(&local->hw, &rx->sdata->work);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
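	 * (tid_agg_rx->reorder_lock, taken inside, still serializes this
	 * against the reorder release timer.)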
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->sta->rx_stats.num_duplicates++;
		return RX_DROP_UNUSABLE;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_MONITOR;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev,
					       hdr->addr2,
					       GFP_ATOMIC))
			return RX_DROP_UNUSABLE;

		return RX_DROP_MONITOR;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	if (!sta->sta.txq[0])
		return;

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		if (txq_has_queue(sta->sta.txq[tid]))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else
		set_sta_flag(sta, WLAN_STA_PSPOLL);
}
EXPORT_SYMBOL(ieee80211_sta_pspoll);

void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	int ac = ieee80211_ac_from_tid(tid);

	/*
	 * If this AC is not trigger-enabled do nothing unless the
	 * driver is calling us after it already checked.
	 *
	 * NB: This could/should check a separate bitmap of trigger-
	 * enabled queues, but for now we only implement uAPSD w/o
	 * TSPEC changes to the ACs, so they're always the same.
	 */
	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
	    tid != IEEE80211_NUM_TIDS)
		return;

	/* if we are in a service period, do nothing */
	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_uapsd(sta);
	else
		set_sta_flag(sta, WLAN_STA_UAPSD);
}
EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
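	 * (it sent a PS-Poll even though we believe it to be awake).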
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		sta->rx_stats.last_rx = jiffies;
	} else if (!is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
		 */
		sta->rx_stats.last_rx = jiffies;
		if (ieee80211_is_data(hdr->frame_control))
			sta->rx_stats.last_rate = sta_stats_encode_rate(status);
	}

	if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
		ieee80211_sta_rx_notify(rx->sdata, hdr);

	sta->rx_stats.fragments++;

	u64_stats_update_begin(&rx->sta->rx_stats.syncp);
	sta->rx_stats.bytes += rx->skb->len;
	u64_stats_update_end(&rx->sta->rx_stats.syncp);

	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		sta->rx_stats.last_signal = status->signal;
		ewma_signal_add(&sta->rx_stats_avg.signal, -status->signal);
	}

	if (status->chains) {
		sta->rx_stats.chains = status->chains;
		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
			int signal = status->chain_signal[i];

			if (!(status->chains & BIT(i)))
				continue;

			sta->rx_stats.chain_signal_last[i] = signal;
			ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
					-signal);
		}
	}

	/*
	 * Change STA power saving mode only at the end of a frame
	 * exchange sequence, and only for a data or management
	 * frame as specified in IEEE 802.11-2016 11.2.3.2
	 */
	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    !is_multicast_ether_addr(hdr->addr1) &&
	    (ieee80211_is_mgmt(hdr->frame_control) ||
	     ieee80211_is_data(hdr->frame_control)) &&
	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
			if (!ieee80211_has_pm(hdr->frame_control))
				sta_ps_end(sta);
		} else {
			if (ieee80211_has_pm(hdr->frame_control))
				sta_ps_start(sta);
		}
	}

	/* mesh power save support */
	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		ieee80211_mps_rx_h_sta_process(sta, hdr);

	/*
	 * Drop (qos-)data::nullfunc frames silently, since they
	 * are used only to control station power saving mode.
	 */
	if (ieee80211_is_nullfunc(hdr->frame_control) ||
	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);

		/*
		 * If we receive a 4-addr nullfunc frame from a STA
		 * that was not moved to a 4-addr STA vlan yet send
		 * the event to userspace and for older hostapd drop
		 * the frame to the monitor interface.
		 */
		if (ieee80211_has_a4(hdr->frame_control) &&
		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
		      !rx->sdata->u.vlan.sta))) {
			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
				cfg80211_rx_unexpected_4addr_frame(
					rx->sdata->dev, sta->sta.addr,
					GFP_ATOMIC);
			return RX_DROP_MONITOR;
		}
		/*
		 * Update counter and free packet here to avoid
		 * counting this as a dropped packet.
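		 * Returning RX_QUEUED tells the caller the skb was consumed.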
		 */
		sta->rx_stats.packets++;
		dev_kfree_skb(rx->skb);
		return RX_QUEUED;
	}

	return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int keyidx;
	int hdrlen;
	ieee80211_rx_result result = RX_DROP_UNUSABLE;
	struct ieee80211_key *sta_ptk = NULL;
	int mmie_keyidx = -1;
	__le16 fc;
	const struct ieee80211_cipher_scheme *cs = NULL;

	/*
	 * Key selection 101
	 *
	 * There are four types of keys:
	 *  - GTK (group keys)
	 *  - IGTK (group keys for management frames)
	 *  - PTK (pairwise keys)
	 *  - STK (station-to-station pairwise keys)
	 *
	 * When selecting a key, we have to distinguish between multicast
	 * (including broadcast) and unicast frames, the latter can only
	 * use PTKs and STKs while the former always use GTKs and IGTKs.
	 * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
	 * unicast frames can also use key indices like GTKs. Hence, if we
	 * don't have a PTK/STK we check the key index for a WEP key.
	 *
	 * Note that in a regular BSS, multicast frames are sent by the
	 * AP only, associated stations unicast the frame to the AP first
	 * which then multicasts it on their behalf.
	 *
	 * There is also a slight problem in IBSS mode: GTKs are negotiated
	 * with each station, that is something we don't currently handle.
	 * The spec seems to expect that one negotiates the same key with
	 * every station but there's no such requirement; VLANs could be
	 * possible.
	 */

	/* start without a key */
	rx->key = NULL;
	fc = hdr->frame_control;

	if (rx->sta) {
		int keyid = rx->sta->ptk_idx;

		if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
			cs = rx->sta->cipher_scheme;
			keyid = ieee80211_get_cs_keyid(cs, rx->skb);
			if (unlikely(keyid < 0))
				return RX_DROP_UNUSABLE;
		}
		sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
	}

	if (!ieee80211_has_protected(fc))
		mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);

	if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
		rx->key = sta_ptk;
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;
		/* Skip decryption if the frame is not protected. */
		if (!ieee80211_has_protected(fc))
			return RX_CONTINUE;
	} else if (mmie_keyidx >= 0) {
		/* Broadcast/multicast robust management frame / BIP */
		if ((status->flag & RX_FLAG_DECRYPTED) &&
		    (status->flag & RX_FLAG_IV_STRIPPED))
			return RX_CONTINUE;

		if (mmie_keyidx < NUM_DEFAULT_KEYS ||
		    mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
			return RX_DROP_MONITOR; /* unexpected BIP keyidx */
		if (rx->sta) {
			if (ieee80211_is_group_privacy_action(skb) &&
			    test_sta_flag(rx->sta, WLAN_STA_MFP))
				return RX_DROP_MONITOR;

			rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
		}
		if (!rx->key)
			rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
	} else if (!ieee80211_has_protected(fc)) {
		/*
However, we 1906 * need to set rx->key if there is a key that could have been 1907 * used so that the frame may be dropped if encryption would 1908 * have been expected. 1909 */ 1910 struct ieee80211_key *key = NULL; 1911 struct ieee80211_sub_if_data *sdata = rx->sdata; 1912 int i; 1913 1914 if (ieee80211_is_mgmt(fc) && 1915 is_multicast_ether_addr(hdr->addr1) && 1916 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 1917 rx->key = key; 1918 else { 1919 if (rx->sta) { 1920 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1921 key = rcu_dereference(rx->sta->gtk[i]); 1922 if (key) 1923 break; 1924 } 1925 } 1926 if (!key) { 1927 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 1928 key = rcu_dereference(sdata->keys[i]); 1929 if (key) 1930 break; 1931 } 1932 } 1933 if (key) 1934 rx->key = key; 1935 } 1936 return RX_CONTINUE; 1937 } else { 1938 u8 keyid; 1939 1940 /* 1941 * The device doesn't give us the IV so we won't be 1942 * able to look up the key. That's ok though, we 1943 * don't need to decrypt the frame, we just won't 1944 * be able to keep statistics accurate. 1945 * Except for key threshold notifications, should 1946 * we somehow allow the driver to tell us which key 1947 * the hardware used if this flag is set? 1948 */ 1949 if ((status->flag & RX_FLAG_DECRYPTED) && 1950 (status->flag & RX_FLAG_IV_STRIPPED)) 1951 return RX_CONTINUE; 1952 1953 hdrlen = ieee80211_hdrlen(fc); 1954 1955 if (cs) { 1956 keyidx = ieee80211_get_cs_keyid(cs, rx->skb); 1957 1958 if (unlikely(keyidx < 0)) 1959 return RX_DROP_UNUSABLE; 1960 } else { 1961 if (rx->skb->len < 8 + hdrlen) 1962 return RX_DROP_UNUSABLE; /* TODO: count this? */ 1963 /* 1964 * no need to call ieee80211_wep_get_keyidx, 1965 * it verifies a bunch of things we've done already 1966 */ 1967 skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1); 1968 keyidx = keyid >> 6; 1969 } 1970 1971 /* check per-station GTK first, if multicast packet */ 1972 if (is_multicast_ether_addr(hdr->addr1) && rx->sta) 1973 rx->key = rcu_dereference(rx->sta->gtk[keyidx]); 1974 1975 /* if not found, try default key */ 1976 if (!rx->key) { 1977 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 1978 1979 /* 1980 * RSNA-protected unicast frames should always be 1981 * sent with pairwise or station-to-station keys, 1982 * but for WEP we allow using a key index as well. 
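 * If the key found this way is not a WEP key and the frame is unicast, rx->key is cleared again just below, so the frame ends up being dropped to cooked monitor further down rather than handled with a group key.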
1983 */ 1984 if (rx->key && 1985 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 1986 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 1987 !is_multicast_ether_addr(hdr->addr1)) 1988 rx->key = NULL; 1989 } 1990 } 1991 1992 if (rx->key) { 1993 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 1994 return RX_DROP_MONITOR; 1995 1996 /* TODO: add threshold stuff again */ 1997 } else { 1998 return RX_DROP_MONITOR; 1999 } 2000 2001 switch (rx->key->conf.cipher) { 2002 case WLAN_CIPHER_SUITE_WEP40: 2003 case WLAN_CIPHER_SUITE_WEP104: 2004 result = ieee80211_crypto_wep_decrypt(rx); 2005 break; 2006 case WLAN_CIPHER_SUITE_TKIP: 2007 result = ieee80211_crypto_tkip_decrypt(rx); 2008 break; 2009 case WLAN_CIPHER_SUITE_CCMP: 2010 result = ieee80211_crypto_ccmp_decrypt( 2011 rx, IEEE80211_CCMP_MIC_LEN); 2012 break; 2013 case WLAN_CIPHER_SUITE_CCMP_256: 2014 result = ieee80211_crypto_ccmp_decrypt( 2015 rx, IEEE80211_CCMP_256_MIC_LEN); 2016 break; 2017 case WLAN_CIPHER_SUITE_AES_CMAC: 2018 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2019 break; 2020 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2021 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2022 break; 2023 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2024 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2025 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2026 break; 2027 case WLAN_CIPHER_SUITE_GCMP: 2028 case WLAN_CIPHER_SUITE_GCMP_256: 2029 result = ieee80211_crypto_gcmp_decrypt(rx); 2030 break; 2031 default: 2032 result = ieee80211_crypto_hw_decrypt(rx); 2033 } 2034 2035 /* the hdr variable is invalid after the decrypt handlers */ 2036 2037 /* either the frame has been decrypted or will be dropped */ 2038 status->flag |= RX_FLAG_DECRYPTED; 2039 2040 return result; 2041 } 2042 2043 static inline struct ieee80211_fragment_entry * 2044 ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, 2045 unsigned int frag, unsigned int seq, int rx_queue, 2046 struct sk_buff **skb) 2047 { 2048 struct ieee80211_fragment_entry *entry; 2049 2050 entry = &sdata->fragments[sdata->fragment_next++]; 2051 if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) 2052 sdata->fragment_next = 0; 2053 2054 if (!skb_queue_empty(&entry->skb_list)) 2055 __skb_queue_purge(&entry->skb_list); 2056 2057 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2058 *skb = NULL; 2059 entry->first_frag_time = jiffies; 2060 entry->seq = seq; 2061 entry->rx_queue = rx_queue; 2062 entry->last_frag = frag; 2063 entry->check_sequential_pn = false; 2064 entry->extra_len = 0; 2065 2066 return entry; 2067 } 2068 2069 static inline struct ieee80211_fragment_entry * 2070 ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, 2071 unsigned int frag, unsigned int seq, 2072 int rx_queue, struct ieee80211_hdr *hdr) 2073 { 2074 struct ieee80211_fragment_entry *entry; 2075 int i, idx; 2076 2077 idx = sdata->fragment_next; 2078 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2079 struct ieee80211_hdr *f_hdr; 2080 2081 idx--; 2082 if (idx < 0) 2083 idx = IEEE80211_FRAGMENT_MAX - 1; 2084 2085 entry = &sdata->fragments[idx]; 2086 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2087 entry->rx_queue != rx_queue || 2088 entry->last_frag + 1 != frag) 2089 continue; 2090 2091 f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data; 2092 2093 /* 2094 * Check ftype and addresses are equal, else check next fragment 2095 */ 2096 if (((hdr->frame_control ^ f_hdr->frame_control) & 2097 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2098 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2099 
!ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2100 continue; 2101 2102 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2103 __skb_queue_purge(&entry->skb_list); 2104 continue; 2105 } 2106 return entry; 2107 } 2108 2109 return NULL; 2110 } 2111 2112 static ieee80211_rx_result debug_noinline 2113 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2114 { 2115 struct ieee80211_hdr *hdr; 2116 u16 sc; 2117 __le16 fc; 2118 unsigned int frag, seq; 2119 struct ieee80211_fragment_entry *entry; 2120 struct sk_buff *skb; 2121 2122 hdr = (struct ieee80211_hdr *)rx->skb->data; 2123 fc = hdr->frame_control; 2124 2125 if (ieee80211_is_ctl(fc)) 2126 return RX_CONTINUE; 2127 2128 sc = le16_to_cpu(hdr->seq_ctrl); 2129 frag = sc & IEEE80211_SCTL_FRAG; 2130 2131 if (is_multicast_ether_addr(hdr->addr1)) { 2132 I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount); 2133 goto out_no_led; 2134 } 2135 2136 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2137 goto out; 2138 2139 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2140 2141 if (skb_linearize(rx->skb)) 2142 return RX_DROP_UNUSABLE; 2143 2144 /* 2145 * skb_linearize() might change the skb->data and 2146 * previously cached variables (in this case, hdr) need to 2147 * be refreshed with the new data. 2148 */ 2149 hdr = (struct ieee80211_hdr *)rx->skb->data; 2150 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2151 2152 if (frag == 0) { 2153 /* This is the first fragment of a new frame. */ 2154 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 2155 rx->seqno_idx, &(rx->skb)); 2156 if (rx->key && 2157 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2158 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2159 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2160 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2161 ieee80211_has_protected(fc)) { 2162 int queue = rx->security_idx; 2163 2164 /* Store CCMP/GCMP PN so that we can verify that the 2165 * next fragment has a sequential PN value. 2166 */ 2167 entry->check_sequential_pn = true; 2168 memcpy(entry->last_pn, 2169 rx->key->u.ccmp.rx_pn[queue], 2170 IEEE80211_CCMP_PN_LEN); 2171 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2172 u.ccmp.rx_pn) != 2173 offsetof(struct ieee80211_key, 2174 u.gcmp.rx_pn)); 2175 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2176 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2177 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2178 IEEE80211_GCMP_PN_LEN); 2179 } 2180 return RX_QUEUED; 2181 } 2182 2183 /* This is a fragment for a frame that should already be pending in 2184 * fragment cache. Add this fragment to the end of the pending entry. 2185 */ 2186 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, 2187 rx->seqno_idx, hdr); 2188 if (!entry) { 2189 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2190 return RX_DROP_MONITOR; 2191 } 2192 2193 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2194 * MPDU PN values are not incrementing in steps of 1." 
2195 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2196 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2197 */ 2198 if (entry->check_sequential_pn) { 2199 int i; 2200 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2201 int queue; 2202 2203 if (!rx->key || 2204 (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && 2205 rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && 2206 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && 2207 rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) 2208 return RX_DROP_UNUSABLE; 2209 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2210 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2211 pn[i]++; 2212 if (pn[i]) 2213 break; 2214 } 2215 queue = rx->security_idx; 2216 rpn = rx->key->u.ccmp.rx_pn[queue]; 2217 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2218 return RX_DROP_UNUSABLE; 2219 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2220 } 2221 2222 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2223 __skb_queue_tail(&entry->skb_list, rx->skb); 2224 entry->last_frag = frag; 2225 entry->extra_len += rx->skb->len; 2226 if (ieee80211_has_morefrags(fc)) { 2227 rx->skb = NULL; 2228 return RX_QUEUED; 2229 } 2230 2231 rx->skb = __skb_dequeue(&entry->skb_list); 2232 if (skb_tailroom(rx->skb) < entry->extra_len) { 2233 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2234 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2235 GFP_ATOMIC))) { 2236 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2237 __skb_queue_purge(&entry->skb_list); 2238 return RX_DROP_UNUSABLE; 2239 } 2240 } 2241 while ((skb = __skb_dequeue(&entry->skb_list))) { 2242 skb_put_data(rx->skb, skb->data, skb->len); 2243 dev_kfree_skb(skb); 2244 } 2245 2246 out: 2247 ieee80211_led_rx(rx->local); 2248 out_no_led: 2249 if (rx->sta) 2250 rx->sta->rx_stats.packets++; 2251 return RX_CONTINUE; 2252 } 2253 2254 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2255 { 2256 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2257 return -EACCES; 2258 2259 return 0; 2260 } 2261 2262 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2263 { 2264 struct sk_buff *skb = rx->skb; 2265 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2266 2267 /* 2268 * Pass through unencrypted frames if the hardware has 2269 * decrypted them already. 2270 */ 2271 if (status->flag & RX_FLAG_DECRYPTED) 2272 return 0; 2273 2274 /* Drop unencrypted frames if key is set. */ 2275 if (unlikely(!ieee80211_has_protected(fc) && 2276 !ieee80211_is_nullfunc(fc) && 2277 ieee80211_is_data(fc) && rx->key)) 2278 return -EACCES; 2279 2280 return 0; 2281 } 2282 2283 static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2284 { 2285 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2286 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2287 __le16 fc = hdr->frame_control; 2288 2289 /* 2290 * Pass through unencrypted frames if the hardware has 2291 * decrypted them already. 
2292 */ 2293 if (status->flag & RX_FLAG_DECRYPTED) 2294 return 0; 2295 2296 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2297 if (unlikely(!ieee80211_has_protected(fc) && 2298 ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && 2299 rx->key)) { 2300 if (ieee80211_is_deauth(fc) || 2301 ieee80211_is_disassoc(fc)) 2302 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2303 rx->skb->data, 2304 rx->skb->len); 2305 return -EACCES; 2306 } 2307 /* BIP does not use Protected field, so need to check MMIE */ 2308 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2309 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2310 if (ieee80211_is_deauth(fc) || 2311 ieee80211_is_disassoc(fc)) 2312 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2313 rx->skb->data, 2314 rx->skb->len); 2315 return -EACCES; 2316 } 2317 /* 2318 * When using MFP, Action frames are not allowed prior to 2319 * having configured keys. 2320 */ 2321 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2322 ieee80211_is_robust_mgmt_frame(rx->skb))) 2323 return -EACCES; 2324 } 2325 2326 return 0; 2327 } 2328 2329 static int 2330 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2331 { 2332 struct ieee80211_sub_if_data *sdata = rx->sdata; 2333 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2334 bool check_port_control = false; 2335 struct ethhdr *ehdr; 2336 int ret; 2337 2338 *port_control = false; 2339 if (ieee80211_has_a4(hdr->frame_control) && 2340 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2341 return -1; 2342 2343 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2344 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2345 2346 if (!sdata->u.mgd.use_4addr) 2347 return -1; 2348 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2349 check_port_control = true; 2350 } 2351 2352 if (is_multicast_ether_addr(hdr->addr1) && 2353 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2354 return -1; 2355 2356 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2357 if (ret < 0) 2358 return ret; 2359 2360 ehdr = (struct ethhdr *) rx->skb->data; 2361 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2362 *port_control = true; 2363 else if (check_port_control) 2364 return -1; 2365 2366 return 0; 2367 } 2368 2369 /* 2370 * requires that rx->skb is a frame with ethernet header 2371 */ 2372 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2373 { 2374 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2375 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2376 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2377 2378 /* 2379 * Allow EAPOL frames to us/the PAE group address regardless 2380 * of whether the frame was encrypted or not. 
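 * This is what allows the 802.1X/EAPOL handshake itself to complete before any pairwise key has been installed for the peer.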
2381 */ 2382 if (ehdr->h_proto == rx->sdata->control_port_protocol && 2383 (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || 2384 ether_addr_equal(ehdr->h_dest, pae_group_addr))) 2385 return true; 2386 2387 if (ieee80211_802_1x_port_control(rx) || 2388 ieee80211_drop_unencrypted(rx, fc)) 2389 return false; 2390 2391 return true; 2392 } 2393 2394 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2395 struct ieee80211_rx_data *rx) 2396 { 2397 struct ieee80211_sub_if_data *sdata = rx->sdata; 2398 struct net_device *dev = sdata->dev; 2399 2400 if (unlikely((skb->protocol == sdata->control_port_protocol || 2401 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2402 sdata->control_port_over_nl80211)) { 2403 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2404 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2405 2406 cfg80211_rx_control_port(dev, skb, noencrypt); 2407 dev_kfree_skb(skb); 2408 } else { 2409 /* deliver to local stack */ 2410 if (rx->napi) 2411 napi_gro_receive(rx->napi, skb); 2412 else 2413 netif_receive_skb(skb); 2414 } 2415 } 2416 2417 /* 2418 * requires that rx->skb is a frame with ethernet header 2419 */ 2420 static void 2421 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2422 { 2423 struct ieee80211_sub_if_data *sdata = rx->sdata; 2424 struct net_device *dev = sdata->dev; 2425 struct sk_buff *skb, *xmit_skb; 2426 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2427 struct sta_info *dsta; 2428 2429 skb = rx->skb; 2430 xmit_skb = NULL; 2431 2432 ieee80211_rx_stats(dev, skb->len); 2433 2434 if (rx->sta) { 2435 /* The seqno index has the same property as needed 2436 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2437 * for non-QoS-data frames. Here we know it's a data 2438 * frame, so count MSDUs. 2439 */ 2440 u64_stats_update_begin(&rx->sta->rx_stats.syncp); 2441 rx->sta->rx_stats.msdu[rx->seqno_idx]++; 2442 u64_stats_update_end(&rx->sta->rx_stats.syncp); 2443 } 2444 2445 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2446 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2447 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2448 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2449 if (is_multicast_ether_addr(ehdr->h_dest) && 2450 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2451 /* 2452 * send multicast frames both to higher layers in 2453 * local net stack and back to the wireless medium 2454 */ 2455 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2456 if (!xmit_skb) 2457 net_info_ratelimited("%s: failed to clone multicast frame\n", 2458 dev->name); 2459 } else if (!is_multicast_ether_addr(ehdr->h_dest)) { 2460 dsta = sta_info_get(sdata, skb->data); 2461 if (dsta) { 2462 /* 2463 * The destination station is associated to 2464 * this AP (in this VLAN), so send the frame 2465 * directly to it and do not pass it to local 2466 * net stack. 2467 */ 2468 xmit_skb = skb; 2469 skb = NULL; 2470 } 2471 } 2472 } 2473 2474 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2475 if (skb) { 2476 /* 'align' will only take the values 0 or 2 here since all 2477 * frames are required to be aligned to 2-byte boundaries 2478 * when being passed to mac80211; the code here works just 2479 * as well if that isn't true, but mac80211 assumes it can 2480 * access fields as 2-byte aligned (e.g.
for ether_addr_equal) 2481 */ 2482 int align; 2483 2484 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2485 if (align) { 2486 if (WARN_ON(skb_headroom(skb) < 3)) { 2487 dev_kfree_skb(skb); 2488 skb = NULL; 2489 } else { 2490 u8 *data = skb->data; 2491 size_t len = skb_headlen(skb); 2492 skb->data -= align; 2493 memmove(skb->data, data, len); 2494 skb_set_tail_pointer(skb, len); 2495 } 2496 } 2497 } 2498 #endif 2499 2500 if (skb) { 2501 skb->protocol = eth_type_trans(skb, dev); 2502 memset(skb->cb, 0, sizeof(skb->cb)); 2503 2504 ieee80211_deliver_skb_to_local_stack(skb, rx); 2505 } 2506 2507 if (xmit_skb) { 2508 /* 2509 * Send to wireless media and increase priority by 256 to 2510 * keep the received priority instead of reclassifying 2511 * the frame (see cfg80211_classify8021d). 2512 */ 2513 xmit_skb->priority += 256; 2514 xmit_skb->protocol = htons(ETH_P_802_3); 2515 skb_reset_network_header(xmit_skb); 2516 skb_reset_mac_header(xmit_skb); 2517 dev_queue_xmit(xmit_skb); 2518 } 2519 } 2520 2521 static ieee80211_rx_result debug_noinline 2522 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 2523 { 2524 struct net_device *dev = rx->sdata->dev; 2525 struct sk_buff *skb = rx->skb; 2526 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2527 __le16 fc = hdr->frame_control; 2528 struct sk_buff_head frame_list; 2529 struct ethhdr ethhdr; 2530 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 2531 2532 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2533 check_da = NULL; 2534 check_sa = NULL; 2535 } else switch (rx->sdata->vif.type) { 2536 case NL80211_IFTYPE_AP: 2537 case NL80211_IFTYPE_AP_VLAN: 2538 check_da = NULL; 2539 break; 2540 case NL80211_IFTYPE_STATION: 2541 if (!rx->sta || 2542 !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 2543 check_sa = NULL; 2544 break; 2545 case NL80211_IFTYPE_MESH_POINT: 2546 check_sa = NULL; 2547 break; 2548 default: 2549 break; 2550 } 2551 2552 skb->dev = dev; 2553 __skb_queue_head_init(&frame_list); 2554 2555 if (ieee80211_data_to_8023_exthdr(skb, &ethhdr, 2556 rx->sdata->vif.addr, 2557 rx->sdata->vif.type, 2558 data_offset)) 2559 return RX_DROP_UNUSABLE; 2560 2561 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 2562 rx->sdata->vif.type, 2563 rx->local->hw.extra_tx_headroom, 2564 check_da, check_sa); 2565 2566 while (!skb_queue_empty(&frame_list)) { 2567 rx->skb = __skb_dequeue(&frame_list); 2568 2569 if (!ieee80211_frame_allowed(rx, fc)) { 2570 dev_kfree_skb(rx->skb); 2571 continue; 2572 } 2573 2574 ieee80211_deliver_skb(rx); 2575 } 2576 2577 return RX_QUEUED; 2578 } 2579 2580 static ieee80211_rx_result debug_noinline 2581 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 2582 { 2583 struct sk_buff *skb = rx->skb; 2584 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2585 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2586 __le16 fc = hdr->frame_control; 2587 2588 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 2589 return RX_CONTINUE; 2590 2591 if (unlikely(!ieee80211_is_data(fc))) 2592 return RX_CONTINUE; 2593 2594 if (unlikely(!ieee80211_is_data_present(fc))) 2595 return RX_DROP_MONITOR; 2596 2597 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 2598 switch (rx->sdata->vif.type) { 2599 case NL80211_IFTYPE_AP_VLAN: 2600 if (!rx->sdata->u.vlan.sta) 2601 return RX_DROP_UNUSABLE; 2602 break; 2603 case NL80211_IFTYPE_STATION: 2604 if (!rx->sdata->u.mgd.use_4addr) 2605 return RX_DROP_UNUSABLE; 2606 break; 2607 default: 2608 return RX_DROP_UNUSABLE; 2609 }
2610 } 2611 2612 if (is_multicast_ether_addr(hdr->addr1)) 2613 return RX_DROP_UNUSABLE; 2614 2615 return __ieee80211_rx_h_amsdu(rx, 0); 2616 } 2617 2618 #ifdef CONFIG_MAC80211_MESH 2619 static ieee80211_rx_result 2620 ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) 2621 { 2622 struct ieee80211_hdr *fwd_hdr, *hdr; 2623 struct ieee80211_tx_info *info; 2624 struct ieee80211s_hdr *mesh_hdr; 2625 struct sk_buff *skb = rx->skb, *fwd_skb; 2626 struct ieee80211_local *local = rx->local; 2627 struct ieee80211_sub_if_data *sdata = rx->sdata; 2628 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2629 u16 ac, q, hdrlen; 2630 2631 hdr = (struct ieee80211_hdr *) skb->data; 2632 hdrlen = ieee80211_hdrlen(hdr->frame_control); 2633 2634 /* make sure fixed part of mesh header is there, also checks skb len */ 2635 if (!pskb_may_pull(rx->skb, hdrlen + 6)) 2636 return RX_DROP_MONITOR; 2637 2638 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2639 2640 /* make sure full mesh header is there, also checks skb len */ 2641 if (!pskb_may_pull(rx->skb, 2642 hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr))) 2643 return RX_DROP_MONITOR; 2644 2645 /* reload pointers */ 2646 hdr = (struct ieee80211_hdr *) skb->data; 2647 mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen); 2648 2649 if (ieee80211_drop_unencrypted(rx, hdr->frame_control)) 2650 return RX_DROP_MONITOR; 2651 2652 /* frame is in RMC, don't forward */ 2653 if (ieee80211_is_data(hdr->frame_control) && 2654 is_multicast_ether_addr(hdr->addr1) && 2655 mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr)) 2656 return RX_DROP_MONITOR; 2657 2658 if (!ieee80211_is_data(hdr->frame_control)) 2659 return RX_CONTINUE; 2660 2661 if (!mesh_hdr->ttl) 2662 return RX_DROP_MONITOR; 2663 2664 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2665 struct mesh_path *mppath; 2666 char *proxied_addr; 2667 char *mpp_addr; 2668 2669 if (is_multicast_ether_addr(hdr->addr1)) { 2670 mpp_addr = hdr->addr3; 2671 proxied_addr = mesh_hdr->eaddr1; 2672 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) == 2673 MESH_FLAGS_AE_A5_A6) { 2674 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2675 mpp_addr = hdr->addr4; 2676 proxied_addr = mesh_hdr->eaddr2; 2677 } else { 2678 return RX_DROP_MONITOR; 2679 } 2680 2681 rcu_read_lock(); 2682 mppath = mpp_path_lookup(sdata, proxied_addr); 2683 if (!mppath) { 2684 mpp_path_add(sdata, proxied_addr, mpp_addr); 2685 } else { 2686 spin_lock_bh(&mppath->state_lock); 2687 if (!ether_addr_equal(mppath->mpp, mpp_addr)) 2688 memcpy(mppath->mpp, mpp_addr, ETH_ALEN); 2689 mppath->exp_time = jiffies; 2690 spin_unlock_bh(&mppath->state_lock); 2691 } 2692 rcu_read_unlock(); 2693 } 2694 2695 /* Frame has reached destination. 
Don't forward */ 2696 if (!is_multicast_ether_addr(hdr->addr1) && 2697 ether_addr_equal(sdata->vif.addr, hdr->addr3)) 2698 return RX_CONTINUE; 2699 2700 ac = ieee80211_select_queue_80211(sdata, skb, hdr); 2701 q = sdata->vif.hw_queue[ac]; 2702 if (ieee80211_queue_stopped(&local->hw, q)) { 2703 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion); 2704 return RX_DROP_MONITOR; 2705 } 2706 skb_set_queue_mapping(skb, q); 2707 2708 if (!--mesh_hdr->ttl) { 2709 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 2710 goto out; 2711 } 2712 2713 if (!ifmsh->mshcfg.dot11MeshForwarding) 2714 goto out; 2715 2716 fwd_skb = skb_copy_expand(skb, local->tx_headroom + 2717 sdata->encrypt_headroom, 0, GFP_ATOMIC); 2718 if (!fwd_skb) 2719 goto out; 2720 2721 fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data; 2722 fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY); 2723 info = IEEE80211_SKB_CB(fwd_skb); 2724 memset(info, 0, sizeof(*info)); 2725 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 2726 info->control.vif = &rx->sdata->vif; 2727 info->control.jiffies = jiffies; 2728 if (is_multicast_ether_addr(fwd_hdr->addr1)) { 2729 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 2730 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 2731 /* update power mode indication when forwarding */ 2732 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 2733 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 2734 /* mesh power mode flags updated in mesh_nexthop_lookup */ 2735 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2736 } else { 2737 /* unable to resolve next hop */ 2738 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 2739 fwd_hdr->addr3, 0, 2740 WLAN_REASON_MESH_PATH_NOFORWARD, 2741 fwd_hdr->addr2); 2742 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 2743 kfree_skb(fwd_skb); 2744 return RX_DROP_MONITOR; 2745 } 2746 2747 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2748 ieee80211_add_pending_skb(local, fwd_skb); 2749 out: 2750 if (is_multicast_ether_addr(hdr->addr1)) 2751 return RX_CONTINUE; 2752 return RX_DROP_MONITOR; 2753 } 2754 #endif 2755 2756 static ieee80211_rx_result debug_noinline 2757 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 2758 { 2759 struct ieee80211_sub_if_data *sdata = rx->sdata; 2760 struct ieee80211_local *local = rx->local; 2761 struct net_device *dev = sdata->dev; 2762 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2763 __le16 fc = hdr->frame_control; 2764 bool port_control; 2765 int err; 2766 2767 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 2768 return RX_CONTINUE; 2769 2770 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 2771 return RX_DROP_MONITOR; 2772 2773 /* 2774 * Send unexpected-4addr-frame event to hostapd. For older versions, 2775 * also drop the frame to cooked monitor interfaces. 
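 * Userspace (e.g. hostapd) typically reacts to this event by moving the station onto a 4-address AP_VLAN interface, as with the nullfunc case handled earlier.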
2776 */ 2777 if (ieee80211_has_a4(hdr->frame_control) && 2778 sdata->vif.type == NL80211_IFTYPE_AP) { 2779 if (rx->sta && 2780 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 2781 cfg80211_rx_unexpected_4addr_frame( 2782 rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC); 2783 return RX_DROP_MONITOR; 2784 } 2785 2786 err = __ieee80211_data_to_8023(rx, &port_control); 2787 if (unlikely(err)) 2788 return RX_DROP_UNUSABLE; 2789 2790 if (!ieee80211_frame_allowed(rx, fc)) 2791 return RX_DROP_MONITOR; 2792 2793 /* directly handle TDLS channel switch requests/responses */ 2794 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 2795 cpu_to_be16(ETH_P_TDLS))) { 2796 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 2797 2798 if (pskb_may_pull(rx->skb, 2799 offsetof(struct ieee80211_tdls_data, u)) && 2800 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 2801 tf->category == WLAN_CATEGORY_TDLS && 2802 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 2803 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 2804 skb_queue_tail(&local->skb_queue_tdls_chsw, rx->skb); 2805 schedule_work(&local->tdls_chsw_work); 2806 if (rx->sta) 2807 rx->sta->rx_stats.packets++; 2808 2809 return RX_QUEUED; 2810 } 2811 } 2812 2813 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 2814 unlikely(port_control) && sdata->bss) { 2815 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 2816 u.ap); 2817 dev = sdata->dev; 2818 rx->sdata = sdata; 2819 } 2820 2821 rx->skb->dev = dev; 2822 2823 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 2824 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 2825 !is_multicast_ether_addr( 2826 ((struct ethhdr *)rx->skb->data)->h_dest) && 2827 (!local->scanning && 2828 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 2829 mod_timer(&local->dynamic_ps_timer, jiffies + 2830 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 2831 2832 ieee80211_deliver_skb(rx); 2833 2834 return RX_QUEUED; 2835 } 2836 2837 static ieee80211_rx_result debug_noinline 2838 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 2839 { 2840 struct sk_buff *skb = rx->skb; 2841 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 2842 struct tid_ampdu_rx *tid_agg_rx; 2843 u16 start_seq_num; 2844 u16 tid; 2845 2846 if (likely(!ieee80211_is_ctl(bar->frame_control))) 2847 return RX_CONTINUE; 2848 2849 if (ieee80211_is_back_req(bar->frame_control)) { 2850 struct { 2851 __le16 control, start_seq_num; 2852 } __packed bar_data; 2853 struct ieee80211_event event = { 2854 .type = BAR_RX_EVENT, 2855 }; 2856 2857 if (!rx->sta) 2858 return RX_DROP_MONITOR; 2859 2860 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 2861 &bar_data, sizeof(bar_data))) 2862 return RX_DROP_MONITOR; 2863 2864 tid = le16_to_cpu(bar_data.control) >> 12; 2865 2866 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 2867 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 2868 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 2869 WLAN_BACK_RECIPIENT, 2870 WLAN_REASON_QSTA_REQUIRE_SETUP); 2871 2872 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 2873 if (!tid_agg_rx) 2874 return RX_DROP_MONITOR; 2875 2876 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 2877 event.u.ba.tid = tid; 2878 event.u.ba.ssn = start_seq_num; 2879 event.u.ba.sta = &rx->sta->sta; 2880 2881 /* reset session timer */ 2882 if (tid_agg_rx->timeout) 2883 mod_timer(&tid_agg_rx->session_timer, 2884 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 2885 
2886 spin_lock(&tid_agg_rx->reorder_lock); 2887 /* release stored frames up to start of BAR */ 2888 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 2889 start_seq_num, frames); 2890 spin_unlock(&tid_agg_rx->reorder_lock); 2891 2892 drv_event_callback(rx->local, rx->sdata, &event); 2893 2894 kfree_skb(skb); 2895 return RX_QUEUED; 2896 } 2897 2898 /* 2899 * After this point, we only want management frames, 2900 * so we can drop all remaining control frames to 2901 * cooked monitor interfaces. 2902 */ 2903 return RX_DROP_MONITOR; 2904 } 2905 2906 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 2907 struct ieee80211_mgmt *mgmt, 2908 size_t len) 2909 { 2910 struct ieee80211_local *local = sdata->local; 2911 struct sk_buff *skb; 2912 struct ieee80211_mgmt *resp; 2913 2914 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 2915 /* Not to own unicast address */ 2916 return; 2917 } 2918 2919 if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) || 2920 !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) { 2921 /* Not from the current AP or not associated yet. */ 2922 return; 2923 } 2924 2925 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 2926 /* Too short SA Query request frame */ 2927 return; 2928 } 2929 2930 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 2931 if (skb == NULL) 2932 return; 2933 2934 skb_reserve(skb, local->hw.extra_tx_headroom); 2935 resp = skb_put_zero(skb, 24); 2936 memcpy(resp->da, mgmt->sa, ETH_ALEN); 2937 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 2938 memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN); 2939 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 2940 IEEE80211_STYPE_ACTION); 2941 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 2942 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 2943 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 2944 memcpy(resp->u.action.u.sa_query.trans_id, 2945 mgmt->u.action.u.sa_query.trans_id, 2946 WLAN_SA_QUERY_TR_ID_LEN); 2947 2948 ieee80211_tx_skb(sdata, skb); 2949 } 2950 2951 static ieee80211_rx_result debug_noinline 2952 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 2953 { 2954 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2955 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2956 2957 /* 2958 * From here on, look only at management frames. 2959 * Data and control frames are already handled, 2960 * and unknown (reserved) frames are useless. 
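 * The 24-byte minimum enforced below corresponds to the fixed three-address management frame header (frame control, duration, addr1-addr3 and sequence control).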
2961 */ 2962 if (rx->skb->len < 24) 2963 return RX_DROP_MONITOR; 2964 2965 if (!ieee80211_is_mgmt(mgmt->frame_control)) 2966 return RX_DROP_MONITOR; 2967 2968 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 2969 ieee80211_is_beacon(mgmt->frame_control) && 2970 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 2971 int sig = 0; 2972 2973 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 2974 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 2975 sig = status->signal; 2976 2977 cfg80211_report_obss_beacon(rx->local->hw.wiphy, 2978 rx->skb->data, rx->skb->len, 2979 status->freq, sig); 2980 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 2981 } 2982 2983 if (ieee80211_drop_unencrypted_mgmt(rx)) 2984 return RX_DROP_UNUSABLE; 2985 2986 return RX_CONTINUE; 2987 } 2988 2989 static ieee80211_rx_result debug_noinline 2990 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 2991 { 2992 struct ieee80211_local *local = rx->local; 2993 struct ieee80211_sub_if_data *sdata = rx->sdata; 2994 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 2995 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2996 int len = rx->skb->len; 2997 2998 if (!ieee80211_is_action(mgmt->frame_control)) 2999 return RX_CONTINUE; 3000 3001 /* drop too small frames */ 3002 if (len < IEEE80211_MIN_ACTION_SIZE) 3003 return RX_DROP_UNUSABLE; 3004 3005 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3006 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3007 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3008 return RX_DROP_UNUSABLE; 3009 3010 switch (mgmt->u.action.category) { 3011 case WLAN_CATEGORY_HT: 3012 /* reject HT action frames from stations not supporting HT */ 3013 if (!rx->sta->sta.ht_cap.ht_supported) 3014 goto invalid; 3015 3016 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3017 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3018 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3019 sdata->vif.type != NL80211_IFTYPE_AP && 3020 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3021 break; 3022 3023 /* verify action & smps_control/chanwidth are present */ 3024 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3025 goto invalid; 3026 3027 switch (mgmt->u.action.u.ht_smps.action) { 3028 case WLAN_HT_ACTION_SMPS: { 3029 struct ieee80211_supported_band *sband; 3030 enum ieee80211_smps_mode smps_mode; 3031 struct sta_opmode_info sta_opmode = {}; 3032 3033 /* convert to HT capability */ 3034 switch (mgmt->u.action.u.ht_smps.smps_control) { 3035 case WLAN_HT_SMPS_CONTROL_DISABLED: 3036 smps_mode = IEEE80211_SMPS_OFF; 3037 break; 3038 case WLAN_HT_SMPS_CONTROL_STATIC: 3039 smps_mode = IEEE80211_SMPS_STATIC; 3040 break; 3041 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3042 smps_mode = IEEE80211_SMPS_DYNAMIC; 3043 break; 3044 default: 3045 goto invalid; 3046 } 3047 3048 /* if no change do nothing */ 3049 if (rx->sta->sta.smps_mode == smps_mode) 3050 goto handled; 3051 rx->sta->sta.smps_mode = smps_mode; 3052 sta_opmode.smps_mode = 3053 ieee80211_smps_mode_to_smps_mode(smps_mode); 3054 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3055 3056 sband = rx->local->hw.wiphy->bands[status->band]; 3057 3058 rate_control_rate_update(local, sband, rx->sta, 3059 IEEE80211_RC_SMPS_CHANGED); 3060 cfg80211_sta_opmode_change_notify(sdata->dev, 3061 rx->sta->addr, 3062 &sta_opmode, 3063 GFP_KERNEL); 3064 goto handled; 3065 } 3066 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3067 struct ieee80211_supported_band *sband; 3068 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3069 enum ieee80211_sta_rx_bandwidth 
max_bw, new_bw; 3070 struct sta_opmode_info sta_opmode = {}; 3071 3072 /* If it doesn't support 40 MHz it can't change ... */ 3073 if (!(rx->sta->sta.ht_cap.cap & 3074 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3075 goto handled; 3076 3077 if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ) 3078 max_bw = IEEE80211_STA_RX_BW_20; 3079 else 3080 max_bw = ieee80211_sta_cap_rx_bw(rx->sta); 3081 3082 /* set cur_max_bandwidth and recalc sta bw */ 3083 rx->sta->cur_max_bandwidth = max_bw; 3084 new_bw = ieee80211_sta_cur_vht_bw(rx->sta); 3085 3086 if (rx->sta->sta.bandwidth == new_bw) 3087 goto handled; 3088 3089 rx->sta->sta.bandwidth = new_bw; 3090 sband = rx->local->hw.wiphy->bands[status->band]; 3091 sta_opmode.bw = 3092 ieee80211_sta_rx_bw_to_chan_width(rx->sta); 3093 sta_opmode.changed = STA_OPMODE_MAX_BW_CHANGED; 3094 3095 rate_control_rate_update(local, sband, rx->sta, 3096 IEEE80211_RC_BW_CHANGED); 3097 cfg80211_sta_opmode_change_notify(sdata->dev, 3098 rx->sta->addr, 3099 &sta_opmode, 3100 GFP_KERNEL); 3101 goto handled; 3102 } 3103 default: 3104 goto invalid; 3105 } 3106 3107 break; 3108 case WLAN_CATEGORY_PUBLIC: 3109 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3110 goto invalid; 3111 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3112 break; 3113 if (!rx->sta) 3114 break; 3115 if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) 3116 break; 3117 if (mgmt->u.action.u.ext_chan_switch.action_code != 3118 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3119 break; 3120 if (len < offsetof(struct ieee80211_mgmt, 3121 u.action.u.ext_chan_switch.variable)) 3122 goto invalid; 3123 goto queue; 3124 case WLAN_CATEGORY_VHT: 3125 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3126 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3127 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3128 sdata->vif.type != NL80211_IFTYPE_AP && 3129 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3130 break; 3131 3132 /* verify action code is present */ 3133 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3134 goto invalid; 3135 3136 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3137 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3138 /* verify opmode is present */ 3139 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3140 goto invalid; 3141 goto queue; 3142 } 3143 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3144 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3145 goto invalid; 3146 goto queue; 3147 } 3148 default: 3149 break; 3150 } 3151 break; 3152 case WLAN_CATEGORY_BACK: 3153 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3154 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3155 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3156 sdata->vif.type != NL80211_IFTYPE_AP && 3157 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3158 break; 3159 3160 /* verify action_code is present */ 3161 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3162 break; 3163 3164 switch (mgmt->u.action.u.addba_req.action_code) { 3165 case WLAN_ACTION_ADDBA_REQ: 3166 if (len < (IEEE80211_MIN_ACTION_SIZE + 3167 sizeof(mgmt->u.action.u.addba_req))) 3168 goto invalid; 3169 break; 3170 case WLAN_ACTION_ADDBA_RESP: 3171 if (len < (IEEE80211_MIN_ACTION_SIZE + 3172 sizeof(mgmt->u.action.u.addba_resp))) 3173 goto invalid; 3174 break; 3175 case WLAN_ACTION_DELBA: 3176 if (len < (IEEE80211_MIN_ACTION_SIZE + 3177 sizeof(mgmt->u.action.u.delba))) 3178 goto invalid; 3179 break; 3180 default: 3181 goto invalid; 3182 } 3183 3184 goto queue; 3185 case WLAN_CATEGORY_SPECTRUM_MGMT: 3186 /* verify action_code is present */ 3187 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3188 break; 3189 3190 switch (mgmt->u.action.u.measurement.action_code) 
{ 3191 case WLAN_ACTION_SPCT_MSR_REQ: 3192 if (status->band != NL80211_BAND_5GHZ) 3193 break; 3194 3195 if (len < (IEEE80211_MIN_ACTION_SIZE + 3196 sizeof(mgmt->u.action.u.measurement))) 3197 break; 3198 3199 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3200 break; 3201 3202 ieee80211_process_measurement_req(sdata, mgmt, len); 3203 goto handled; 3204 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3205 u8 *bssid; 3206 if (len < (IEEE80211_MIN_ACTION_SIZE + 3207 sizeof(mgmt->u.action.u.chan_switch))) 3208 break; 3209 3210 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3211 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3212 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3213 break; 3214 3215 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3216 bssid = sdata->u.mgd.bssid; 3217 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3218 bssid = sdata->u.ibss.bssid; 3219 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3220 bssid = mgmt->sa; 3221 else 3222 break; 3223 3224 if (!ether_addr_equal(mgmt->bssid, bssid)) 3225 break; 3226 3227 goto queue; 3228 } 3229 } 3230 break; 3231 case WLAN_CATEGORY_SA_QUERY: 3232 if (len < (IEEE80211_MIN_ACTION_SIZE + 3233 sizeof(mgmt->u.action.u.sa_query))) 3234 break; 3235 3236 switch (mgmt->u.action.u.sa_query.action) { 3237 case WLAN_ACTION_SA_QUERY_REQUEST: 3238 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3239 break; 3240 ieee80211_process_sa_query_req(sdata, mgmt, len); 3241 goto handled; 3242 } 3243 break; 3244 case WLAN_CATEGORY_SELF_PROTECTED: 3245 if (len < (IEEE80211_MIN_ACTION_SIZE + 3246 sizeof(mgmt->u.action.u.self_prot.action_code))) 3247 break; 3248 3249 switch (mgmt->u.action.u.self_prot.action_code) { 3250 case WLAN_SP_MESH_PEERING_OPEN: 3251 case WLAN_SP_MESH_PEERING_CLOSE: 3252 case WLAN_SP_MESH_PEERING_CONFIRM: 3253 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3254 goto invalid; 3255 if (sdata->u.mesh.user_mpm) 3256 /* userspace handles this frame */ 3257 break; 3258 goto queue; 3259 case WLAN_SP_MGK_INFORM: 3260 case WLAN_SP_MGK_ACK: 3261 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3262 goto invalid; 3263 break; 3264 } 3265 break; 3266 case WLAN_CATEGORY_MESH_ACTION: 3267 if (len < (IEEE80211_MIN_ACTION_SIZE + 3268 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3269 break; 3270 3271 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3272 break; 3273 if (mesh_action_is_path_sel(mgmt) && 3274 !mesh_path_sel_is_hwmp(sdata)) 3275 break; 3276 goto queue; 3277 } 3278 3279 return RX_CONTINUE; 3280 3281 invalid: 3282 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3283 /* will return in the next handlers */ 3284 return RX_CONTINUE; 3285 3286 handled: 3287 if (rx->sta) 3288 rx->sta->rx_stats.packets++; 3289 dev_kfree_skb(rx->skb); 3290 return RX_QUEUED; 3291 3292 queue: 3293 skb_queue_tail(&sdata->skb_queue, rx->skb); 3294 ieee80211_queue_work(&local->hw, &sdata->work); 3295 if (rx->sta) 3296 rx->sta->rx_stats.packets++; 3297 return RX_QUEUED; 3298 } 3299 3300 static ieee80211_rx_result debug_noinline 3301 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3302 { 3303 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3304 int sig = 0; 3305 3306 /* skip known-bad action frames and return them in the next handler */ 3307 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3308 return RX_CONTINUE; 3309 3310 /* 3311 * Getting here means the kernel doesn't know how to handle 3312 * it, but maybe userspace does ... 
include returned frames 3313 * so userspace can register for those to know whether ones 3314 * it transmitted were processed or returned. 3315 */ 3316 3317 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3318 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3319 sig = status->signal; 3320 3321 if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig, 3322 rx->skb->data, rx->skb->len, 0)) { 3323 if (rx->sta) 3324 rx->sta->rx_stats.packets++; 3325 dev_kfree_skb(rx->skb); 3326 return RX_QUEUED; 3327 } 3328 3329 return RX_CONTINUE; 3330 } 3331 3332 static ieee80211_rx_result debug_noinline 3333 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 3334 { 3335 struct ieee80211_local *local = rx->local; 3336 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3337 struct sk_buff *nskb; 3338 struct ieee80211_sub_if_data *sdata = rx->sdata; 3339 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3340 3341 if (!ieee80211_is_action(mgmt->frame_control)) 3342 return RX_CONTINUE; 3343 3344 /* 3345 * For AP mode, hostapd is responsible for handling any action 3346 * frames that we didn't handle, including returning unknown 3347 * ones. For all other modes we will return them to the sender, 3348 * setting the 0x80 bit in the action category, as required by 3349 * 802.11-2012 9.24.4. 3350 * Newer versions of hostapd shall also use the management frame 3351 * registration mechanisms, but older ones still use cooked 3352 * monitor interfaces so push all frames there. 3353 */ 3354 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 3355 (sdata->vif.type == NL80211_IFTYPE_AP || 3356 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 3357 return RX_DROP_MONITOR; 3358 3359 if (is_multicast_ether_addr(mgmt->da)) 3360 return RX_DROP_MONITOR; 3361 3362 /* do not return rejected action frames */ 3363 if (mgmt->u.action.category & 0x80) 3364 return RX_DROP_UNUSABLE; 3365 3366 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 3367 GFP_ATOMIC); 3368 if (nskb) { 3369 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 3370 3371 nmgmt->u.action.category |= 0x80; 3372 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 3373 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 3374 3375 memset(nskb->cb, 0, sizeof(nskb->cb)); 3376 3377 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 3378 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 3379 3380 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 3381 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 3382 IEEE80211_TX_CTL_NO_CCK_RATE; 3383 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 3384 info->hw_queue = 3385 local->hw.offchannel_tx_hw_queue; 3386 } 3387 3388 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, 3389 status->band, 0); 3390 } 3391 dev_kfree_skb(rx->skb); 3392 return RX_QUEUED; 3393 } 3394 3395 static ieee80211_rx_result debug_noinline 3396 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 3397 { 3398 struct ieee80211_sub_if_data *sdata = rx->sdata; 3399 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3400 __le16 stype; 3401 3402 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 3403 3404 if (!ieee80211_vif_is_mesh(&sdata->vif) && 3405 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3406 sdata->vif.type != NL80211_IFTYPE_OCB && 3407 sdata->vif.type != NL80211_IFTYPE_STATION) 3408 return RX_DROP_MONITOR; 3409 3410 switch (stype) { 3411 case cpu_to_le16(IEEE80211_STYPE_AUTH): 3412 case cpu_to_le16(IEEE80211_STYPE_BEACON): 3413 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 3414 /* process for all: mesh, mlme, 
ibss */ 3415 break; 3416 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 3417 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 3418 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 3419 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 3420 if (is_multicast_ether_addr(mgmt->da) && 3421 !is_broadcast_ether_addr(mgmt->da)) 3422 return RX_DROP_MONITOR; 3423 3424 /* process only for station */ 3425 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3426 return RX_DROP_MONITOR; 3427 break; 3428 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 3429 /* process only for ibss and mesh */ 3430 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 3431 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3432 return RX_DROP_MONITOR; 3433 break; 3434 default: 3435 return RX_DROP_MONITOR; 3436 } 3437 3438 /* queue up frame and kick off work to process it */ 3439 skb_queue_tail(&sdata->skb_queue, rx->skb); 3440 ieee80211_queue_work(&rx->local->hw, &sdata->work); 3441 if (rx->sta) 3442 rx->sta->rx_stats.packets++; 3443 3444 return RX_QUEUED; 3445 } 3446 3447 static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 3448 struct ieee80211_rate *rate) 3449 { 3450 struct ieee80211_sub_if_data *sdata; 3451 struct ieee80211_local *local = rx->local; 3452 struct sk_buff *skb = rx->skb, *skb2; 3453 struct net_device *prev_dev = NULL; 3454 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3455 int needed_headroom; 3456 3457 /* 3458 * If cooked monitor has been processed already, then 3459 * don't do it again. If not, set the flag. 3460 */ 3461 if (rx->flags & IEEE80211_RX_CMNTR) 3462 goto out_free_skb; 3463 rx->flags |= IEEE80211_RX_CMNTR; 3464 3465 /* If there are no cooked monitor interfaces, just free the SKB */ 3466 if (!local->cooked_mntrs) 3467 goto out_free_skb; 3468 3469 /* vendor data is long removed here */ 3470 status->flag &= ~RX_FLAG_RADIOTAP_VENDOR_DATA; 3471 /* room for the radiotap header based on driver features */ 3472 needed_headroom = ieee80211_rx_radiotap_hdrlen(local, status, skb); 3473 3474 if (skb_headroom(skb) < needed_headroom && 3475 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) 3476 goto out_free_skb; 3477 3478 /* prepend radiotap information */ 3479 ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, 3480 false); 3481 3482 skb_reset_mac_header(skb); 3483 skb->ip_summed = CHECKSUM_UNNECESSARY; 3484 skb->pkt_type = PACKET_OTHERHOST; 3485 skb->protocol = htons(ETH_P_802_2); 3486 3487 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 3488 if (!ieee80211_sdata_running(sdata)) 3489 continue; 3490 3491 if (sdata->vif.type != NL80211_IFTYPE_MONITOR || 3492 !(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)) 3493 continue; 3494 3495 if (prev_dev) { 3496 skb2 = skb_clone(skb, GFP_ATOMIC); 3497 if (skb2) { 3498 skb2->dev = prev_dev; 3499 netif_receive_skb(skb2); 3500 } 3501 } 3502 3503 prev_dev = sdata->dev; 3504 ieee80211_rx_stats(sdata->dev, skb->len); 3505 } 3506 3507 if (prev_dev) { 3508 skb->dev = prev_dev; 3509 netif_receive_skb(skb); 3510 return; 3511 } 3512 3513 out_free_skb: 3514 dev_kfree_skb(skb); 3515 } 3516 3517 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 3518 ieee80211_rx_result res) 3519 { 3520 switch (res) { 3521 case RX_DROP_MONITOR: 3522 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3523 if (rx->sta) 3524 rx->sta->rx_stats.dropped++; 3525 /* fall through */ 3526 case RX_CONTINUE: { 3527 struct ieee80211_rate *rate = NULL; 3528 struct ieee80211_supported_band *sband; 3529 struct ieee80211_rx_status *status; 3530 3531 status = 
IEEE80211_SKB_RXCB((rx->skb)); 3532 3533 sband = rx->local->hw.wiphy->bands[status->band]; 3534 if (status->encoding == RX_ENC_LEGACY) 3535 rate = &sband->bitrates[status->rate_idx]; 3536 3537 ieee80211_rx_cooked_monitor(rx, rate); 3538 break; 3539 } 3540 case RX_DROP_UNUSABLE: 3541 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 3542 if (rx->sta) 3543 rx->sta->rx_stats.dropped++; 3544 dev_kfree_skb(rx->skb); 3545 break; 3546 case RX_QUEUED: 3547 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 3548 break; 3549 } 3550 } 3551 3552 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 3553 struct sk_buff_head *frames) 3554 { 3555 ieee80211_rx_result res = RX_DROP_MONITOR; 3556 struct sk_buff *skb; 3557 3558 #define CALL_RXH(rxh) \ 3559 do { \ 3560 res = rxh(rx); \ 3561 if (res != RX_CONTINUE) \ 3562 goto rxh_next; \ 3563 } while (0) 3564 3565 /* Lock here to avoid hitting all of the data used in the RX 3566 * path (e.g. key data, station data, ...) concurrently when 3567 * a frame is released from the reorder buffer due to timeout 3568 * from the timer, potentially concurrently with RX from the 3569 * driver. 3570 */ 3571 spin_lock_bh(&rx->local->rx_path_lock); 3572 3573 while ((skb = __skb_dequeue(frames))) { 3574 /* 3575 * all the other fields are valid across frames 3576 * that belong to an aMPDU since they are on the 3577 * same TID from the same station 3578 */ 3579 rx->skb = skb; 3580 3581 CALL_RXH(ieee80211_rx_h_check_more_data); 3582 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 3583 CALL_RXH(ieee80211_rx_h_sta_process); 3584 CALL_RXH(ieee80211_rx_h_decrypt); 3585 CALL_RXH(ieee80211_rx_h_defragment); 3586 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 3587 /* must be after MMIC verify so header is counted in MPDU mic */ 3588 #ifdef CONFIG_MAC80211_MESH 3589 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 3590 CALL_RXH(ieee80211_rx_h_mesh_fwding); 3591 #endif 3592 CALL_RXH(ieee80211_rx_h_amsdu); 3593 CALL_RXH(ieee80211_rx_h_data); 3594 3595 /* special treatment -- needs the queue */ 3596 res = ieee80211_rx_h_ctrl(rx, frames); 3597 if (res != RX_CONTINUE) 3598 goto rxh_next; 3599 3600 CALL_RXH(ieee80211_rx_h_mgmt_check); 3601 CALL_RXH(ieee80211_rx_h_action); 3602 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 3603 CALL_RXH(ieee80211_rx_h_action_return); 3604 CALL_RXH(ieee80211_rx_h_mgmt); 3605 3606 rxh_next: 3607 ieee80211_rx_handlers_result(rx, res); 3608 3609 #undef CALL_RXH 3610 } 3611 3612 spin_unlock_bh(&rx->local->rx_path_lock); 3613 } 3614 3615 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 3616 { 3617 struct sk_buff_head reorder_release; 3618 ieee80211_rx_result res = RX_DROP_MONITOR; 3619 3620 __skb_queue_head_init(&reorder_release); 3621 3622 #define CALL_RXH(rxh) \ 3623 do { \ 3624 res = rxh(rx); \ 3625 if (res != RX_CONTINUE) \ 3626 goto rxh_next; \ 3627 } while (0) 3628 3629 CALL_RXH(ieee80211_rx_h_check_dup); 3630 CALL_RXH(ieee80211_rx_h_check); 3631 3632 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 3633 3634 ieee80211_rx_handlers(rx, &reorder_release); 3635 return; 3636 3637 rxh_next: 3638 ieee80211_rx_handlers_result(rx, res); 3639 3640 #undef CALL_RXH 3641 } 3642 3643 /* 3644 * This function makes calls into the RX path, therefore 3645 * it has to be invoked under RCU read lock. 
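 * It is normally reached from the A-MPDU reorder timeout timer, which already runs under rcu_read_lock().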
3646 */ 3647 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 3648 { 3649 struct sk_buff_head frames; 3650 struct ieee80211_rx_data rx = { 3651 .sta = sta, 3652 .sdata = sta->sdata, 3653 .local = sta->local, 3654 /* This is OK -- must be QoS data frame */ 3655 .security_idx = tid, 3656 .seqno_idx = tid, 3657 .napi = NULL, /* must be NULL to not have races */ 3658 }; 3659 struct tid_ampdu_rx *tid_agg_rx; 3660 3661 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3662 if (!tid_agg_rx) 3663 return; 3664 3665 __skb_queue_head_init(&frames); 3666 3667 spin_lock(&tid_agg_rx->reorder_lock); 3668 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3669 spin_unlock(&tid_agg_rx->reorder_lock); 3670 3671 if (!skb_queue_empty(&frames)) { 3672 struct ieee80211_event event = { 3673 .type = BA_FRAME_TIMEOUT, 3674 .u.ba.tid = tid, 3675 .u.ba.sta = &sta->sta, 3676 }; 3677 drv_event_callback(rx.local, rx.sdata, &event); 3678 } 3679 3680 ieee80211_rx_handlers(&rx, &frames); 3681 } 3682 3683 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 3684 u16 ssn, u64 filtered, 3685 u16 received_mpdus) 3686 { 3687 struct sta_info *sta; 3688 struct tid_ampdu_rx *tid_agg_rx; 3689 struct sk_buff_head frames; 3690 struct ieee80211_rx_data rx = { 3691 /* This is OK -- must be QoS data frame */ 3692 .security_idx = tid, 3693 .seqno_idx = tid, 3694 }; 3695 int i, diff; 3696 3697 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 3698 return; 3699 3700 __skb_queue_head_init(&frames); 3701 3702 sta = container_of(pubsta, struct sta_info, sta); 3703 3704 rx.sta = sta; 3705 rx.sdata = sta->sdata; 3706 rx.local = sta->local; 3707 3708 rcu_read_lock(); 3709 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 3710 if (!tid_agg_rx) 3711 goto out; 3712 3713 spin_lock_bh(&tid_agg_rx->reorder_lock); 3714 3715 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 3716 int release; 3717 3718 /* release all frames in the reorder buffer */ 3719 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 3720 IEEE80211_SN_MODULO; 3721 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 3722 release, &frames); 3723 /* update ssn to match received ssn */ 3724 tid_agg_rx->head_seq_num = ssn; 3725 } else { 3726 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 3727 &frames); 3728 } 3729 3730 /* handle the case that received ssn is behind the mac ssn. 
3731 * it can be tid_agg_rx->buf_size behind and still be valid */ 3732 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 3733 if (diff >= tid_agg_rx->buf_size) { 3734 tid_agg_rx->reorder_buf_filtered = 0; 3735 goto release; 3736 } 3737 filtered = filtered >> diff; 3738 ssn += diff; 3739 3740 /* update bitmap */ 3741 for (i = 0; i < tid_agg_rx->buf_size; i++) { 3742 int index = (ssn + i) % tid_agg_rx->buf_size; 3743 3744 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 3745 if (filtered & BIT_ULL(i)) 3746 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 3747 } 3748 3749 /* now process also frames that the filter marking released */ 3750 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 3751 3752 release: 3753 spin_unlock_bh(&tid_agg_rx->reorder_lock); 3754 3755 ieee80211_rx_handlers(&rx, &frames); 3756 3757 out: 3758 rcu_read_unlock(); 3759 } 3760 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 3761 3762 /* main receive path */ 3763 3764 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 3765 { 3766 struct ieee80211_sub_if_data *sdata = rx->sdata; 3767 struct sk_buff *skb = rx->skb; 3768 struct ieee80211_hdr *hdr = (void *)skb->data; 3769 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3770 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 3771 bool multicast = is_multicast_ether_addr(hdr->addr1); 3772 3773 switch (sdata->vif.type) { 3774 case NL80211_IFTYPE_STATION: 3775 if (!bssid && !sdata->u.mgd.use_4addr) 3776 return false; 3777 if (multicast) 3778 return true; 3779 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3780 case NL80211_IFTYPE_ADHOC: 3781 if (!bssid) 3782 return false; 3783 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 3784 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) 3785 return false; 3786 if (ieee80211_is_beacon(hdr->frame_control)) 3787 return true; 3788 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 3789 return false; 3790 if (!multicast && 3791 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 3792 return false; 3793 if (!rx->sta) { 3794 int rate_idx; 3795 if (status->encoding != RX_ENC_LEGACY) 3796 rate_idx = 0; /* TODO: HT/VHT rates */ 3797 else 3798 rate_idx = status->rate_idx; 3799 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 3800 BIT(rate_idx)); 3801 } 3802 return true; 3803 case NL80211_IFTYPE_OCB: 3804 if (!bssid) 3805 return false; 3806 if (!ieee80211_is_data_present(hdr->frame_control)) 3807 return false; 3808 if (!is_broadcast_ether_addr(bssid)) 3809 return false; 3810 if (!multicast && 3811 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 3812 return false; 3813 if (!rx->sta) { 3814 int rate_idx; 3815 if (status->encoding != RX_ENC_LEGACY) 3816 rate_idx = 0; /* TODO: HT rates */ 3817 else 3818 rate_idx = status->rate_idx; 3819 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 3820 BIT(rate_idx)); 3821 } 3822 return true; 3823 case NL80211_IFTYPE_MESH_POINT: 3824 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 3825 return false; 3826 if (multicast) 3827 return true; 3828 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3829 case NL80211_IFTYPE_AP_VLAN: 3830 case NL80211_IFTYPE_AP: 3831 if (!bssid) 3832 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 3833 3834 if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) { 3835 /* 3836 * Accept public action frames even when the 3837 * BSSID doesn't match, this is used for P2P 3838 * and location updates. Note that mac80211 3839 * itself never looks at these frames. 
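 * Frames accepted this way are only passed up to userspace: public Action frames via the cfg80211_rx_mgmt() path and off-BSS beacons via the OBSS beacon report.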
                         */
                        if (!multicast &&
                            !ether_addr_equal(sdata->vif.addr, hdr->addr1))
                                return false;
                        if (ieee80211_is_public_action(hdr, skb->len))
                                return true;
                        return ieee80211_is_beacon(hdr->frame_control);
                }

                if (!ieee80211_has_tods(hdr->frame_control)) {
                        /* ignore data frames to TDLS-peers */
                        if (ieee80211_is_data(hdr->frame_control))
                                return false;
                        /* ignore action frames to TDLS-peers */
                        if (ieee80211_is_action(hdr->frame_control) &&
                            !is_broadcast_ether_addr(bssid) &&
                            !ether_addr_equal(bssid, hdr->addr1))
                                return false;
                }

                /*
                 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
                 * the BSSID - we've checked that already but may have accepted
                 * the wildcard (ff:ff:ff:ff:ff:ff).
                 *
                 * It also says:
                 *      The BSSID of the Data frame is determined as follows:
                 *      a) If the STA is contained within an AP or is associated
                 *         with an AP, the BSSID is the address currently in use
                 *         by the STA contained in the AP.
                 *
                 * So we should not accept data frames with an address that's
                 * multicast.
                 *
                 * Accepting it also opens a security problem because stations
                 * could encrypt it with the GTK and inject traffic that way.
                 */
                if (ieee80211_is_data(hdr->frame_control) && multicast)
                        return false;

                return true;
        case NL80211_IFTYPE_WDS:
                if (bssid || !ieee80211_is_data(hdr->frame_control))
                        return false;
                return ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2);
        case NL80211_IFTYPE_P2P_DEVICE:
                return ieee80211_is_public_action(hdr, skb->len) ||
                       ieee80211_is_probe_req(hdr->frame_control) ||
                       ieee80211_is_probe_resp(hdr->frame_control) ||
                       ieee80211_is_beacon(hdr->frame_control);
        case NL80211_IFTYPE_NAN:
                /* Currently no frames on NAN interface are allowed */
                return false;
        default:
                break;
        }

        WARN_ON_ONCE(1);
        return false;
}

void ieee80211_check_fast_rx(struct sta_info *sta)
{
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_key *key;
        struct ieee80211_fast_rx fastrx = {
                .dev = sdata->dev,
                .vif_type = sdata->vif.type,
                .control_port_protocol = sdata->control_port_protocol,
        }, *old, *new = NULL;
        bool assign = false;

        /* use sparse to check that we don't return without updating */
        __acquire(check_fast_rx);

        BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
        BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
        ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
        ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);

        fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);

        /* fast-rx doesn't do reordering */
        if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
            !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
                goto clear;

        switch (sdata->vif.type) {
        case NL80211_IFTYPE_STATION:
                if (sta->sta.tdls) {
                        fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
                        fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
                        fastrx.expected_ds_bits = 0;
                } else {
                        fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
                        fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
                        fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
                        fastrx.expected_ds_bits =
                                cpu_to_le16(IEEE80211_FCTL_FROMDS);
                }
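
                /* Note (added for clarity): a 4-address (use_4addr) client
                 * frame carries both ToDS and FromDS; DA then lives in addr3
                 * and SA in addr4, so the precomputed offsets are adjusted
                 * below accordingly.
                 */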
                if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
                        fastrx.expected_ds_bits |=
                                cpu_to_le16(IEEE80211_FCTL_TODS);
                        fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
                        fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
                }

                if (!sdata->u.mgd.powersave)
                        break;

                /* software powersave is a huge mess, avoid all of it */
                if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
                        goto clear;
                if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
                    !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
                        goto clear;
                break;
        case NL80211_IFTYPE_AP_VLAN:
        case NL80211_IFTYPE_AP:
                /* parallel-rx requires this, at least with calls to
                 * ieee80211_sta_ps_transition()
                 */
                if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
                        goto clear;
                fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
                fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
                fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);

                fastrx.internal_forward =
                        !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
                        (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
                         !sdata->u.vlan.sta);

                if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
                    sdata->u.vlan.sta) {
                        fastrx.expected_ds_bits |=
                                cpu_to_le16(IEEE80211_FCTL_FROMDS);
                        fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
                        fastrx.internal_forward = 0;
                }

                break;
        default:
                goto clear;
        }

        if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                goto clear;

        rcu_read_lock();
        key = rcu_dereference(sta->ptk[sta->ptk_idx]);
        if (key) {
                switch (key->conf.cipher) {
                case WLAN_CIPHER_SUITE_TKIP:
                        /* we don't want to deal with MMIC in fast-rx */
                        goto clear_rcu;
                case WLAN_CIPHER_SUITE_CCMP:
                case WLAN_CIPHER_SUITE_CCMP_256:
                case WLAN_CIPHER_SUITE_GCMP:
                case WLAN_CIPHER_SUITE_GCMP_256:
                        break;
                default:
                        /* we also don't want to deal with WEP or cipher scheme
                         * since those require looking up the key idx in the
                         * frame, rather than assuming the PTK is used
                         * (we need to revisit this once we implement the real
                         * PTK index, which is now valid in the spec, but we
                         * haven't implemented that part yet)
                         */
                        goto clear_rcu;
                }

                fastrx.key = true;
                fastrx.icv_len = key->conf.icv_len;
        }

        assign = true;
 clear_rcu:
        rcu_read_unlock();
 clear:
        __release(check_fast_rx);

        if (assign)
                new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);

        spin_lock_bh(&sta->lock);
        old = rcu_dereference_protected(sta->fast_rx, true);
        rcu_assign_pointer(sta->fast_rx, new);
        spin_unlock_bh(&sta->lock);

        if (old)
                kfree_rcu(old, rcu_head);
}

void ieee80211_clear_fast_rx(struct sta_info *sta)
{
        struct ieee80211_fast_rx *old;

        spin_lock_bh(&sta->lock);
        old = rcu_dereference_protected(sta->fast_rx, true);
        RCU_INIT_POINTER(sta->fast_rx, NULL);
        spin_unlock_bh(&sta->lock);

        if (old)
                kfree_rcu(old, rcu_head);
}
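
/* Note (added for clarity): the fast_rx descriptor is published with
 * rcu_assign_pointer() and freed with kfree_rcu() above, so the rx path only
 * ever observes either the old or the new, fully initialized copy.
 * Re-evaluating it for a whole interface (below) must be done with the
 * station list mutex held.
 */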

void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;

        lockdep_assert_held(&local->sta_mtx);

        list_for_each_entry_rcu(sta, &local->sta_list, list) {
                if (sdata != sta->sdata &&
                    (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
                        continue;
                ieee80211_check_fast_rx(sta);
        }
}

void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
{
        struct ieee80211_local *local = sdata->local;

        mutex_lock(&local->sta_mtx);
        __ieee80211_check_fast_rx_iface(sdata);
        mutex_unlock(&local->sta_mtx);
}

static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
                                     struct ieee80211_fast_rx *fast_rx)
{
        struct sk_buff *skb = rx->skb;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct sta_info *sta = rx->sta;
        int orig_len = skb->len;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        int snap_offs = hdrlen;
        struct {
                u8 snap[sizeof(rfc1042_header)];
                __be16 proto;
        } *payload __aligned(2);
        struct {
                u8 da[ETH_ALEN];
                u8 sa[ETH_ALEN];
        } addrs __aligned(2);
        struct ieee80211_sta_rx_stats *stats = &sta->rx_stats;

        if (fast_rx->uses_rss)
                stats = this_cpu_ptr(sta->pcpu_rx_stats);

        /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
         * to a common data structure; drivers can implement that per queue
         * but we don't have that information in mac80211
         */
        if (!(status->flag & RX_FLAG_DUP_VALIDATED))
                return false;

#define FAST_RX_CRYPT_FLAGS     (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)

        /* If using encryption, we also need to have:
         *  - PN_VALIDATED: similar, but the implementation is tricky
         *  - DECRYPTED: necessary for PN_VALIDATED
         */
        if (fast_rx->key &&
            (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
                return false;

        if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
                return false;

        if (unlikely(ieee80211_is_frag(hdr)))
                return false;

        /* Since our interface address cannot be multicast, this
         * implicitly also rejects multicast frames without the
         * explicit check.
         *
         * We shouldn't get any *data* frames not addressed to us
         * (AP mode will accept multicast *management* frames), but
         * punting here will make it go through the full checks in
         * ieee80211_accept_frame().
         */
        if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
                return false;

        if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
                                              IEEE80211_FCTL_TODS)) !=
            fast_rx->expected_ds_bits)
                return false;

        /* assign the key to drop unencrypted frames (later)
         * and strip the IV/MIC if necessary
         */
        if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
                /* GCMP header length is the same */
                snap_offs += IEEE80211_CCMP_HDR_LEN;
        }

        if (!(status->rx_flags & IEEE80211_RX_AMSDU)) {
                if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
                        goto drop;

                payload = (void *)(skb->data + snap_offs);

                if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
                        return false;

                /* Don't handle these here since they require special code.
                 * Accept AARP and IPX even though they should come with a
                 * bridge-tunnel header - but if we get them this way then
                 * there's little point in discarding them.
                 */
                if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
                             payload->proto == fast_rx->control_port_protocol))
                        return false;
        }
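
        /* Summary note (added for clarity): at this point the frame is known
         * to be a present, unfragmented data frame addressed to this
         * interface with the expected To/FromDS bits, duplicate-validated by
         * the driver and, if a key is in use, already decrypted with a
         * validated PN; in the non-A-MSDU case it also carries a plain
         * RFC 1042 SNAP header that is neither TDLS nor the control port
         * protocol.
         */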

        /* after this point, don't punt to the slowpath! */

        if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
            pskb_trim(skb, skb->len - fast_rx->icv_len))
                goto drop;

        if (unlikely(fast_rx->sta_notify)) {
                ieee80211_sta_rx_notify(rx->sdata, hdr);
                fast_rx->sta_notify = false;
        }

        /* statistics part of ieee80211_rx_h_sta_process() */
        if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
                stats->last_signal = status->signal;
                if (!fast_rx->uses_rss)
                        ewma_signal_add(&sta->rx_stats_avg.signal,
                                        -status->signal);
        }

        if (status->chains) {
                int i;

                stats->chains = status->chains;
                for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
                        int signal = status->chain_signal[i];

                        if (!(status->chains & BIT(i)))
                                continue;

                        stats->chain_signal_last[i] = signal;
                        if (!fast_rx->uses_rss)
                                ewma_signal_add(&sta->rx_stats_avg.chain_signal[i],
                                                -signal);
                }
        }
        /* end of statistics */

        if (rx->key && !ieee80211_has_protected(hdr->frame_control))
                goto drop;

        if (status->rx_flags & IEEE80211_RX_AMSDU) {
                if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
                    RX_QUEUED)
                        goto drop;

                return true;
        }

        stats->last_rx = jiffies;
        stats->last_rate = sta_stats_encode_rate(status);

        stats->fragments++;
        stats->packets++;

        /* do the header conversion - first grab the addresses */
        ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
        ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
        /* remove the SNAP but leave the ethertype */
        skb_pull(skb, snap_offs + sizeof(rfc1042_header));
        /* push the addresses in front */
        memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));

        skb->dev = fast_rx->dev;

        ieee80211_rx_stats(fast_rx->dev, skb->len);

        /* The seqno index has the same property as needed
         * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
         * for non-QoS-data frames. Here we know it's a data
         * frame, so count MSDUs.
         */
        u64_stats_update_begin(&stats->syncp);
        stats->msdu[rx->seqno_idx]++;
        stats->bytes += orig_len;
        u64_stats_update_end(&stats->syncp);
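
        /* Note (added for clarity): in AP/AP_VLAN mode, traffic whose
         * destination is another station in the same BSS is bridged straight
         * back onto the wireless medium below; multicast frames are both
         * echoed (as a copy) and delivered to the local stack.
         */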
        if (fast_rx->internal_forward) {
                struct sk_buff *xmit_skb = NULL;
                bool multicast = is_multicast_ether_addr(skb->data);

                if (multicast) {
                        xmit_skb = skb_copy(skb, GFP_ATOMIC);
                } else if (sta_info_get(rx->sdata, skb->data)) {
                        xmit_skb = skb;
                        skb = NULL;
                }

                if (xmit_skb) {
                        /*
                         * Send to wireless media and increase priority by 256
                         * to keep the received priority instead of
                         * reclassifying the frame (see cfg80211_classify8021d).
                         */
                        xmit_skb->priority += 256;
                        xmit_skb->protocol = htons(ETH_P_802_3);
                        skb_reset_network_header(xmit_skb);
                        skb_reset_mac_header(xmit_skb);
                        dev_queue_xmit(xmit_skb);
                }

                if (!skb)
                        return true;
        }

        /* deliver to local stack */
        skb->protocol = eth_type_trans(skb, fast_rx->dev);
        memset(skb->cb, 0, sizeof(skb->cb));
        if (rx->napi)
                napi_gro_receive(rx->napi, skb);
        else
                netif_receive_skb(skb);

        return true;
 drop:
        dev_kfree_skb(skb);
        stats->dropped++;
        return true;
}

/*
 * This function returns whether the SKB was destined for RX processing,
 * which, if consume is true, is equivalent to whether the skb was consumed.
 */
static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
                                            struct sk_buff *skb, bool consume)
{
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;

        rx->skb = skb;

        /* See if we can do fast-rx; if we have to copy we already lost,
         * so punt in that case. We should never have to deliver a data
         * frame to multiple interfaces anyway.
         *
         * We skip the ieee80211_accept_frame() call and do the necessary
         * checking inside ieee80211_invoke_fast_rx().
         */
        if (consume && rx->sta) {
                struct ieee80211_fast_rx *fast_rx;

                fast_rx = rcu_dereference(rx->sta->fast_rx);
                if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
                        return true;
        }

        if (!ieee80211_accept_frame(rx))
                return false;

        if (!consume) {
                skb = skb_copy(skb, GFP_ATOMIC);
                if (!skb) {
                        if (net_ratelimit())
                                wiphy_debug(local->hw.wiphy,
                                            "failed to copy skb for %s\n",
                                            sdata->name);
                        return true;
                }

                rx->skb = skb;
        }

        ieee80211_invoke_rx_handlers(rx);
        return true;
}
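
/* Overview note (added for clarity): __ieee80211_rx_handle_packet() below
 * matches a frame to one or more stations/interfaces and calls
 * ieee80211_prepare_and_rx_handle() for each candidate, which either takes
 * the fast-rx path or runs the full rx handlers; only the last candidate
 * consumes the original skb, earlier ones receive a copy.
 */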

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it must
 * be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                                         struct ieee80211_sta *pubsta,
                                         struct sk_buff *skb,
                                         struct napi_struct *napi)
{
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_hdr *hdr;
        __le16 fc;
        struct ieee80211_rx_data rx;
        struct ieee80211_sub_if_data *prev;
        struct rhlist_head *tmp;
        int err = 0;

        fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
        memset(&rx, 0, sizeof(rx));
        rx.skb = skb;
        rx.local = local;
        rx.napi = napi;

        if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
                I802_DEBUG_INC(local->dot11ReceivedFragmentCount);

        if (ieee80211_is_mgmt(fc)) {
                /* drop frame if too short for header */
                if (skb->len < ieee80211_hdrlen(fc))
                        err = -ENOBUFS;
                else
                        err = skb_linearize(skb);
        } else {
                err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
        }

        if (err) {
                dev_kfree_skb(skb);
                return;
        }

        hdr = (struct ieee80211_hdr *)skb->data;
        ieee80211_parse_qos(&rx);
        ieee80211_verify_alignment(&rx);

        if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
                     ieee80211_is_beacon(hdr->frame_control)))
                ieee80211_scan_rx(local, skb);

        if (ieee80211_is_data(fc)) {
                struct sta_info *sta, *prev_sta;

                if (pubsta) {
                        rx.sta = container_of(pubsta, struct sta_info, sta);
                        rx.sdata = rx.sta->sdata;
                        if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
                                return;
                        goto out;
                }

                prev_sta = NULL;

                for_each_sta_info(local, hdr->addr2, sta, tmp) {
                        if (!prev_sta) {
                                prev_sta = sta;
                                continue;
                        }

                        rx.sta = prev_sta;
                        rx.sdata = prev_sta->sdata;
                        ieee80211_prepare_and_rx_handle(&rx, skb, false);

                        prev_sta = sta;
                }

                if (prev_sta) {
                        rx.sta = prev_sta;
                        rx.sdata = prev_sta->sdata;

                        if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
                                return;
                        goto out;
                }
        }

        prev = NULL;

        list_for_each_entry_rcu(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
                        continue;

                if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
                    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                        continue;

                /*
                 * frame is destined for this interface, but if it's
                 * not also for the previous one we handle that after
                 * the loop to avoid copying the SKB once too often
                 */

                if (!prev) {
                        prev = sdata;
                        continue;
                }

                rx.sta = sta_info_get_bss(prev, hdr->addr2);
                rx.sdata = prev;
                ieee80211_prepare_and_rx_handle(&rx, skb, false);

                prev = sdata;
        }

        if (prev) {
                rx.sta = sta_info_get_bss(prev, hdr->addr2);
                rx.sdata = prev;

                if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
                        return;
        }

 out:
        dev_kfree_skb(skb);
}
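
/* Note on calling context (added for clarity): ieee80211_rx_napi() below
 * expects to run in softirq/NAPI context (see the WARN_ON_ONCE on
 * softirq_count()) with a filled-in struct ieee80211_rx_status in the skb
 * control buffer; it sanity-checks the band and the per-encoding rate index
 * before handing the frame on. Drivers in hard-irq context should use
 * ieee80211_rx_irqsafe() instead.
 */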

/*
 * This is the receive path handler. It is called by a low level driver when an
 * 802.11 MPDU is received from the hardware.
 */
void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
                       struct sk_buff *skb, struct napi_struct *napi)
{
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_rate *rate = NULL;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

        WARN_ON_ONCE(softirq_count() == 0);

        if (WARN_ON(status->band >= NUM_NL80211_BANDS))
                goto drop;

        sband = local->hw.wiphy->bands[status->band];
        if (WARN_ON(!sband))
                goto drop;

        /*
         * If we're suspending, it is possible although not too likely
         * that we'd be receiving frames after having already partially
         * quiesced the stack. We can't process such frames then since
         * that might, for example, cause stations to be added or other
         * driver callbacks be invoked.
         */
        if (unlikely(local->quiescing || local->suspended))
                goto drop;

        /* We might be during a HW reconfig, prevent Rx for the same reason */
        if (unlikely(local->in_reconfig))
                goto drop;

        /*
         * The same happens when we're not even started,
         * but that's worth a warning.
         */
        if (WARN_ON(!local->started))
                goto drop;

        if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
                /*
                 * Validate the rate, unless a PLCP error means that
                 * we probably can't have a valid rate here anyway.
                 */

                switch (status->encoding) {
                case RX_ENC_HT:
                        /*
                         * rate_idx is MCS index, which can be [0-76]
                         * as documented on:
                         *
                         * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
                         *
                         * Anything else would be some sort of driver or
                         * hardware error. The driver should catch hardware
                         * errors.
                         */
                        if (WARN(status->rate_idx > 76,
                                 "Rate marked as an HT rate but passed status->rate_idx is not an MCS index [0-76]: %d (0x%02x)\n",
                                 status->rate_idx,
                                 status->rate_idx))
                                goto drop;
                        break;
                case RX_ENC_VHT:
                        if (WARN_ONCE(status->rate_idx > 9 ||
                                      !status->nss ||
                                      status->nss > 8,
                                      "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
                                      status->rate_idx, status->nss))
                                goto drop;
                        break;
                case RX_ENC_HE:
                        if (WARN_ONCE(status->rate_idx > 11 ||
                                      !status->nss ||
                                      status->nss > 8,
                                      "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
                                      status->rate_idx, status->nss))
                                goto drop;
                        break;
                default:
                        WARN_ON_ONCE(1);
                        /* fall through */
                case RX_ENC_LEGACY:
                        if (WARN_ON(status->rate_idx >= sband->n_bitrates))
                                goto drop;
                        rate = &sband->bitrates[status->rate_idx];
                }
        }

        status->rx_flags = 0;

        /*
         * key references and virtual interfaces are protected using RCU
         * and this requires that we are in a read-side RCU section during
         * receive processing
         */
        rcu_read_lock();

        /*
         * Frames with failed FCS/PLCP checksum are not returned,
         * all other frames are returned without radiotap header
         * if it was previously present.
         * Also, frames with less than 16 bytes are dropped.
         */
        skb = ieee80211_rx_monitor(local, skb, rate);
        if (!skb) {
                rcu_read_unlock();
                return;
        }

        ieee80211_tpt_led_trig_rx(local,
                        ((struct ieee80211_hdr *)skb->data)->frame_control,
                        skb->len);

        __ieee80211_rx_handle_packet(hw, pubsta, skb, napi);

        rcu_read_unlock();

        return;
 drop:
        kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_rx_napi);

/* This is a version of the rx handler that can be called from hard irq
 * context. Post the skb on the queue and schedule the tasklet */
void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct ieee80211_local *local = hw_to_local(hw);

        BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));

        skb->pkt_type = IEEE80211_RX_MSG;
        skb_queue_tail(&local->skb_queue, skb);
        tasklet_schedule(&local->tasklet);
}
EXPORT_SYMBOL(ieee80211_rx_irqsafe);
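
/*
 * Illustrative usage sketch (not part of mac80211): a driver's NAPI poll
 * routine would fill in the rx status control block and hand the frame to
 * ieee80211_rx_napi(). "hw", "napi" and the values below stand in for the
 * driver's own context and are hypothetical.
 *
 *      struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *      memset(status, 0, sizeof(*status));
 *      status->band = NL80211_BAND_2GHZ;
 *      status->freq = 2412;
 *      status->signal = -55;
 *      status->encoding = RX_ENC_LEGACY;
 *      status->rate_idx = 0;                   (index into the band's bitrates)
 *      status->flag |= RX_FLAG_DUP_VALIDATED;  (only if the HW really did it)
 *      ieee80211_rx_napi(hw, NULL, skb, napi);
 *
 * From hard interrupt context, ieee80211_rx_irqsafe(hw, skb) queues the skb
 * and defers the same processing to the tasklet instead.
 */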