1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018-2025 Intel Corporation 10 */ 11 12 #include <linux/jiffies.h> 13 #include <linux/slab.h> 14 #include <linux/kernel.h> 15 #include <linux/skbuff.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rcupdate.h> 19 #include <linux/export.h> 20 #include <linux/kcov.h> 21 #include <linux/bitops.h> 22 #include <kunit/visibility.h> 23 #include <net/mac80211.h> 24 #include <net/ieee80211_radiotap.h> 25 #include <linux/unaligned.h> 26 27 #include "ieee80211_i.h" 28 #include "driver-ops.h" 29 #include "led.h" 30 #include "mesh.h" 31 #include "wep.h" 32 #include "wpa.h" 33 #include "tkip.h" 34 #include "wme.h" 35 #include "rate.h" 36 37 /* 38 * monitor mode reception 39 * 40 * This function cleans up the SKB, i.e. it removes all the stuff 41 * only useful for monitoring. 42 */ 43 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, 44 unsigned int present_fcs_len, 45 unsigned int rtap_space) 46 { 47 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 48 struct ieee80211_hdr *hdr; 49 unsigned int hdrlen; 50 __le16 fc; 51 52 if (present_fcs_len) 53 __pskb_trim(skb, skb->len - present_fcs_len); 54 pskb_pull(skb, rtap_space); 55 56 /* After pulling radiotap header, clear all flags that indicate 57 * info in skb->data. 58 */ 59 status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END | 60 RX_FLAG_RADIOTAP_LSIG | 61 RX_FLAG_RADIOTAP_HE_MU | 62 RX_FLAG_RADIOTAP_HE | 63 RX_FLAG_RADIOTAP_VHT); 64 65 hdr = (void *)skb->data; 66 fc = hdr->frame_control; 67 68 /* 69 * Remove the HT-Control field (if present) on management 70 * frames after we've sent the frame to monitoring. We 71 * (currently) don't need it, and don't properly parse 72 * frames with it present, due to the assumption of a 73 * fixed management header length. 
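	 * (The HT-Control field is the 4-byte field whose presence is
	 * signalled by the Order bit in the Frame Control field; stripping
	 * it restores the fixed 24-byte management header that the code
	 * below assumes.)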
74 */ 75 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc))) 76 return skb; 77 78 hdrlen = ieee80211_hdrlen(fc); 79 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); 80 81 if (!pskb_may_pull(skb, hdrlen)) { 82 dev_kfree_skb(skb); 83 return NULL; 84 } 85 86 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data, 87 hdrlen - IEEE80211_HT_CTL_LEN); 88 pskb_pull(skb, IEEE80211_HT_CTL_LEN); 89 90 return skb; 91 } 92 93 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 94 unsigned int rtap_space) 95 { 96 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 97 struct ieee80211_hdr *hdr; 98 99 hdr = (void *)(skb->data + rtap_space); 100 101 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 102 RX_FLAG_FAILED_PLCP_CRC | 103 RX_FLAG_ONLY_MONITOR | 104 RX_FLAG_NO_PSDU)) 105 return true; 106 107 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) 108 return true; 109 110 if (ieee80211_is_ctl(hdr->frame_control) && 111 !ieee80211_is_pspoll(hdr->frame_control) && 112 !ieee80211_is_back_req(hdr->frame_control)) 113 return true; 114 115 return false; 116 } 117 118 static int 119 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 120 struct ieee80211_rx_status *status, 121 struct sk_buff *skb) 122 { 123 int len; 124 125 /* always present fields */ 126 len = sizeof(struct ieee80211_radiotap_header) + 8; 127 128 /* allocate extra bitmaps */ 129 if (status->chains) 130 len += 4 * hweight8(status->chains); 131 132 if (ieee80211_have_rx_timestamp(status)) { 133 len = ALIGN(len, 8); 134 len += 8; 135 } 136 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 137 len += 1; 138 139 /* antenna field, if we don't have per-chain info */ 140 if (!status->chains) 141 len += 1; 142 143 /* padding for RX_FLAGS if necessary */ 144 len = ALIGN(len, 2); 145 146 if (status->encoding == RX_ENC_HT) /* HT info */ 147 len += 3; 148 149 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 150 len = ALIGN(len, 4); 151 len += 8; 152 } 153 154 if (status->encoding == RX_ENC_VHT) { 155 /* Included even if RX_FLAG_RADIOTAP_VHT is not set */ 156 len = ALIGN(len, 2); 157 len += 12; 158 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12); 159 } 160 161 if (local->hw.radiotap_timestamp.units_pos >= 0) { 162 len = ALIGN(len, 8); 163 len += 12; 164 } 165 166 if (status->encoding == RX_ENC_HE && 167 status->flag & RX_FLAG_RADIOTAP_HE) { 168 len = ALIGN(len, 2); 169 len += 12; 170 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); 171 } 172 173 if (status->encoding == RX_ENC_HE && 174 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 175 len = ALIGN(len, 2); 176 len += 12; 177 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); 178 } 179 180 if (status->flag & RX_FLAG_NO_PSDU) 181 len += 1; 182 183 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 184 len = ALIGN(len, 2); 185 len += 4; 186 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); 187 } 188 189 if (status->chains) { 190 /* antenna and antenna signal fields */ 191 len += 2 * hweight8(status->chains); 192 } 193 194 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) { 195 int tlv_offset = 0; 196 197 /* 198 * The position to look at depends on the existence (or non- 199 * existence) of other elements, so take that into account... 
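		 * (For example, if the driver left both the HE and the HE-MU
		 * structs at the front of skb->data, the TLVs start 24 bytes
		 * in, so both sizes end up in tlv_offset below.)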
200 */ 201 if (status->flag & RX_FLAG_RADIOTAP_VHT) 202 tlv_offset += 203 sizeof(struct ieee80211_radiotap_vht); 204 if (status->flag & RX_FLAG_RADIOTAP_HE) 205 tlv_offset += 206 sizeof(struct ieee80211_radiotap_he); 207 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 208 tlv_offset += 209 sizeof(struct ieee80211_radiotap_he_mu); 210 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 211 tlv_offset += 212 sizeof(struct ieee80211_radiotap_lsig); 213 214 /* ensure 4 byte alignment for TLV */ 215 len = ALIGN(len, 4); 216 217 /* TLVs until the mac header */ 218 len += skb_mac_header(skb) - &skb->data[tlv_offset]; 219 } 220 221 return len; 222 } 223 224 static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 225 int link_id, 226 struct sta_info *sta, 227 struct sk_buff *skb) 228 { 229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 230 231 if (link_id >= 0) { 232 status->link_valid = 1; 233 status->link_id = link_id; 234 } else { 235 status->link_valid = 0; 236 } 237 238 skb_queue_tail(&sdata->skb_queue, skb); 239 wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); 240 if (sta) { 241 struct link_sta_info *link_sta_info; 242 243 if (link_id >= 0) { 244 link_sta_info = rcu_dereference(sta->link[link_id]); 245 if (!link_sta_info) 246 return; 247 } else { 248 link_sta_info = &sta->deflink; 249 } 250 251 link_sta_info->rx_stats.packets++; 252 } 253 } 254 255 static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 256 int link_id, 257 struct sta_info *sta, 258 struct sk_buff *skb) 259 { 260 skb->protocol = 0; 261 __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb); 262 } 263 264 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 265 struct sk_buff *skb, 266 int rtap_space) 267 { 268 struct { 269 struct ieee80211_hdr_3addr hdr; 270 u8 category; 271 u8 action_code; 272 } __packed __aligned(2) action; 273 274 if (!sdata) 275 return; 276 277 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 278 279 if (skb->len < rtap_space + sizeof(action) + 280 VHT_MUMIMO_GROUPS_DATA_LEN) 281 return; 282 283 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 284 return; 285 286 skb_copy_bits(skb, rtap_space, &action, sizeof(action)); 287 288 if (!ieee80211_is_action(action.hdr.frame_control)) 289 return; 290 291 if (action.category != WLAN_CATEGORY_VHT) 292 return; 293 294 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 295 return; 296 297 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 298 return; 299 300 skb = skb_copy(skb, GFP_ATOMIC); 301 if (!skb) 302 return; 303 304 ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb); 305 } 306 307 /* 308 * ieee80211_add_rx_radiotap_header - add radiotap header 309 * 310 * add a radiotap header containing all the fields which the hardware provided. 
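 * The header is pushed in front of the frame data, so callers reserve
 * enough headroom beforehand based on ieee80211_rx_radiotap_hdrlen()
 * (see ieee80211_make_monitor_skb()).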
311 */ 312 static void 313 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, 314 struct sk_buff *skb, 315 struct ieee80211_rate *rate, 316 int rtap_len, bool has_fcs) 317 { 318 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 319 struct ieee80211_radiotap_header *rthdr; 320 unsigned char *pos; 321 __le32 *it_present; 322 u32 it_present_val; 323 u16 rx_flags = 0; 324 u16 channel_flags = 0; 325 u32 tlvs_len = 0; 326 int mpdulen, chain; 327 unsigned long chains = status->chains; 328 struct ieee80211_radiotap_vht vht = {}; 329 struct ieee80211_radiotap_he he = {}; 330 struct ieee80211_radiotap_he_mu he_mu = {}; 331 struct ieee80211_radiotap_lsig lsig = {}; 332 333 if (status->flag & RX_FLAG_RADIOTAP_VHT) { 334 vht = *(struct ieee80211_radiotap_vht *)skb->data; 335 skb_pull(skb, sizeof(vht)); 336 WARN_ON_ONCE(status->encoding != RX_ENC_VHT); 337 } 338 339 if (status->flag & RX_FLAG_RADIOTAP_HE) { 340 he = *(struct ieee80211_radiotap_he *)skb->data; 341 skb_pull(skb, sizeof(he)); 342 WARN_ON_ONCE(status->encoding != RX_ENC_HE); 343 } 344 345 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) { 346 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data; 347 skb_pull(skb, sizeof(he_mu)); 348 } 349 350 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 351 lsig = *(struct ieee80211_radiotap_lsig *)skb->data; 352 skb_pull(skb, sizeof(lsig)); 353 } 354 355 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) { 356 /* data is pointer at tlv all other info was pulled off */ 357 tlvs_len = skb_mac_header(skb) - skb->data; 358 } 359 360 mpdulen = skb->len; 361 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))) 362 mpdulen += FCS_LEN; 363 364 rthdr = skb_push(skb, rtap_len - tlvs_len); 365 memset(rthdr, 0, rtap_len - tlvs_len); 366 it_present = &rthdr->it_present; 367 368 /* radiotap header, set always present flags */ 369 rthdr->it_len = cpu_to_le16(rtap_len); 370 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) | 371 BIT(IEEE80211_RADIOTAP_CHANNEL) | 372 BIT(IEEE80211_RADIOTAP_RX_FLAGS); 373 374 if (!status->chains) 375 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA); 376 377 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 378 it_present_val |= 379 BIT(IEEE80211_RADIOTAP_EXT) | 380 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE); 381 put_unaligned_le32(it_present_val, it_present); 382 it_present++; 383 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) | 384 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL); 385 } 386 387 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) 388 it_present_val |= BIT(IEEE80211_RADIOTAP_TLV); 389 390 put_unaligned_le32(it_present_val, it_present); 391 392 /* This references through an offset into it_optional[] rather 393 * than via it_present otherwise later uses of pos will cause 394 * the compiler to think we have walked past the end of the 395 * struct member. 
396 */ 397 pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional]; 398 399 /* the order of the following fields is important */ 400 401 /* IEEE80211_RADIOTAP_TSFT */ 402 if (ieee80211_have_rx_timestamp(status)) { 403 /* padding */ 404 while ((pos - (u8 *)rthdr) & 7) 405 *pos++ = 0; 406 put_unaligned_le64( 407 ieee80211_calculate_rx_timestamp(local, status, 408 mpdulen, 0), 409 pos); 410 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT)); 411 pos += 8; 412 } 413 414 /* IEEE80211_RADIOTAP_FLAGS */ 415 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) 416 *pos |= IEEE80211_RADIOTAP_F_FCS; 417 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 418 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 419 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) 420 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 421 pos++; 422 423 /* IEEE80211_RADIOTAP_RATE */ 424 if (!rate || status->encoding != RX_ENC_LEGACY) { 425 /* 426 * Without rate information don't add it. If we have, 427 * MCS information is a separate field in radiotap, 428 * added below. The byte here is needed as padding 429 * for the channel though, so initialise it to 0. 430 */ 431 *pos = 0; 432 } else { 433 int shift = 0; 434 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE)); 435 if (status->bw == RATE_INFO_BW_10) 436 shift = 1; 437 else if (status->bw == RATE_INFO_BW_5) 438 shift = 2; 439 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 440 } 441 pos++; 442 443 /* IEEE80211_RADIOTAP_CHANNEL */ 444 /* TODO: frequency offset in KHz */ 445 put_unaligned_le16(status->freq, pos); 446 pos += 2; 447 if (status->bw == RATE_INFO_BW_10) 448 channel_flags |= IEEE80211_CHAN_HALF; 449 else if (status->bw == RATE_INFO_BW_5) 450 channel_flags |= IEEE80211_CHAN_QUARTER; 451 452 if (status->band == NL80211_BAND_5GHZ || 453 status->band == NL80211_BAND_6GHZ) 454 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 455 else if (status->encoding != RX_ENC_LEGACY) 456 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 457 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 458 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 459 else if (rate) 460 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 461 else 462 channel_flags |= IEEE80211_CHAN_2GHZ; 463 put_unaligned_le16(channel_flags, pos); 464 pos += 2; 465 466 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 467 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && 468 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 469 *pos = status->signal; 470 rthdr->it_present |= 471 cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL)); 472 pos++; 473 } 474 475 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 476 477 if (!status->chains) { 478 /* IEEE80211_RADIOTAP_ANTENNA */ 479 *pos = status->antenna; 480 pos++; 481 } 482 483 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 484 485 /* IEEE80211_RADIOTAP_RX_FLAGS */ 486 /* ensure 2 byte alignment for the 2 byte field as required */ 487 if ((pos - (u8 *)rthdr) & 1) 488 *pos++ = 0; 489 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 490 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 491 put_unaligned_le16(rx_flags, pos); 492 pos += 2; 493 494 if (status->encoding == RX_ENC_HT) { 495 unsigned int stbc; 496 497 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); 498 *pos = local->hw.radiotap_mcs_details; 499 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 500 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; 501 if (status->enc_flags & RX_ENC_FLAG_LDPC) 502 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC; 503 pos++; 504 
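		/* second byte of the radiotap MCS field: flags; the byte
		 * above was the "known" bitmap and the byte after this one
		 * is the MCS index
		 */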
*pos = 0; 505 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 506 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 507 if (status->bw == RATE_INFO_BW_40) 508 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 509 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 510 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 511 if (status->enc_flags & RX_ENC_FLAG_LDPC) 512 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 513 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; 514 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 515 pos++; 516 *pos++ = status->rate_idx; 517 } 518 519 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 520 u16 flags = 0; 521 522 /* ensure 4 byte alignment */ 523 while ((pos - (u8 *)rthdr) & 3) 524 pos++; 525 rthdr->it_present |= 526 cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS)); 527 put_unaligned_le32(status->ampdu_reference, pos); 528 pos += 4; 529 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 530 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 531 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 532 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 533 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 534 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 535 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) 536 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; 537 if (status->flag & RX_FLAG_AMPDU_EOF_BIT) 538 flags |= IEEE80211_RADIOTAP_AMPDU_EOF; 539 put_unaligned_le16(flags, pos); 540 pos += 2; 541 *pos++ = 0; 542 *pos++ = 0; 543 } 544 545 if (status->encoding == RX_ENC_VHT) { 546 u16 fill = local->hw.radiotap_vht_details; 547 548 /* Leave driver filled fields alone */ 549 fill &= ~le16_to_cpu(vht.known); 550 vht.known |= cpu_to_le16(fill); 551 552 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI && 553 status->enc_flags & RX_ENC_FLAG_SHORT_GI) 554 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 555 /* in VHT, STBC is binary */ 556 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC && 557 status->enc_flags & RX_ENC_FLAG_STBC_MASK) 558 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 559 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED && 560 status->enc_flags & RX_ENC_FLAG_BF) 561 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 562 563 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { 564 switch (status->bw) { 565 case RATE_INFO_BW_40: 566 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40; 567 break; 568 case RATE_INFO_BW_80: 569 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80; 570 break; 571 case RATE_INFO_BW_160: 572 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160; 573 break; 574 default: 575 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20; 576 break; 577 } 578 } 579 580 /* 581 * If the driver filled in mcs_nss[0], then do not touch it. 582 * 583 * Otherwise, put some information about MCS/NSS into the 584 * user 0 field. Note that this is not technically correct for 585 * an MU frame as we might have decoded a different user. 
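		 * (mcs_nss[0] packs the MCS index into the upper four bits
		 * and the NSS into the lower four, matching the radiotap
		 * VHT mcs_nss encoding.)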
586 */ 587 if (!vht.mcs_nss[0]) { 588 vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss; 589 590 /* coding field */ 591 if (status->enc_flags & RX_ENC_FLAG_LDPC) 592 vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 593 } 594 595 /* ensure 2 byte alignment */ 596 while ((pos - (u8 *)rthdr) & 1) 597 pos++; 598 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); 599 memcpy(pos, &vht, sizeof(vht)); 600 pos += sizeof(vht); 601 } 602 603 if (local->hw.radiotap_timestamp.units_pos >= 0) { 604 u16 accuracy = 0; 605 u8 flags; 606 u64 ts; 607 608 rthdr->it_present |= 609 cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP)); 610 611 /* ensure 8 byte alignment */ 612 while ((pos - (u8 *)rthdr) & 7) 613 pos++; 614 615 if (status->flag & RX_FLAG_MACTIME_IS_RTAP_TS64) { 616 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT; 617 ts = status->mactime; 618 } else { 619 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; 620 ts = status->device_timestamp; 621 } 622 623 put_unaligned_le64(ts, pos); 624 pos += sizeof(u64); 625 626 if (local->hw.radiotap_timestamp.accuracy >= 0) { 627 accuracy = local->hw.radiotap_timestamp.accuracy; 628 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; 629 } 630 put_unaligned_le16(accuracy, pos); 631 pos += sizeof(u16); 632 633 *pos++ = local->hw.radiotap_timestamp.units_pos; 634 *pos++ = flags; 635 } 636 637 if (status->encoding == RX_ENC_HE && 638 status->flag & RX_FLAG_RADIOTAP_HE) { 639 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) 640 641 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { 642 he.data6 |= HE_PREP(DATA6_NSTS, 643 FIELD_GET(RX_ENC_FLAG_STBC_MASK, 644 status->enc_flags)); 645 he.data3 |= HE_PREP(DATA3_STBC, 1); 646 } else { 647 he.data6 |= HE_PREP(DATA6_NSTS, status->nss); 648 } 649 650 #define CHECK_GI(s) \ 651 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ 652 (int)NL80211_RATE_INFO_HE_GI_##s) 653 654 CHECK_GI(0_8); 655 CHECK_GI(1_6); 656 CHECK_GI(3_2); 657 658 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); 659 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); 660 he.data3 |= HE_PREP(DATA3_CODING, 661 !!(status->enc_flags & RX_ENC_FLAG_LDPC)); 662 663 he.data5 |= HE_PREP(DATA5_GI, status->he_gi); 664 665 switch (status->bw) { 666 case RATE_INFO_BW_20: 667 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 668 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); 669 break; 670 case RATE_INFO_BW_40: 671 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 672 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); 673 break; 674 case RATE_INFO_BW_80: 675 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 676 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); 677 break; 678 case RATE_INFO_BW_160: 679 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 680 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); 681 break; 682 case RATE_INFO_BW_HE_RU: 683 #define CHECK_RU_ALLOC(s) \ 684 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ 685 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) 686 687 CHECK_RU_ALLOC(26); 688 CHECK_RU_ALLOC(52); 689 CHECK_RU_ALLOC(106); 690 CHECK_RU_ALLOC(242); 691 CHECK_RU_ALLOC(484); 692 CHECK_RU_ALLOC(996); 693 CHECK_RU_ALLOC(2x996); 694 695 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 696 status->he_ru + 4); 697 break; 698 default: 699 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); 700 } 701 702 /* ensure 2 byte alignment */ 703 while ((pos - (u8 *)rthdr) & 1) 704 pos++; 705 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE)); 706 memcpy(pos, &he, sizeof(he)); 707 pos += sizeof(he); 708 } 709 
710 if (status->encoding == RX_ENC_HE && 711 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 712 /* ensure 2 byte alignment */ 713 while ((pos - (u8 *)rthdr) & 1) 714 pos++; 715 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU)); 716 memcpy(pos, &he_mu, sizeof(he_mu)); 717 pos += sizeof(he_mu); 718 } 719 720 if (status->flag & RX_FLAG_NO_PSDU) { 721 rthdr->it_present |= 722 cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU)); 723 *pos++ = status->zero_length_psdu_type; 724 } 725 726 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 727 /* ensure 2 byte alignment */ 728 while ((pos - (u8 *)rthdr) & 1) 729 pos++; 730 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG)); 731 memcpy(pos, &lsig, sizeof(lsig)); 732 pos += sizeof(lsig); 733 } 734 735 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 736 *pos++ = status->chain_signal[chain]; 737 *pos++ = chain; 738 } 739 } 740 741 static struct sk_buff * 742 ieee80211_make_monitor_skb(struct ieee80211_local *local, 743 struct sk_buff **origskb, 744 struct ieee80211_rate *rate, 745 int rtap_space, bool use_origskb) 746 { 747 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); 748 int rt_hdrlen, needed_headroom; 749 struct sk_buff *skb; 750 751 /* room for the radiotap header based on driver features */ 752 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); 753 needed_headroom = rt_hdrlen - rtap_space; 754 755 if (use_origskb) { 756 /* only need to expand headroom if necessary */ 757 skb = *origskb; 758 *origskb = NULL; 759 760 /* 761 * This shouldn't trigger often because most devices have an 762 * RX header they pull before we get here, and that should 763 * be big enough for our radiotap information. We should 764 * probably export the length to drivers so that we can have 765 * them allocate enough headroom to start with. 766 */ 767 if (skb_headroom(skb) < needed_headroom && 768 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 769 dev_kfree_skb(skb); 770 return NULL; 771 } 772 } else { 773 /* 774 * Need to make a copy and possibly remove radiotap header 775 * and FCS from the original. 
776 */ 777 skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD, 778 0, GFP_ATOMIC); 779 780 if (!skb) 781 return NULL; 782 } 783 784 /* prepend radiotap information */ 785 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 786 787 skb_reset_mac_header(skb); 788 skb->ip_summed = CHECKSUM_UNNECESSARY; 789 skb->pkt_type = PACKET_OTHERHOST; 790 skb->protocol = htons(ETH_P_802_2); 791 792 return skb; 793 } 794 795 static bool 796 ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata, 797 struct ieee80211_local *local, 798 struct ieee80211_rx_status *status) 799 { 800 struct wiphy *wiphy = local->hw.wiphy; 801 int i, freq, bw; 802 803 if (!wiphy->n_radio) 804 return true; 805 806 switch (status->bw) { 807 case RATE_INFO_BW_20: 808 bw = 20000; 809 break; 810 case RATE_INFO_BW_40: 811 bw = 40000; 812 break; 813 case RATE_INFO_BW_80: 814 bw = 80000; 815 break; 816 case RATE_INFO_BW_160: 817 bw = 160000; 818 break; 819 case RATE_INFO_BW_320: 820 bw = 320000; 821 break; 822 default: 823 return false; 824 } 825 826 freq = MHZ_TO_KHZ(status->freq); 827 828 for (i = 0; i < wiphy->n_radio; i++) { 829 if (!(sdata->wdev.radio_mask & BIT(i))) 830 continue; 831 832 if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw)) 833 continue; 834 835 return true; 836 } 837 return false; 838 } 839 840 /* 841 * This function copies a received frame to all monitor interfaces and 842 * returns a cleaned-up SKB that no longer includes the FCS nor the 843 * radiotap header the driver might have added. 844 */ 845 static struct sk_buff * 846 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 847 struct ieee80211_rate *rate) 848 { 849 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 850 struct ieee80211_sub_if_data *sdata, *prev_sdata = NULL; 851 struct sk_buff *skb, *monskb = NULL; 852 int present_fcs_len = 0; 853 unsigned int rtap_space = 0; 854 struct ieee80211_sub_if_data *monitor_sdata = 855 rcu_dereference(local->monitor_sdata); 856 bool only_monitor = false; 857 unsigned int min_head_len; 858 859 if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END && 860 !skb_mac_header_was_set(origskb))) { 861 /* with this skb no way to know where frame payload starts */ 862 dev_kfree_skb(origskb); 863 return NULL; 864 } 865 866 if (status->flag & RX_FLAG_RADIOTAP_VHT) 867 rtap_space += sizeof(struct ieee80211_radiotap_vht); 868 869 if (status->flag & RX_FLAG_RADIOTAP_HE) 870 rtap_space += sizeof(struct ieee80211_radiotap_he); 871 872 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 873 rtap_space += sizeof(struct ieee80211_radiotap_he_mu); 874 875 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 876 rtap_space += sizeof(struct ieee80211_radiotap_lsig); 877 878 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) 879 rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space]; 880 881 min_head_len = rtap_space; 882 883 /* 884 * First, we may need to make a copy of the skb because 885 * (1) we need to modify it for radiotap (if not present), and 886 * (2) the other RX handlers will modify the skb we got. 887 * 888 * We don't need to, of course, if we aren't going to return 889 * the SKB because it has a bad FCS/PLCP checksum. 
890 */ 891 892 if (!(status->flag & RX_FLAG_NO_PSDU)) { 893 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { 894 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { 895 /* driver bug */ 896 WARN_ON(1); 897 dev_kfree_skb(origskb); 898 return NULL; 899 } 900 present_fcs_len = FCS_LEN; 901 } 902 903 /* also consider the hdr->frame_control */ 904 min_head_len += 2; 905 } 906 907 /* ensure that the expected data elements are in skb head */ 908 if (!pskb_may_pull(origskb, min_head_len)) { 909 dev_kfree_skb(origskb); 910 return NULL; 911 } 912 913 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); 914 915 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { 916 if (only_monitor) { 917 dev_kfree_skb(origskb); 918 return NULL; 919 } 920 921 return ieee80211_clean_skb(origskb, present_fcs_len, 922 rtap_space); 923 } 924 925 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); 926 927 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { 928 struct cfg80211_chan_def *chandef; 929 930 chandef = &sdata->vif.bss_conf.chanreq.oper; 931 if (chandef->chan && 932 chandef->chan->center_freq != status->freq) 933 continue; 934 935 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && 936 !ieee80211_validate_monitor_radio(sdata, local, status)) 937 continue; 938 939 if (!prev_sdata) { 940 prev_sdata = sdata; 941 continue; 942 } 943 944 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) 945 ieee80211_handle_mu_mimo_mon(sdata, origskb, rtap_space); 946 947 if (!monskb) 948 monskb = ieee80211_make_monitor_skb(local, &origskb, 949 rate, rtap_space, 950 false); 951 if (!monskb) 952 continue; 953 954 skb = skb_clone(monskb, GFP_ATOMIC); 955 if (!skb) 956 continue; 957 958 skb->dev = prev_sdata->dev; 959 dev_sw_netstats_rx_add(skb->dev, skb->len); 960 netif_receive_skb(skb); 961 prev_sdata = sdata; 962 } 963 964 if (prev_sdata) { 965 if (monskb) 966 skb = monskb; 967 else 968 skb = ieee80211_make_monitor_skb(local, &origskb, 969 rate, rtap_space, 970 only_monitor); 971 if (skb) { 972 skb->dev = prev_sdata->dev; 973 dev_sw_netstats_rx_add(skb->dev, skb->len); 974 netif_receive_skb(skb); 975 } 976 } 977 978 if (!origskb) 979 return NULL; 980 981 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space); 982 } 983 984 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 985 { 986 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 987 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 988 int tid, seqno_idx, security_idx; 989 990 /* does the frame have a qos control field? */ 991 if (ieee80211_is_data_qos(hdr->frame_control)) { 992 u8 *qc = ieee80211_get_qos_ctl(hdr); 993 /* frame has qos control */ 994 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 995 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 996 status->rx_flags |= IEEE80211_RX_AMSDU; 997 998 seqno_idx = tid; 999 security_idx = tid; 1000 } else { 1001 /* 1002 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 1003 * 1004 * Sequence numbers for management frames, QoS data 1005 * frames with a broadcast/multicast address in the 1006 * Address 1 field, and all non-QoS data frames sent 1007 * by QoS STAs are assigned using an additional single 1008 * modulo-4096 counter, [...] 1009 * 1010 * We also use that counter for non-QoS STAs. 
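		 * In other words: QoS data frames use their TID as the
		 * sequence counter index, while all other frames share the
		 * additional counter at index IEEE80211_NUM_TIDS (see below).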
1011 */ 1012 seqno_idx = IEEE80211_NUM_TIDS; 1013 security_idx = 0; 1014 if (ieee80211_is_mgmt(hdr->frame_control)) 1015 security_idx = IEEE80211_NUM_TIDS; 1016 tid = 0; 1017 } 1018 1019 rx->seqno_idx = seqno_idx; 1020 rx->security_idx = security_idx; 1021 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 1022 * For now, set skb->priority to 0 for other cases. */ 1023 rx->skb->priority = (tid > 7) ? 0 : tid; 1024 } 1025 1026 /** 1027 * DOC: Packet alignment 1028 * 1029 * Drivers always need to pass packets that are aligned to two-byte boundaries 1030 * to the stack. 1031 * 1032 * Additionally, they should, if possible, align the payload data in a way that 1033 * guarantees that the contained IP header is aligned to a four-byte 1034 * boundary. In the case of regular frames, this simply means aligning the 1035 * payload to a four-byte boundary (because either the IP header is directly 1036 * contained, or IV/RFC1042 headers that have a length divisible by four are 1037 * in front of it). If the payload data is not properly aligned and the 1038 * architecture doesn't support efficient unaligned operations, mac80211 1039 * will align the data. 1040 * 1041 * With A-MSDU frames, however, the payload data address must yield two modulo 1042 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 1043 * push the IP header further back to a multiple of four again. Thankfully, the 1044 * specs were sane enough this time around to require padding each A-MSDU 1045 * subframe to a length that is a multiple of four. 1046 * 1047 * Padding like Atheros hardware adds which is between the 802.11 header and 1048 * the payload is not supported; the driver is required to move the 802.11 1049 * header to be directly in front of the payload in that case. 
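 * For the A-MSDU case above: if a subframe starts at an address that is
 * 2 modulo 4, adding the 14-byte 802.3 subframe header lands the
 * contained IP header on a 4-byte boundary again (2 + 14 = 16).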
1050 */ 1051 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx) 1052 { 1053 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1054 WARN_ON_ONCE((unsigned long)rx->skb->data & 1); 1055 #endif 1056 } 1057 1058 1059 /* rx handlers */ 1060 1061 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb) 1062 { 1063 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1064 1065 if (is_multicast_ether_addr(hdr->addr1)) 1066 return 0; 1067 1068 return ieee80211_is_robust_mgmt_frame(skb); 1069 } 1070 1071 1072 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb) 1073 { 1074 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1075 1076 if (!is_multicast_ether_addr(hdr->addr1)) 1077 return 0; 1078 1079 return ieee80211_is_robust_mgmt_frame(skb); 1080 } 1081 1082 1083 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */ 1084 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb) 1085 { 1086 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data; 1087 struct ieee80211_mmie *mmie; 1088 struct ieee80211_mmie_16 *mmie16; 1089 1090 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da)) 1091 return -1; 1092 1093 if (!ieee80211_is_robust_mgmt_frame(skb) && 1094 !ieee80211_is_beacon(hdr->frame_control)) 1095 return -1; /* not a robust management frame */ 1096 1097 mmie = (struct ieee80211_mmie *) 1098 (skb->data + skb->len - sizeof(*mmie)); 1099 if (mmie->element_id == WLAN_EID_MMIE && 1100 mmie->length == sizeof(*mmie) - 2) 1101 return le16_to_cpu(mmie->key_id); 1102 1103 mmie16 = (struct ieee80211_mmie_16 *) 1104 (skb->data + skb->len - sizeof(*mmie16)); 1105 if (skb->len >= 24 + sizeof(*mmie16) && 1106 mmie16->element_id == WLAN_EID_MMIE && 1107 mmie16->length == sizeof(*mmie16) - 2) 1108 return le16_to_cpu(mmie16->key_id); 1109 1110 return -1; 1111 } 1112 1113 static int ieee80211_get_keyid(struct sk_buff *skb) 1114 { 1115 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1116 __le16 fc = hdr->frame_control; 1117 int hdrlen = ieee80211_hdrlen(fc); 1118 u8 keyid; 1119 1120 /* WEP, TKIP, CCMP and GCMP */ 1121 if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN)) 1122 return -EINVAL; 1123 1124 skb_copy_bits(skb, hdrlen + 3, &keyid, 1); 1125 1126 keyid >>= 6; 1127 1128 return keyid; 1129 } 1130 1131 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx) 1132 { 1133 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1134 char *dev_addr = rx->sdata->vif.addr; 1135 1136 if (ieee80211_is_data(hdr->frame_control)) { 1137 if (is_multicast_ether_addr(hdr->addr1)) { 1138 if (ieee80211_has_tods(hdr->frame_control) || 1139 !ieee80211_has_fromds(hdr->frame_control)) 1140 return RX_DROP; 1141 if (ether_addr_equal(hdr->addr3, dev_addr)) 1142 return RX_DROP; 1143 } else { 1144 if (!ieee80211_has_a4(hdr->frame_control)) 1145 return RX_DROP; 1146 if (ether_addr_equal(hdr->addr4, dev_addr)) 1147 return RX_DROP; 1148 } 1149 } 1150 1151 /* If there is not an established peer link and this is not a peer link 1152 * establisment frame, beacon or probe, drop the frame. 
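	 * (Authentication and self-protected Action frames are what actually
	 * establish the peering, so they must be let through even without an
	 * established peer link.)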
1153 */ 1154 1155 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 1156 struct ieee80211_mgmt *mgmt; 1157 1158 if (!ieee80211_is_mgmt(hdr->frame_control)) 1159 return RX_DROP; 1160 1161 if (ieee80211_is_action(hdr->frame_control)) { 1162 u8 category; 1163 1164 /* make sure category field is present */ 1165 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 1166 return RX_DROP; 1167 1168 mgmt = (struct ieee80211_mgmt *)hdr; 1169 category = mgmt->u.action.category; 1170 if (category != WLAN_CATEGORY_MESH_ACTION && 1171 category != WLAN_CATEGORY_SELF_PROTECTED) 1172 return RX_DROP; 1173 return RX_CONTINUE; 1174 } 1175 1176 if (ieee80211_is_probe_req(hdr->frame_control) || 1177 ieee80211_is_probe_resp(hdr->frame_control) || 1178 ieee80211_is_beacon(hdr->frame_control) || 1179 ieee80211_is_auth(hdr->frame_control)) 1180 return RX_CONTINUE; 1181 1182 return RX_DROP; 1183 } 1184 1185 return RX_CONTINUE; 1186 } 1187 1188 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, 1189 int index) 1190 { 1191 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; 1192 struct sk_buff *tail = skb_peek_tail(frames); 1193 struct ieee80211_rx_status *status; 1194 1195 if (tid_agg_rx->reorder_buf_filtered && 1196 tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1197 return true; 1198 1199 if (!tail) 1200 return false; 1201 1202 status = IEEE80211_SKB_RXCB(tail); 1203 if (status->flag & RX_FLAG_AMSDU_MORE) 1204 return false; 1205 1206 return true; 1207 } 1208 1209 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 1210 struct tid_ampdu_rx *tid_agg_rx, 1211 int index, 1212 struct sk_buff_head *frames) 1213 { 1214 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; 1215 struct sk_buff *skb; 1216 struct ieee80211_rx_status *status; 1217 1218 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1219 1220 if (skb_queue_empty(skb_list)) 1221 goto no_frame; 1222 1223 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1224 __skb_queue_purge(skb_list); 1225 goto no_frame; 1226 } 1227 1228 /* release frames from the reorder ring buffer */ 1229 tid_agg_rx->stored_mpdu_num--; 1230 while ((skb = __skb_dequeue(skb_list))) { 1231 status = IEEE80211_SKB_RXCB(skb); 1232 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 1233 __skb_queue_tail(frames, skb); 1234 } 1235 1236 no_frame: 1237 if (tid_agg_rx->reorder_buf_filtered) 1238 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1239 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1240 } 1241 1242 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 1243 struct tid_ampdu_rx *tid_agg_rx, 1244 u16 head_seq_num, 1245 struct sk_buff_head *frames) 1246 { 1247 int index; 1248 1249 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1250 1251 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 1252 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1253 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1254 frames); 1255 } 1256 } 1257 1258 /* 1259 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 1260 * the skb was added to the buffer longer than this time ago, the earlier 1261 * frames that have not yet been received are assumed to be lost and the skb 1262 * can be released for processing. This may also release other skb's from the 1263 * reorder buffer if there are no additional gaps between the frames. 1264 * 1265 * Callers must hold tid_agg_rx->reorder_lock. 
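 * The timeout defined below (HZ / 10) corresponds to 100 ms.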
1266 */ 1267 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 1268 1269 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 1270 struct tid_ampdu_rx *tid_agg_rx, 1271 struct sk_buff_head *frames) 1272 { 1273 int index, i, j; 1274 1275 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1276 1277 /* release the buffer until next missing frame */ 1278 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1279 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && 1280 tid_agg_rx->stored_mpdu_num) { 1281 /* 1282 * No buffers ready to be released, but check whether any 1283 * frames in the reorder buffer have timed out. 1284 */ 1285 int skipped = 1; 1286 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 1287 j = (j + 1) % tid_agg_rx->buf_size) { 1288 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { 1289 skipped++; 1290 continue; 1291 } 1292 if (skipped && 1293 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 1294 HT_RX_REORDER_BUF_TIMEOUT)) 1295 goto set_release_timer; 1296 1297 /* don't leave incomplete A-MSDUs around */ 1298 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 1299 i = (i + 1) % tid_agg_rx->buf_size) 1300 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 1301 1302 ht_dbg_ratelimited(sdata, 1303 "release an RX reorder frame due to timeout on earlier frames\n"); 1304 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 1305 frames); 1306 1307 /* 1308 * Increment the head seq# also for the skipped slots. 1309 */ 1310 tid_agg_rx->head_seq_num = 1311 (tid_agg_rx->head_seq_num + 1312 skipped) & IEEE80211_SN_MASK; 1313 skipped = 0; 1314 } 1315 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1316 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1317 frames); 1318 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1319 } 1320 1321 if (tid_agg_rx->stored_mpdu_num) { 1322 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1323 1324 for (; j != (index - 1) % tid_agg_rx->buf_size; 1325 j = (j + 1) % tid_agg_rx->buf_size) { 1326 if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) 1327 break; 1328 } 1329 1330 set_release_timer: 1331 1332 if (!tid_agg_rx->removed) 1333 mod_timer(&tid_agg_rx->reorder_timer, 1334 tid_agg_rx->reorder_time[j] + 1 + 1335 HT_RX_REORDER_BUF_TIMEOUT); 1336 } else { 1337 timer_delete(&tid_agg_rx->reorder_timer); 1338 } 1339 } 1340 1341 /* 1342 * As this function belongs to the RX path it must be under 1343 * rcu_read_lock protection. It returns false if the frame 1344 * can be processed immediately, true if it was consumed. 1345 */ 1346 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 1347 struct tid_ampdu_rx *tid_agg_rx, 1348 struct sk_buff *skb, 1349 struct sk_buff_head *frames) 1350 { 1351 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1352 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1353 u16 mpdu_seq_num = ieee80211_get_sn(hdr); 1354 u16 head_seq_num, buf_size; 1355 int index; 1356 bool ret = true; 1357 1358 spin_lock(&tid_agg_rx->reorder_lock); 1359 1360 /* 1361 * Offloaded BA sessions have no known starting sequence number so pick 1362 * one from first Rxed frame for this tid after BA was started. 
 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in the reorder buffer; frames
 * that become ready for processing are moved onto the 'frames' queue.
1444 */ 1445 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 1446 struct sk_buff_head *frames) 1447 { 1448 struct sk_buff *skb = rx->skb; 1449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1450 struct sta_info *sta = rx->sta; 1451 struct tid_ampdu_rx *tid_agg_rx; 1452 u16 sc; 1453 u8 tid, ack_policy; 1454 1455 if (!ieee80211_is_data_qos(hdr->frame_control) || 1456 is_multicast_ether_addr(hdr->addr1)) 1457 goto dont_reorder; 1458 1459 /* 1460 * filter the QoS data rx stream according to 1461 * STA/TID and check if this STA/TID is on aggregation 1462 */ 1463 1464 if (!sta) 1465 goto dont_reorder; 1466 1467 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1468 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1469 tid = ieee80211_get_tid(hdr); 1470 1471 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1472 if (!tid_agg_rx) { 1473 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1474 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 1475 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 1476 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 1477 WLAN_BACK_RECIPIENT, 1478 WLAN_REASON_QSTA_REQUIRE_SETUP); 1479 goto dont_reorder; 1480 } 1481 1482 /* qos null data frames are excluded */ 1483 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1484 goto dont_reorder; 1485 1486 /* not part of a BA session */ 1487 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK) 1488 goto dont_reorder; 1489 1490 /* new, potentially un-ordered, ampdu frame - process it */ 1491 1492 /* reset session timer */ 1493 if (tid_agg_rx->timeout) 1494 tid_agg_rx->last_rx = jiffies; 1495 1496 /* if this mpdu is fragmented - terminate rx aggregation session */ 1497 sc = le16_to_cpu(hdr->seq_ctrl); 1498 if (sc & IEEE80211_SCTL_FRAG) { 1499 ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb); 1500 return; 1501 } 1502 1503 /* 1504 * No locking needed -- we will only ever process one 1505 * RX packet at a time, and thus own tid_agg_rx. All 1506 * other code manipulating it needs to (and does) make 1507 * sure that we cannot get to it any more before doing 1508 * anything with it. 
1509 */ 1510 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1511 frames)) 1512 return; 1513 1514 dont_reorder: 1515 __skb_queue_tail(frames, skb); 1516 } 1517 1518 static ieee80211_rx_result debug_noinline 1519 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1520 { 1521 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1522 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1523 1524 if (status->flag & RX_FLAG_DUP_VALIDATED) 1525 return RX_CONTINUE; 1526 1527 /* 1528 * Drop duplicate 802.11 retransmissions 1529 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1530 */ 1531 1532 if (rx->skb->len < 24) 1533 return RX_CONTINUE; 1534 1535 if (ieee80211_is_ctl(hdr->frame_control) || 1536 ieee80211_is_any_nullfunc(hdr->frame_control)) 1537 return RX_CONTINUE; 1538 1539 if (!rx->sta) 1540 return RX_CONTINUE; 1541 1542 if (unlikely(is_multicast_ether_addr(hdr->addr1))) { 1543 struct ieee80211_sub_if_data *sdata = rx->sdata; 1544 u16 sn = ieee80211_get_sn(hdr); 1545 1546 if (!ieee80211_is_data_present(hdr->frame_control)) 1547 return RX_CONTINUE; 1548 1549 if (!ieee80211_vif_is_mld(&sdata->vif) || 1550 sdata->vif.type != NL80211_IFTYPE_STATION) 1551 return RX_CONTINUE; 1552 1553 if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO && 1554 ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last)) 1555 return RX_DROP_U_DUP; 1556 1557 sdata->u.mgd.mcast_seq_last = sn; 1558 return RX_CONTINUE; 1559 } 1560 1561 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1562 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { 1563 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); 1564 rx->link_sta->rx_stats.num_duplicates++; 1565 return RX_DROP_U_DUP; 1566 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1567 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1568 } 1569 1570 return RX_CONTINUE; 1571 } 1572 1573 static ieee80211_rx_result debug_noinline 1574 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1575 { 1576 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1577 1578 /* Drop disallowed frame classes based on STA auth/assoc state; 1579 * IEEE 802.11, Chap 5.5. 1580 * 1581 * mac80211 filters only based on association state, i.e. it drops 1582 * Class 3 frames from not associated stations. hostapd sends 1583 * deauth/disassoc frames when needed. In addition, hostapd is 1584 * responsible for filtering on both auth and assoc states. 
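	 * For example, a data frame from a station that has authenticated
	 * but not yet associated is such a Class 3 frame: it is dropped
	 * here, and on AP interfaces additionally reported to userspace as
	 * a spurious frame.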
1585 */ 1586 1587 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 1588 return ieee80211_rx_mesh_check(rx); 1589 1590 if (unlikely((ieee80211_is_data(hdr->frame_control) || 1591 ieee80211_is_pspoll(hdr->frame_control)) && 1592 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC && 1593 rx->sdata->vif.type != NL80211_IFTYPE_OCB && 1594 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) { 1595 /* 1596 * accept port control frames from the AP even when it's not 1597 * yet marked ASSOC to prevent a race where we don't set the 1598 * assoc bit quickly enough before it sends the first frame 1599 */ 1600 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION && 1601 ieee80211_is_data_present(hdr->frame_control)) { 1602 unsigned int hdrlen; 1603 __be16 ethertype; 1604 1605 hdrlen = ieee80211_hdrlen(hdr->frame_control); 1606 1607 if (rx->skb->len < hdrlen + 8) 1608 return RX_DROP; 1609 1610 skb_copy_bits(rx->skb, hdrlen + 6, ðertype, 2); 1611 if (ethertype == rx->sdata->control_port_protocol) 1612 return RX_CONTINUE; 1613 } 1614 1615 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 1616 cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2, 1617 rx->link_id, GFP_ATOMIC)) 1618 return RX_DROP_U_SPURIOUS; 1619 1620 return RX_DROP; 1621 } 1622 1623 return RX_CONTINUE; 1624 } 1625 1626 1627 static ieee80211_rx_result debug_noinline 1628 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx) 1629 { 1630 struct ieee80211_local *local; 1631 struct ieee80211_hdr *hdr; 1632 struct sk_buff *skb; 1633 1634 local = rx->local; 1635 skb = rx->skb; 1636 hdr = (struct ieee80211_hdr *) skb->data; 1637 1638 if (!local->pspolling) 1639 return RX_CONTINUE; 1640 1641 if (!ieee80211_has_fromds(hdr->frame_control)) 1642 /* this is not from AP */ 1643 return RX_CONTINUE; 1644 1645 if (!ieee80211_is_data(hdr->frame_control)) 1646 return RX_CONTINUE; 1647 1648 if (!ieee80211_has_moredata(hdr->frame_control)) { 1649 /* AP has no more frames buffered for us */ 1650 local->pspolling = false; 1651 return RX_CONTINUE; 1652 } 1653 1654 /* more data bit is set, let's request a new frame from the AP */ 1655 ieee80211_send_pspoll(local, rx->sdata); 1656 1657 return RX_CONTINUE; 1658 } 1659 1660 static void sta_ps_start(struct sta_info *sta) 1661 { 1662 struct ieee80211_sub_if_data *sdata = sta->sdata; 1663 struct ieee80211_local *local = sdata->local; 1664 struct ps_data *ps; 1665 int tid; 1666 1667 if (sta->sdata->vif.type == NL80211_IFTYPE_AP || 1668 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1669 ps = &sdata->bss->ps; 1670 else 1671 return; 1672 1673 atomic_inc(&ps->num_sta_ps); 1674 set_sta_flag(sta, WLAN_STA_PS_STA); 1675 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 1676 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta); 1677 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n", 1678 sta->sta.addr, sta->sta.aid); 1679 1680 ieee80211_clear_fast_xmit(sta); 1681 1682 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { 1683 struct ieee80211_txq *txq = sta->sta.txq[tid]; 1684 struct txq_info *txqi = to_txq_info(txq); 1685 1686 spin_lock(&local->active_txq_lock[txq->ac]); 1687 if (!list_empty(&txqi->schedule_order)) 1688 list_del_init(&txqi->schedule_order); 1689 spin_unlock(&local->active_txq_lock[txq->ac]); 1690 1691 if (txq_has_queue(txq)) 1692 set_bit(tid, &sta->txq_buffered_tids); 1693 else 1694 clear_bit(tid, &sta->txq_buffered_tids); 1695 } 1696 } 1697 1698 static void sta_ps_end(struct sta_info *sta) 1699 { 1700 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n", 1701 sta->sta.addr, sta->sta.aid); 
1702 1703 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1704 /* 1705 * Clear the flag only if the other one is still set 1706 * so that the TX path won't start TX'ing new frames 1707 * directly ... In the case that the driver flag isn't 1708 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1709 */ 1710 clear_sta_flag(sta, WLAN_STA_PS_STA); 1711 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1712 sta->sta.addr, sta->sta.aid); 1713 return; 1714 } 1715 1716 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1717 clear_sta_flag(sta, WLAN_STA_PS_STA); 1718 ieee80211_sta_ps_deliver_wakeup(sta); 1719 } 1720 1721 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) 1722 { 1723 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1724 bool in_ps; 1725 1726 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); 1727 1728 /* Don't let the same PS state be set twice */ 1729 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); 1730 if ((start && in_ps) || (!start && !in_ps)) 1731 return -EINVAL; 1732 1733 if (start) 1734 sta_ps_start(sta); 1735 else 1736 sta_ps_end(sta); 1737 1738 return 0; 1739 } 1740 EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1741 1742 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) 1743 { 1744 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1745 1746 if (test_sta_flag(sta, WLAN_STA_SP)) 1747 return; 1748 1749 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1750 ieee80211_sta_ps_deliver_poll_response(sta); 1751 else 1752 set_sta_flag(sta, WLAN_STA_PSPOLL); 1753 } 1754 EXPORT_SYMBOL(ieee80211_sta_pspoll); 1755 1756 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) 1757 { 1758 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1759 int ac = ieee80211_ac_from_tid(tid); 1760 1761 /* 1762 * If this AC is not trigger-enabled do nothing unless the 1763 * driver is calling us after it already checked. 1764 * 1765 * NB: This could/should check a separate bitmap of trigger- 1766 * enabled queues, but for now we only implement uAPSD w/o 1767 * TSPEC changes to the ACs, so they're always the same. 1768 */ 1769 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && 1770 tid != IEEE80211_NUM_TIDS) 1771 return; 1772 1773 /* if we are in a service period, do nothing */ 1774 if (test_sta_flag(sta, WLAN_STA_SP)) 1775 return; 1776 1777 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1778 ieee80211_sta_ps_deliver_uapsd(sta); 1779 else 1780 set_sta_flag(sta, WLAN_STA_UAPSD); 1781 } 1782 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); 1783 1784 static ieee80211_rx_result debug_noinline 1785 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1786 { 1787 struct ieee80211_sub_if_data *sdata = rx->sdata; 1788 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1789 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1790 1791 if (!rx->sta) 1792 return RX_CONTINUE; 1793 1794 if (sdata->vif.type != NL80211_IFTYPE_AP && 1795 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1796 return RX_CONTINUE; 1797 1798 /* 1799 * The device handles station powersave, so don't do anything about 1800 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1801 * it to mac80211 since they're handled.) 1802 */ 1803 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) 1804 return RX_CONTINUE; 1805 1806 /* 1807 * Don't do anything if the station isn't already asleep. In 1808 * the uAPSD case, the station will probably be marked asleep, 1809 * in the PS-Poll case the station must be confused ... 
 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct link_sta_info *link_sta = rx->link_sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta || !link_sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			link_sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data_present(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				link_sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		link_sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
1874 */
1875 link_sta->rx_stats.last_rx = jiffies;
1876 if (ieee80211_is_data_present(hdr->frame_control))
1877 link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1878 }
1879
1880 link_sta->rx_stats.fragments++;
1881
1882 u64_stats_update_begin(&link_sta->rx_stats.syncp);
1883 link_sta->rx_stats.bytes += rx->skb->len;
1884 u64_stats_update_end(&link_sta->rx_stats.syncp);
1885
1886 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1887 link_sta->rx_stats.last_signal = status->signal;
1888 ewma_signal_add(&link_sta->rx_stats_avg.signal,
1889 -status->signal);
1890 }
1891
1892 if (status->chains) {
1893 link_sta->rx_stats.chains = status->chains;
1894 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1895 int signal = status->chain_signal[i];
1896
1897 if (!(status->chains & BIT(i)))
1898 continue;
1899
1900 link_sta->rx_stats.chain_signal_last[i] = signal;
1901 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
1902 -signal);
1903 }
1904 }
1905
1906 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1907 return RX_CONTINUE;
1908
1909 /*
1910 * Change STA power saving mode only at the end of a frame
1911 * exchange sequence, and only for a data or management
1912 * frame as specified in IEEE 802.11-2016 11.2.3.2
1913 */
1914 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1915 !ieee80211_has_morefrags(hdr->frame_control) &&
1916 !is_multicast_ether_addr(hdr->addr1) &&
1917 (ieee80211_is_mgmt(hdr->frame_control) ||
1918 ieee80211_is_data(hdr->frame_control)) &&
1919 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1920 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1921 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1922 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1923 if (!ieee80211_has_pm(hdr->frame_control))
1924 sta_ps_end(sta);
1925 } else {
1926 if (ieee80211_has_pm(hdr->frame_control))
1927 sta_ps_start(sta);
1928 }
1929 }
1930
1931 /* mesh power save support */
1932 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1933 ieee80211_mps_rx_h_sta_process(sta, hdr);
1934
1935 /*
1936 * Drop (qos-)data::nullfunc frames silently, since they
1937 * are used only to control station power saving mode.
1938 */
1939 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1940 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1941
1942 /*
1943 * If we receive a 4-addr nullfunc frame from a STA
1944 * that was not moved to a 4-addr STA vlan yet, send
1945 * the event to userspace and for older hostapd drop
1946 * the frame to the monitor interface.
1947 */
1948 if (ieee80211_has_a4(hdr->frame_control) &&
1949 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1950 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1951 !rx->sdata->u.vlan.sta))) {
1952 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1953 cfg80211_rx_unexpected_4addr_frame(
1954 rx->sdata->dev, sta->sta.addr,
1955 rx->link_id, GFP_ATOMIC);
1956 return RX_DROP_U_UNEXPECTED_4ADDR_FRAME;
1957 }
1958 /*
1959 * Update counter and free packet here to avoid
1960 * counting this as a dropped packet.
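	 *
	 * (Editorial note, not in the original comment: returning RX_QUEUED
	 * after freeing the skb tells the RX path that this handler consumed
	 * the frame, so later handlers do not run and no drop reason is
	 * recorded for it.)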
1961 */ 1962 link_sta->rx_stats.packets++; 1963 dev_kfree_skb(rx->skb); 1964 return RX_QUEUED; 1965 } 1966 1967 return RX_CONTINUE; 1968 } /* ieee80211_rx_h_sta_process */ 1969 1970 static struct ieee80211_key * 1971 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1972 { 1973 struct ieee80211_key *key = NULL; 1974 int idx2; 1975 1976 /* Make sure key gets set if either BIGTK key index is set so that 1977 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1978 * Beacon frames and Beacon frames that claim to use another BIGTK key 1979 * index (i.e., a key that we do not have). 1980 */ 1981 1982 if (idx < 0) { 1983 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1984 idx2 = idx + 1; 1985 } else { 1986 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1987 idx2 = idx + 1; 1988 else 1989 idx2 = idx - 1; 1990 } 1991 1992 if (rx->link_sta) 1993 key = rcu_dereference(rx->link_sta->gtk[idx]); 1994 if (!key) 1995 key = rcu_dereference(rx->link->gtk[idx]); 1996 if (!key && rx->link_sta) 1997 key = rcu_dereference(rx->link_sta->gtk[idx2]); 1998 if (!key) 1999 key = rcu_dereference(rx->link->gtk[idx2]); 2000 2001 return key; 2002 } 2003 2004 static ieee80211_rx_result debug_noinline 2005 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 2006 { 2007 struct sk_buff *skb = rx->skb; 2008 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2010 int keyidx; 2011 ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL; 2012 struct ieee80211_key *sta_ptk = NULL; 2013 struct ieee80211_key *ptk_idx = NULL; 2014 int mmie_keyidx = -1; 2015 __le16 fc; 2016 2017 if (ieee80211_is_ext(hdr->frame_control)) 2018 return RX_CONTINUE; 2019 2020 /* 2021 * Key selection 101 2022 * 2023 * There are five types of keys: 2024 * - GTK (group keys) 2025 * - IGTK (group keys for management frames) 2026 * - BIGTK (group keys for Beacon frames) 2027 * - PTK (pairwise keys) 2028 * - STK (station-to-station pairwise keys) 2029 * 2030 * When selecting a key, we have to distinguish between multicast 2031 * (including broadcast) and unicast frames, the latter can only 2032 * use PTKs and STKs while the former always use GTKs, IGTKs, and 2033 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 2034 * then unicast frames can also use key indices like GTKs. Hence, if we 2035 * don't have a PTK/STK we check the key index for a WEP key. 2036 * 2037 * Note that in a regular BSS, multicast frames are sent by the 2038 * AP only, associated stations unicast the frame to the AP first 2039 * which then multicasts it on their behalf. 2040 * 2041 * There is also a slight problem in IBSS mode: GTKs are negotiated 2042 * with each station, that is something we don't currently handle. 2043 * The spec seems to expect that one negotiates the same key with 2044 * every station but there's no such requirement; VLANs could be 2045 * possible. 
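	 *
	 * (Editorial example, added for illustration and derived from the
	 * code below rather than from the spec text: a protected unicast
	 * data frame from an RSN peer selects sta->ptk[] using the key
	 * index recovered from the frame; a protected multicast data frame
	 * tries the per-station gtk[] first and then the per-link gtk[];
	 * a BIP-protected group-addressed management frame selects an IGTK
	 * via the MMIE key index; and a protected Beacon selects a BIGTK
	 * through ieee80211_rx_get_bigtk().)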
2046 */ 2047 2048 /* start without a key */ 2049 rx->key = NULL; 2050 fc = hdr->frame_control; 2051 2052 if (rx->sta) { 2053 int keyid = rx->sta->ptk_idx; 2054 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 2055 2056 if (ieee80211_has_protected(fc) && 2057 !(status->flag & RX_FLAG_IV_STRIPPED)) { 2058 keyid = ieee80211_get_keyid(rx->skb); 2059 2060 if (unlikely(keyid < 0)) 2061 return RX_DROP_U_NO_KEY_ID; 2062 2063 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 2064 } 2065 } 2066 2067 if (!ieee80211_has_protected(fc)) 2068 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 2069 2070 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 2071 rx->key = ptk_idx ? ptk_idx : sta_ptk; 2072 if ((status->flag & RX_FLAG_DECRYPTED) && 2073 (status->flag & RX_FLAG_IV_STRIPPED)) 2074 return RX_CONTINUE; 2075 /* Skip decryption if the frame is not protected. */ 2076 if (!ieee80211_has_protected(fc)) 2077 return RX_CONTINUE; 2078 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 2079 /* Broadcast/multicast robust management frame / BIP */ 2080 if ((status->flag & RX_FLAG_DECRYPTED) && 2081 (status->flag & RX_FLAG_IV_STRIPPED)) 2082 return RX_CONTINUE; 2083 2084 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 2085 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 2086 NUM_DEFAULT_BEACON_KEYS) { 2087 if (rx->sdata->dev) 2088 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2089 skb->data, 2090 skb->len); 2091 return RX_DROP_U_BAD_BCN_KEYIDX; 2092 } 2093 2094 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 2095 if (!rx->key) 2096 return RX_CONTINUE; /* Beacon protection not in use */ 2097 } else if (mmie_keyidx >= 0) { 2098 /* Broadcast/multicast robust management frame / BIP */ 2099 if ((status->flag & RX_FLAG_DECRYPTED) && 2100 (status->flag & RX_FLAG_IV_STRIPPED)) 2101 return RX_CONTINUE; 2102 2103 if (mmie_keyidx < NUM_DEFAULT_KEYS || 2104 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 2105 return RX_DROP_U_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ 2106 if (rx->link_sta) { 2107 if (ieee80211_is_group_privacy_action(skb) && 2108 test_sta_flag(rx->sta, WLAN_STA_MFP)) 2109 return RX_DROP; 2110 2111 rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); 2112 } 2113 if (!rx->key) 2114 rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); 2115 } else if (!ieee80211_has_protected(fc)) { 2116 /* 2117 * The frame was not protected, so skip decryption. However, we 2118 * need to set rx->key if there is a key that could have been 2119 * used so that the frame may be dropped if encryption would 2120 * have been expected. 2121 */ 2122 struct ieee80211_key *key = NULL; 2123 int i; 2124 2125 if (ieee80211_is_beacon(fc)) { 2126 key = ieee80211_rx_get_bigtk(rx, -1); 2127 } else if (ieee80211_is_mgmt(fc) && 2128 is_multicast_ether_addr(hdr->addr1)) { 2129 key = rcu_dereference(rx->link->default_mgmt_key); 2130 } else { 2131 if (rx->link_sta) { 2132 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2133 key = rcu_dereference(rx->link_sta->gtk[i]); 2134 if (key) 2135 break; 2136 } 2137 } 2138 if (!key) { 2139 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2140 key = rcu_dereference(rx->link->gtk[i]); 2141 if (key) 2142 break; 2143 } 2144 } 2145 } 2146 if (key) 2147 rx->key = key; 2148 return RX_CONTINUE; 2149 } else { 2150 /* 2151 * The device doesn't give us the IV so we won't be 2152 * able to look up the key. That's ok though, we 2153 * don't need to decrypt the frame, we just won't 2154 * be able to keep statistics accurate. 
2155 * Except for key threshold notifications, should 2156 * we somehow allow the driver to tell us which key 2157 * the hardware used if this flag is set? 2158 */ 2159 if ((status->flag & RX_FLAG_DECRYPTED) && 2160 (status->flag & RX_FLAG_IV_STRIPPED)) 2161 return RX_CONTINUE; 2162 2163 keyidx = ieee80211_get_keyid(rx->skb); 2164 2165 if (unlikely(keyidx < 0)) 2166 return RX_DROP_U_NO_KEY_ID; 2167 2168 /* check per-station GTK first, if multicast packet */ 2169 if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) 2170 rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); 2171 2172 /* if not found, try default key */ 2173 if (!rx->key) { 2174 if (is_multicast_ether_addr(hdr->addr1)) 2175 rx->key = rcu_dereference(rx->link->gtk[keyidx]); 2176 if (!rx->key) 2177 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2178 2179 /* 2180 * RSNA-protected unicast frames should always be 2181 * sent with pairwise or station-to-station keys, 2182 * but for WEP we allow using a key index as well. 2183 */ 2184 if (rx->key && 2185 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2186 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2187 !is_multicast_ether_addr(hdr->addr1)) 2188 rx->key = NULL; 2189 } 2190 } 2191 2192 if (rx->key) { 2193 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2194 return RX_DROP; 2195 2196 /* TODO: add threshold stuff again */ 2197 } else { 2198 return RX_DROP; 2199 } 2200 2201 switch (rx->key->conf.cipher) { 2202 case WLAN_CIPHER_SUITE_WEP40: 2203 case WLAN_CIPHER_SUITE_WEP104: 2204 result = ieee80211_crypto_wep_decrypt(rx); 2205 break; 2206 case WLAN_CIPHER_SUITE_TKIP: 2207 result = ieee80211_crypto_tkip_decrypt(rx); 2208 break; 2209 case WLAN_CIPHER_SUITE_CCMP: 2210 result = ieee80211_crypto_ccmp_decrypt( 2211 rx, IEEE80211_CCMP_MIC_LEN); 2212 break; 2213 case WLAN_CIPHER_SUITE_CCMP_256: 2214 result = ieee80211_crypto_ccmp_decrypt( 2215 rx, IEEE80211_CCMP_256_MIC_LEN); 2216 break; 2217 case WLAN_CIPHER_SUITE_AES_CMAC: 2218 result = ieee80211_crypto_aes_cmac_decrypt( 2219 rx, IEEE80211_CMAC_128_MIC_LEN); 2220 break; 2221 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2222 result = ieee80211_crypto_aes_cmac_decrypt( 2223 rx, IEEE80211_CMAC_256_MIC_LEN); 2224 break; 2225 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2226 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2227 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2228 break; 2229 case WLAN_CIPHER_SUITE_GCMP: 2230 case WLAN_CIPHER_SUITE_GCMP_256: 2231 result = ieee80211_crypto_gcmp_decrypt(rx); 2232 break; 2233 default: 2234 result = RX_DROP_U_BAD_CIPHER; 2235 } 2236 2237 /* the hdr variable is invalid after the decrypt handlers */ 2238 2239 /* either the frame has been decrypted or will be dropped */ 2240 status->flag |= RX_FLAG_DECRYPTED; 2241 2242 if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) && 2243 rx->sdata->dev)) 2244 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2245 skb->data, skb->len); 2246 2247 return result; 2248 } 2249 2250 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) 2251 { 2252 int i; 2253 2254 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2255 skb_queue_head_init(&cache->entries[i].skb_list); 2256 } 2257 2258 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) 2259 { 2260 int i; 2261 2262 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2263 __skb_queue_purge(&cache->entries[i].skb_list); 2264 } 2265 2266 static inline struct ieee80211_fragment_entry * 2267 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, 2268 unsigned int frag, unsigned 
int seq, int rx_queue, 2269 struct sk_buff **skb) 2270 { 2271 struct ieee80211_fragment_entry *entry; 2272 2273 entry = &cache->entries[cache->next++]; 2274 if (cache->next >= IEEE80211_FRAGMENT_MAX) 2275 cache->next = 0; 2276 2277 __skb_queue_purge(&entry->skb_list); 2278 2279 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2280 *skb = NULL; 2281 entry->first_frag_time = jiffies; 2282 entry->seq = seq; 2283 entry->rx_queue = rx_queue; 2284 entry->last_frag = frag; 2285 entry->check_sequential_pn = false; 2286 entry->extra_len = 0; 2287 2288 return entry; 2289 } 2290 2291 static inline struct ieee80211_fragment_entry * 2292 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, 2293 unsigned int frag, unsigned int seq, 2294 int rx_queue, struct ieee80211_hdr *hdr) 2295 { 2296 struct ieee80211_fragment_entry *entry; 2297 int i, idx; 2298 2299 idx = cache->next; 2300 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2301 struct ieee80211_hdr *f_hdr; 2302 struct sk_buff *f_skb; 2303 2304 idx--; 2305 if (idx < 0) 2306 idx = IEEE80211_FRAGMENT_MAX - 1; 2307 2308 entry = &cache->entries[idx]; 2309 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2310 entry->rx_queue != rx_queue || 2311 entry->last_frag + 1 != frag) 2312 continue; 2313 2314 f_skb = __skb_peek(&entry->skb_list); 2315 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2316 2317 /* 2318 * Check ftype and addresses are equal, else check next fragment 2319 */ 2320 if (((hdr->frame_control ^ f_hdr->frame_control) & 2321 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2322 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2323 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2324 continue; 2325 2326 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2327 __skb_queue_purge(&entry->skb_list); 2328 continue; 2329 } 2330 return entry; 2331 } 2332 2333 return NULL; 2334 } 2335 2336 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) 2337 { 2338 return rx->key && 2339 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2340 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2341 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2342 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2343 ieee80211_has_protected(fc); 2344 } 2345 2346 static ieee80211_rx_result debug_noinline 2347 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2348 { 2349 struct ieee80211_fragment_cache *cache = &rx->sdata->frags; 2350 struct ieee80211_hdr *hdr; 2351 u16 sc; 2352 __le16 fc; 2353 unsigned int frag, seq; 2354 struct ieee80211_fragment_entry *entry; 2355 struct sk_buff *skb; 2356 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2357 2358 hdr = (struct ieee80211_hdr *)rx->skb->data; 2359 fc = hdr->frame_control; 2360 2361 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2362 return RX_CONTINUE; 2363 2364 sc = le16_to_cpu(hdr->seq_ctrl); 2365 frag = sc & IEEE80211_SCTL_FRAG; 2366 2367 if (rx->sta) 2368 cache = &rx->sta->frags; 2369 2370 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2371 goto out; 2372 2373 if (is_multicast_ether_addr(hdr->addr1)) 2374 return RX_DROP; 2375 2376 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2377 2378 if (skb_linearize(rx->skb)) 2379 return RX_DROP_U_OOM; 2380 2381 /* 2382 * skb_linearize() might change the skb->data and 2383 * previously cached variables (in this case, hdr) need to 2384 * be refreshed with the new data. 
2385 */ 2386 hdr = (struct ieee80211_hdr *)rx->skb->data; 2387 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2388 2389 if (frag == 0) { 2390 /* This is the first fragment of a new frame. */ 2391 entry = ieee80211_reassemble_add(cache, frag, seq, 2392 rx->seqno_idx, &(rx->skb)); 2393 if (requires_sequential_pn(rx, fc)) { 2394 int queue = rx->security_idx; 2395 2396 /* Store CCMP/GCMP PN so that we can verify that the 2397 * next fragment has a sequential PN value. 2398 */ 2399 entry->check_sequential_pn = true; 2400 entry->is_protected = true; 2401 entry->key_color = rx->key->color; 2402 memcpy(entry->last_pn, 2403 rx->key->u.ccmp.rx_pn[queue], 2404 IEEE80211_CCMP_PN_LEN); 2405 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2406 u.ccmp.rx_pn) != 2407 offsetof(struct ieee80211_key, 2408 u.gcmp.rx_pn)); 2409 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2410 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2411 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2412 IEEE80211_GCMP_PN_LEN); 2413 } else if (rx->key && 2414 (ieee80211_has_protected(fc) || 2415 (status->flag & RX_FLAG_DECRYPTED))) { 2416 entry->is_protected = true; 2417 entry->key_color = rx->key->color; 2418 } 2419 return RX_QUEUED; 2420 } 2421 2422 /* This is a fragment for a frame that should already be pending in 2423 * fragment cache. Add this fragment to the end of the pending entry. 2424 */ 2425 entry = ieee80211_reassemble_find(cache, frag, seq, 2426 rx->seqno_idx, hdr); 2427 if (!entry) { 2428 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2429 return RX_DROP; 2430 } 2431 2432 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2433 * MPDU PN values are not incrementing in steps of 1." 2434 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2435 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2436 */ 2437 if (entry->check_sequential_pn) { 2438 int i; 2439 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2440 2441 if (!requires_sequential_pn(rx, fc)) 2442 return RX_DROP_U_NONSEQ_PN; 2443 2444 /* Prevent mixed key and fragment cache attacks */ 2445 if (entry->key_color != rx->key->color) 2446 return RX_DROP_U_BAD_KEY_COLOR; 2447 2448 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2449 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2450 pn[i]++; 2451 if (pn[i]) 2452 break; 2453 } 2454 2455 rpn = rx->ccm_gcm.pn; 2456 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2457 return RX_DROP_U_REPLAY; 2458 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2459 } else if (entry->is_protected && 2460 (!rx->key || 2461 (!ieee80211_has_protected(fc) && 2462 !(status->flag & RX_FLAG_DECRYPTED)) || 2463 rx->key->color != entry->key_color)) { 2464 /* Drop this as a mixed key or fragment cache attack, even 2465 * if for TKIP Michael MIC should protect us, and WEP is a 2466 * lost cause anyway. 
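	 *
	 * (Editorial example for the sequential-PN rule enforced just above,
	 * not part of the original comment: if the previous fragment carried
	 * PN 00:00:00:00:01:ff, the only PN accepted on the next fragment is
	 * 00:00:00:00:02:00 -- the stored PN is incremented byte-wise from
	 * the least significant byte with carry, and any mismatch is treated
	 * as a replay.)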
2467 */ 2468 return RX_DROP_U_EXPECT_DEFRAG_PROT; 2469 } else if (entry->is_protected && rx->key && 2470 entry->key_color != rx->key->color && 2471 (status->flag & RX_FLAG_DECRYPTED)) { 2472 return RX_DROP_U_BAD_KEY_COLOR; 2473 } 2474 2475 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2476 __skb_queue_tail(&entry->skb_list, rx->skb); 2477 entry->last_frag = frag; 2478 entry->extra_len += rx->skb->len; 2479 if (ieee80211_has_morefrags(fc)) { 2480 rx->skb = NULL; 2481 return RX_QUEUED; 2482 } 2483 2484 rx->skb = __skb_dequeue(&entry->skb_list); 2485 if (skb_tailroom(rx->skb) < entry->extra_len) { 2486 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2487 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2488 GFP_ATOMIC))) { 2489 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2490 __skb_queue_purge(&entry->skb_list); 2491 return RX_DROP_U_OOM; 2492 } 2493 } 2494 while ((skb = __skb_dequeue(&entry->skb_list))) { 2495 skb_put_data(rx->skb, skb->data, skb->len); 2496 dev_kfree_skb(skb); 2497 } 2498 2499 out: 2500 ieee80211_led_rx(rx->local); 2501 if (rx->sta) 2502 rx->link_sta->rx_stats.packets++; 2503 return RX_CONTINUE; 2504 } 2505 2506 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2507 { 2508 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2509 return -EACCES; 2510 2511 return 0; 2512 } 2513 2514 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2515 { 2516 struct sk_buff *skb = rx->skb; 2517 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2518 2519 /* 2520 * Pass through unencrypted frames if the hardware has 2521 * decrypted them already. 2522 */ 2523 if (status->flag & RX_FLAG_DECRYPTED) 2524 return 0; 2525 2526 /* Drop unencrypted frames if key is set. */ 2527 if (unlikely(!ieee80211_has_protected(fc) && 2528 !ieee80211_is_any_nullfunc(fc) && 2529 ieee80211_is_data(fc) && rx->key)) 2530 return -EACCES; 2531 2532 return 0; 2533 } 2534 2535 VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result 2536 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2537 { 2538 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2539 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2540 __le16 fc = mgmt->frame_control; 2541 2542 /* 2543 * Pass through unencrypted frames if the hardware has 2544 * decrypted them already. 2545 */ 2546 if (status->flag & RX_FLAG_DECRYPTED) 2547 return RX_CONTINUE; 2548 2549 /* drop unicast protected dual (that wasn't protected) */ 2550 if (ieee80211_is_action(fc) && 2551 mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) 2552 return RX_DROP_U_UNPROT_DUAL; 2553 2554 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2555 if (unlikely(!ieee80211_has_protected(fc) && 2556 ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { 2557 if (ieee80211_is_deauth(fc) || 2558 ieee80211_is_disassoc(fc)) { 2559 /* 2560 * Permit unprotected deauth/disassoc frames 2561 * during 4-way-HS (key is installed after HS). 
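	 *
	 * (Editorial note, added for context: before the 4-way handshake
	 * completes no key is installed, so such frames cannot be protected
	 * yet; once rx->key exists the unprotected frame is reported to
	 * cfg80211 below and then dropped.)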
2562 */ 2563 if (!rx->key) 2564 return RX_CONTINUE; 2565 2566 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2567 rx->skb->data, 2568 rx->skb->len); 2569 } 2570 return RX_DROP_U_UNPROT_UCAST_MGMT; 2571 } 2572 /* BIP does not use Protected field, so need to check MMIE */ 2573 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2574 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2575 if (ieee80211_is_deauth(fc) || 2576 ieee80211_is_disassoc(fc)) 2577 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2578 rx->skb->data, 2579 rx->skb->len); 2580 return RX_DROP_U_UNPROT_MCAST_MGMT; 2581 } 2582 if (unlikely(ieee80211_is_beacon(fc) && rx->key && 2583 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2584 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2585 rx->skb->data, 2586 rx->skb->len); 2587 return RX_DROP_U_UNPROT_BEACON; 2588 } 2589 /* 2590 * When using MFP, Action frames are not allowed prior to 2591 * having configured keys. 2592 */ 2593 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2594 ieee80211_is_robust_mgmt_frame(rx->skb))) 2595 return RX_DROP_U_UNPROT_ACTION; 2596 2597 /* drop unicast public action frames when using MPF */ 2598 if (is_unicast_ether_addr(mgmt->da) && 2599 ieee80211_is_protected_dual_of_public_action(rx->skb)) 2600 return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION; 2601 } 2602 2603 /* 2604 * Drop robust action frames before assoc regardless of MFP state, 2605 * after assoc we also have decided on MFP or not. 2606 */ 2607 if (ieee80211_is_action(fc) && 2608 ieee80211_is_robust_mgmt_frame(rx->skb) && 2609 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC))) 2610 return RX_DROP_U_UNPROT_ROBUST_ACTION; 2611 2612 return RX_CONTINUE; 2613 } 2614 EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt); 2615 2616 static ieee80211_rx_result 2617 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2618 { 2619 struct ieee80211_sub_if_data *sdata = rx->sdata; 2620 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2621 bool check_port_control = false; 2622 struct ethhdr *ehdr; 2623 int ret; 2624 2625 *port_control = false; 2626 if (ieee80211_has_a4(hdr->frame_control) && 2627 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2628 return RX_DROP_U_UNEXPECTED_VLAN_4ADDR; 2629 2630 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2631 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2632 if (!sdata->u.mgd.use_4addr) 2633 return RX_DROP_U_UNEXPECTED_STA_4ADDR; 2634 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2635 check_port_control = true; 2636 } 2637 2638 if (is_multicast_ether_addr(hdr->addr1) && 2639 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2640 return RX_DROP_U_UNEXPECTED_VLAN_MCAST; 2641 2642 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2643 if (ret < 0) 2644 return RX_DROP_U_INVALID_8023; 2645 2646 ehdr = (struct ethhdr *) rx->skb->data; 2647 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2648 *port_control = true; 2649 else if (check_port_control) 2650 return RX_DROP_U_NOT_PORT_CONTROL; 2651 2652 return RX_CONTINUE; 2653 } 2654 2655 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, 2656 const u8 *addr, int *out_link_id) 2657 { 2658 unsigned int link_id; 2659 2660 /* non-MLO, or MLD address replaced by hardware */ 2661 if (ether_addr_equal(sdata->vif.addr, addr)) 2662 return true; 2663 2664 if (!ieee80211_vif_is_mld(&sdata->vif)) 2665 return false; 2666 2667 for (link_id = 0; link_id < 
ARRAY_SIZE(sdata->vif.link_conf); link_id++) { 2668 struct ieee80211_bss_conf *conf; 2669 2670 conf = rcu_dereference(sdata->vif.link_conf[link_id]); 2671 2672 if (!conf) 2673 continue; 2674 if (ether_addr_equal(conf->addr, addr)) { 2675 if (out_link_id) 2676 *out_link_id = link_id; 2677 return true; 2678 } 2679 } 2680 2681 return false; 2682 } 2683 2684 /* 2685 * requires that rx->skb is a frame with ethernet header 2686 */ 2687 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2688 { 2689 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2690 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2691 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2692 2693 /* 2694 * Allow EAPOL frames to us/the PAE group address regardless of 2695 * whether the frame was encrypted or not, and always disallow 2696 * all other destination addresses for them. 2697 */ 2698 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) 2699 return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) || 2700 ether_addr_equal(ehdr->h_dest, pae_group_addr); 2701 2702 if (ieee80211_802_1x_port_control(rx) || 2703 ieee80211_drop_unencrypted(rx, fc)) 2704 return false; 2705 2706 return true; 2707 } 2708 2709 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2710 struct ieee80211_rx_data *rx) 2711 { 2712 struct ieee80211_sub_if_data *sdata = rx->sdata; 2713 struct net_device *dev = sdata->dev; 2714 2715 if (unlikely((skb->protocol == sdata->control_port_protocol || 2716 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2717 !sdata->control_port_no_preauth)) && 2718 sdata->control_port_over_nl80211)) { 2719 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2720 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2721 2722 cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id); 2723 dev_kfree_skb(skb); 2724 } else { 2725 struct ethhdr *ehdr = (void *)skb_mac_header(skb); 2726 2727 memset(skb->cb, 0, sizeof(skb->cb)); 2728 2729 /* 2730 * 802.1X over 802.11 requires that the authenticator address 2731 * be used for EAPOL frames. However, 802.1X allows the use of 2732 * the PAE group address instead. If the interface is part of 2733 * a bridge and we pass the frame with the PAE group address, 2734 * then the bridge will forward it to the network (even if the 2735 * client was not associated yet), which isn't supposed to 2736 * happen. 2737 * To avoid that, rewrite the destination address to our own 2738 * address, so that the authenticator (e.g. hostapd) will see 2739 * the frame, but bridge won't forward it anywhere else. Note 2740 * that due to earlier filtering, the only other address can 2741 * be the PAE group address, unless the hardware allowed them 2742 * through in 802.3 offloaded mode. 
2743 */ 2744 if (unlikely(skb->protocol == sdata->control_port_protocol && 2745 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) 2746 ether_addr_copy(ehdr->h_dest, sdata->vif.addr); 2747 2748 /* deliver to local stack */ 2749 if (rx->list) 2750 list_add_tail(&skb->list, rx->list); 2751 else 2752 netif_receive_skb(skb); 2753 } 2754 } 2755 2756 /* 2757 * requires that rx->skb is a frame with ethernet header 2758 */ 2759 static void 2760 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2761 { 2762 struct ieee80211_sub_if_data *sdata = rx->sdata; 2763 struct net_device *dev = sdata->dev; 2764 struct sk_buff *skb, *xmit_skb; 2765 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2766 struct sta_info *dsta; 2767 2768 skb = rx->skb; 2769 xmit_skb = NULL; 2770 2771 dev_sw_netstats_rx_add(dev, skb->len); 2772 2773 if (rx->sta) { 2774 /* The seqno index has the same property as needed 2775 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2776 * for non-QoS-data frames. Here we know it's a data 2777 * frame, so count MSDUs. 2778 */ 2779 u64_stats_update_begin(&rx->link_sta->rx_stats.syncp); 2780 rx->link_sta->rx_stats.msdu[rx->seqno_idx]++; 2781 u64_stats_update_end(&rx->link_sta->rx_stats.syncp); 2782 } 2783 2784 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2785 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2786 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2787 ehdr->h_proto != rx->sdata->control_port_protocol && 2788 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2789 if (is_multicast_ether_addr(ehdr->h_dest) && 2790 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2791 /* 2792 * send multicast frames both to higher layers in 2793 * local net stack and back to the wireless medium 2794 */ 2795 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2796 if (!xmit_skb) 2797 net_info_ratelimited("%s: failed to clone multicast frame\n", 2798 dev->name); 2799 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2800 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2801 dsta = sta_info_get(sdata, ehdr->h_dest); 2802 if (dsta) { 2803 /* 2804 * The destination station is associated to 2805 * this AP (in this VLAN), so send the frame 2806 * directly to it and do not pass it to local 2807 * net stack. 2808 */ 2809 xmit_skb = skb; 2810 skb = NULL; 2811 } 2812 } 2813 } 2814 2815 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2816 if (skb) { 2817 /* 'align' will only take the values 0 or 2 here since all 2818 * frames are required to be aligned to 2-byte boundaries 2819 * when being passed to mac80211; the code here works just 2820 * as well if that isn't true, but mac80211 assumes it can 2821 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2822 */ 2823 int align; 2824 2825 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2826 if (align) { 2827 if (WARN_ON(skb_headroom(skb) < 3)) { 2828 dev_kfree_skb(skb); 2829 skb = NULL; 2830 } else { 2831 u8 *data = skb->data; 2832 size_t len = skb_headlen(skb); 2833 skb->data -= align; 2834 memmove(skb->data, data, len); 2835 skb_set_tail_pointer(skb, len); 2836 } 2837 } 2838 } 2839 #endif 2840 2841 if (skb) { 2842 skb->protocol = eth_type_trans(skb, dev); 2843 ieee80211_deliver_skb_to_local_stack(skb, rx); 2844 } 2845 2846 if (xmit_skb) { 2847 /* 2848 * Send to wireless media and increase priority by 256 to 2849 * keep the received priority instead of reclassifying 2850 * the frame (see cfg80211_classify8021d). 
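	 *
	 * (Editorial note, added for clarity: cfg80211_classify8021d()
	 * treats priorities in the 256..263 range as already classified and
	 * maps them straight back to priority - 256, so the TID the frame
	 * arrived with is preserved when it is bridged back to the medium.)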
2851 */ 2852 xmit_skb->priority += 256; 2853 xmit_skb->protocol = htons(ETH_P_802_3); 2854 skb_reset_network_header(xmit_skb); 2855 skb_reset_mac_header(xmit_skb); 2856 dev_queue_xmit(xmit_skb); 2857 } 2858 } 2859 2860 #ifdef CONFIG_MAC80211_MESH 2861 static bool 2862 ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata, 2863 struct sk_buff *skb, int hdrlen) 2864 { 2865 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2866 struct ieee80211_mesh_fast_tx_key key = { 2867 .type = MESH_FAST_TX_TYPE_FORWARDED 2868 }; 2869 struct ieee80211_mesh_fast_tx *entry; 2870 struct ieee80211s_hdr *mesh_hdr; 2871 struct tid_ampdu_tx *tid_tx; 2872 struct sta_info *sta; 2873 struct ethhdr eth; 2874 u8 tid; 2875 2876 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth)); 2877 if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2878 ether_addr_copy(key.addr, mesh_hdr->eaddr1); 2879 else if (!(mesh_hdr->flags & MESH_FLAGS_AE)) 2880 ether_addr_copy(key.addr, skb->data); 2881 else 2882 return false; 2883 2884 entry = mesh_fast_tx_get(sdata, &key); 2885 if (!entry) 2886 return false; 2887 2888 sta = rcu_dereference(entry->mpath->next_hop); 2889 if (!sta) 2890 return false; 2891 2892 if (skb_linearize(skb)) 2893 return false; 2894 2895 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 2896 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); 2897 if (tid_tx) { 2898 if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) 2899 return false; 2900 2901 if (tid_tx->timeout) 2902 tid_tx->last_tx = jiffies; 2903 } 2904 2905 ieee80211_aggr_check(sdata, sta, skb); 2906 2907 if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen, 2908 &skb->protocol)) 2909 hdrlen += ETH_ALEN; 2910 else 2911 skb->protocol = htons(skb->len - hdrlen); 2912 skb_set_network_header(skb, hdrlen + 2); 2913 2914 skb->dev = sdata->dev; 2915 memcpy(ð, skb->data, ETH_HLEN - 2); 2916 skb_pull(skb, 2); 2917 __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx, 2918 eth.h_dest, eth.h_source); 2919 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2920 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2921 2922 return true; 2923 } 2924 #endif 2925 2926 static ieee80211_rx_result 2927 ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, 2928 struct sk_buff *skb) 2929 { 2930 #ifdef CONFIG_MAC80211_MESH 2931 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2932 struct ieee80211_local *local = sdata->local; 2933 uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA; 2934 struct ieee80211_hdr hdr = { 2935 .frame_control = cpu_to_le16(fc) 2936 }; 2937 struct ieee80211_hdr *fwd_hdr; 2938 struct ieee80211s_hdr *mesh_hdr; 2939 struct ieee80211_tx_info *info; 2940 struct sk_buff *fwd_skb; 2941 struct ethhdr *eth; 2942 bool multicast; 2943 int tailroom = 0; 2944 int hdrlen, mesh_hdrlen; 2945 u8 *qos; 2946 2947 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2948 return RX_CONTINUE; 2949 2950 if (!pskb_may_pull(skb, sizeof(*eth) + 6)) 2951 return RX_DROP; 2952 2953 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth)); 2954 mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr); 2955 2956 if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen)) 2957 return RX_DROP; 2958 2959 eth = (struct ethhdr *)skb->data; 2960 multicast = is_multicast_ether_addr(eth->h_dest); 2961 2962 mesh_hdr = (struct ieee80211s_hdr *)(eth + 1); 2963 if (!mesh_hdr->ttl) 2964 return RX_DROP; 2965 2966 /* frame is in RMC, don't forward */ 2967 if (is_multicast_ether_addr(eth->h_dest) && 2968 mesh_rmc_check(sdata, 
eth->h_source, mesh_hdr)) 2969 return RX_DROP; 2970 2971 /* forward packet */ 2972 if (sdata->crypto_tx_tailroom_needed_cnt) 2973 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2974 2975 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2976 struct mesh_path *mppath; 2977 char *proxied_addr; 2978 bool update = false; 2979 2980 if (multicast) 2981 proxied_addr = mesh_hdr->eaddr1; 2982 else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2983 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2984 proxied_addr = mesh_hdr->eaddr2; 2985 else 2986 return RX_DROP; 2987 2988 rcu_read_lock(); 2989 mppath = mpp_path_lookup(sdata, proxied_addr); 2990 if (!mppath) { 2991 mpp_path_add(sdata, proxied_addr, eth->h_source); 2992 } else { 2993 spin_lock_bh(&mppath->state_lock); 2994 if (!ether_addr_equal(mppath->mpp, eth->h_source)) { 2995 memcpy(mppath->mpp, eth->h_source, ETH_ALEN); 2996 update = true; 2997 } 2998 mppath->exp_time = jiffies; 2999 spin_unlock_bh(&mppath->state_lock); 3000 } 3001 3002 /* flush fast xmit cache if the address path changed */ 3003 if (update) 3004 mesh_fast_tx_flush_addr(sdata, proxied_addr); 3005 3006 rcu_read_unlock(); 3007 } 3008 3009 /* Frame has reached destination. Don't forward */ 3010 if (ether_addr_equal(sdata->vif.addr, eth->h_dest)) 3011 goto rx_accept; 3012 3013 if (!--mesh_hdr->ttl) { 3014 if (multicast) 3015 goto rx_accept; 3016 3017 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 3018 return RX_DROP; 3019 } 3020 3021 if (!ifmsh->mshcfg.dot11MeshForwarding) { 3022 if (is_multicast_ether_addr(eth->h_dest)) 3023 goto rx_accept; 3024 3025 return RX_DROP; 3026 } 3027 3028 skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); 3029 3030 if (!multicast && 3031 ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen)) 3032 return RX_QUEUED; 3033 3034 ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control, 3035 eth->h_dest, eth->h_source); 3036 hdrlen = ieee80211_hdrlen(hdr.frame_control); 3037 if (multicast) { 3038 int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth); 3039 3040 fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head + 3041 IEEE80211_ENCRYPT_HEADROOM, 3042 tailroom, GFP_ATOMIC); 3043 if (!fwd_skb) 3044 goto rx_accept; 3045 } else { 3046 fwd_skb = skb; 3047 skb = NULL; 3048 3049 if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr))) 3050 return RX_DROP_U_OOM; 3051 3052 if (skb_linearize(fwd_skb)) 3053 return RX_DROP_U_OOM; 3054 } 3055 3056 fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr)); 3057 memcpy(fwd_hdr, &hdr, hdrlen - 2); 3058 qos = ieee80211_get_qos_ctl(fwd_hdr); 3059 qos[0] = qos[1] = 0; 3060 3061 skb_reset_mac_header(fwd_skb); 3062 hdrlen += mesh_hdrlen; 3063 if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen, 3064 &fwd_skb->protocol)) 3065 hdrlen += ETH_ALEN; 3066 else 3067 fwd_skb->protocol = htons(fwd_skb->len - hdrlen); 3068 skb_set_network_header(fwd_skb, hdrlen + 2); 3069 3070 info = IEEE80211_SKB_CB(fwd_skb); 3071 memset(info, 0, sizeof(*info)); 3072 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 3073 info->control.vif = &sdata->vif; 3074 info->control.jiffies = jiffies; 3075 fwd_skb->dev = sdata->dev; 3076 if (multicast) { 3077 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 3078 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 3079 /* update power mode indication when forwarding */ 3080 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 3081 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 3082 /* mesh power mode flags updated in mesh_nexthop_lookup */ 3083 
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 3084 } else { 3085 /* unable to resolve next hop */ 3086 if (sta) 3087 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 3088 hdr.addr3, 0, 3089 WLAN_REASON_MESH_PATH_NOFORWARD, 3090 sta->sta.addr); 3091 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 3092 kfree_skb(fwd_skb); 3093 goto rx_accept; 3094 } 3095 3096 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 3097 ieee80211_set_qos_hdr(sdata, fwd_skb); 3098 ieee80211_add_pending_skb(local, fwd_skb); 3099 3100 rx_accept: 3101 if (!skb) 3102 return RX_QUEUED; 3103 3104 ieee80211_strip_8023_mesh_hdr(skb); 3105 #endif 3106 3107 return RX_CONTINUE; 3108 } 3109 3110 static ieee80211_rx_result debug_noinline 3111 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 3112 { 3113 struct net_device *dev = rx->sdata->dev; 3114 struct sk_buff *skb = rx->skb; 3115 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3116 __le16 fc = hdr->frame_control; 3117 struct sk_buff_head frame_list; 3118 struct ethhdr ethhdr; 3119 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 3120 3121 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 3122 check_da = NULL; 3123 check_sa = NULL; 3124 } else switch (rx->sdata->vif.type) { 3125 case NL80211_IFTYPE_AP: 3126 case NL80211_IFTYPE_AP_VLAN: 3127 check_da = NULL; 3128 break; 3129 case NL80211_IFTYPE_STATION: 3130 if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 3131 check_sa = NULL; 3132 break; 3133 case NL80211_IFTYPE_MESH_POINT: 3134 check_sa = NULL; 3135 check_da = NULL; 3136 break; 3137 default: 3138 break; 3139 } 3140 3141 skb->dev = dev; 3142 __skb_queue_head_init(&frame_list); 3143 3144 if (ieee80211_data_to_8023_exthdr(skb, ðhdr, 3145 rx->sdata->vif.addr, 3146 rx->sdata->vif.type, 3147 data_offset, true)) 3148 return RX_DROP_U_BAD_AMSDU; 3149 3150 if (rx->sta->amsdu_mesh_control < 0) { 3151 s8 valid = -1; 3152 int i; 3153 3154 for (i = 0; i <= 2; i++) { 3155 if (!ieee80211_is_valid_amsdu(skb, i)) 3156 continue; 3157 3158 if (valid >= 0) { 3159 /* ambiguous */ 3160 valid = -1; 3161 break; 3162 } 3163 3164 valid = i; 3165 } 3166 3167 rx->sta->amsdu_mesh_control = valid; 3168 } 3169 3170 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 3171 rx->sdata->vif.type, 3172 rx->local->hw.extra_tx_headroom, 3173 check_da, check_sa, 3174 rx->sta->amsdu_mesh_control); 3175 3176 while (!skb_queue_empty(&frame_list)) { 3177 rx->skb = __skb_dequeue(&frame_list); 3178 3179 switch (ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb)) { 3180 case RX_QUEUED: 3181 break; 3182 case RX_CONTINUE: 3183 if (ieee80211_frame_allowed(rx, fc)) { 3184 ieee80211_deliver_skb(rx); 3185 break; 3186 } 3187 fallthrough; 3188 default: 3189 dev_kfree_skb(rx->skb); 3190 } 3191 } 3192 3193 return RX_QUEUED; 3194 } 3195 3196 static ieee80211_rx_result debug_noinline 3197 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 3198 { 3199 struct sk_buff *skb = rx->skb; 3200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3201 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3202 __le16 fc = hdr->frame_control; 3203 3204 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 3205 return RX_CONTINUE; 3206 3207 if (unlikely(!ieee80211_is_data(fc))) 3208 return RX_CONTINUE; 3209 3210 if (unlikely(!ieee80211_is_data_present(fc))) 3211 return RX_DROP; 3212 3213 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 3214 switch (rx->sdata->vif.type) { 3215 case NL80211_IFTYPE_AP_VLAN: 3216 if (!rx->sdata->u.vlan.sta) 3217 
return RX_DROP_U_BAD_4ADDR; 3218 break; 3219 case NL80211_IFTYPE_STATION: 3220 if (!rx->sdata->u.mgd.use_4addr) 3221 return RX_DROP_U_BAD_4ADDR; 3222 break; 3223 case NL80211_IFTYPE_MESH_POINT: 3224 break; 3225 default: 3226 return RX_DROP_U_BAD_4ADDR; 3227 } 3228 } 3229 3230 if (is_multicast_ether_addr(hdr->addr1) || !rx->sta) 3231 return RX_DROP_U_BAD_AMSDU; 3232 3233 if (rx->key) { 3234 /* 3235 * We should not receive A-MSDUs on pre-HT connections, 3236 * and HT connections cannot use old ciphers. Thus drop 3237 * them, as in those cases we couldn't even have SPP 3238 * A-MSDUs or such. 3239 */ 3240 switch (rx->key->conf.cipher) { 3241 case WLAN_CIPHER_SUITE_WEP40: 3242 case WLAN_CIPHER_SUITE_WEP104: 3243 case WLAN_CIPHER_SUITE_TKIP: 3244 return RX_DROP_U_BAD_AMSDU_CIPHER; 3245 default: 3246 break; 3247 } 3248 } 3249 3250 return __ieee80211_rx_h_amsdu(rx, 0); 3251 } 3252 3253 static ieee80211_rx_result debug_noinline 3254 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 3255 { 3256 struct ieee80211_sub_if_data *sdata = rx->sdata; 3257 struct ieee80211_local *local = rx->local; 3258 struct net_device *dev = sdata->dev; 3259 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 3260 __le16 fc = hdr->frame_control; 3261 ieee80211_rx_result res; 3262 bool port_control; 3263 3264 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 3265 return RX_CONTINUE; 3266 3267 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 3268 return RX_DROP; 3269 3270 /* Send unexpected-4addr-frame event to hostapd */ 3271 if (ieee80211_has_a4(hdr->frame_control) && 3272 sdata->vif.type == NL80211_IFTYPE_AP) { 3273 if (rx->sta && 3274 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 3275 cfg80211_rx_unexpected_4addr_frame( 3276 rx->sdata->dev, rx->sta->sta.addr, rx->link_id, 3277 GFP_ATOMIC); 3278 return RX_DROP; 3279 } 3280 3281 res = __ieee80211_data_to_8023(rx, &port_control); 3282 if (unlikely(res != RX_CONTINUE)) 3283 return res; 3284 3285 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 3286 if (res != RX_CONTINUE) 3287 return res; 3288 3289 if (!ieee80211_frame_allowed(rx, fc)) 3290 return RX_DROP; 3291 3292 /* directly handle TDLS channel switch requests/responses */ 3293 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 3294 cpu_to_be16(ETH_P_TDLS))) { 3295 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 3296 3297 if (pskb_may_pull(rx->skb, 3298 offsetof(struct ieee80211_tdls_data, u)) && 3299 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 3300 tf->category == WLAN_CATEGORY_TDLS && 3301 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 3302 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 3303 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); 3304 __ieee80211_queue_skb_to_iface(sdata, rx->link_id, 3305 rx->sta, rx->skb); 3306 return RX_QUEUED; 3307 } 3308 } 3309 3310 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 3311 unlikely(port_control) && sdata->bss) { 3312 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 3313 u.ap); 3314 dev = sdata->dev; 3315 rx->sdata = sdata; 3316 } 3317 3318 rx->skb->dev = dev; 3319 3320 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 3321 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 3322 !is_multicast_ether_addr( 3323 ((struct ethhdr *)rx->skb->data)->h_dest) && 3324 (!local->scanning && 3325 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 3326 mod_timer(&local->dynamic_ps_timer, jiffies + 3327 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 3328 3329 
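	/*
	 * (Editorial note on the block above, added for clarity: when the
	 * driver does not handle dynamic PS itself, receiving unicast data
	 * while not scanning or off-channel re-arms the dynamic_ps_timer,
	 * keeping the device out of power save for another
	 * dynamic_ps_timeout milliseconds.)
	 */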
ieee80211_deliver_skb(rx); 3330 3331 return RX_QUEUED; 3332 } 3333 3334 static ieee80211_rx_result debug_noinline 3335 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 3336 { 3337 struct sk_buff *skb = rx->skb; 3338 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 3339 struct tid_ampdu_rx *tid_agg_rx; 3340 u16 start_seq_num; 3341 u16 tid; 3342 3343 if (likely(!ieee80211_is_ctl(bar->frame_control))) 3344 return RX_CONTINUE; 3345 3346 if (ieee80211_is_back_req(bar->frame_control)) { 3347 struct { 3348 __le16 control, start_seq_num; 3349 } __packed bar_data; 3350 struct ieee80211_event event = { 3351 .type = BAR_RX_EVENT, 3352 }; 3353 3354 if (!rx->sta) 3355 return RX_DROP; 3356 3357 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 3358 &bar_data, sizeof(bar_data))) 3359 return RX_DROP; 3360 3361 tid = le16_to_cpu(bar_data.control) >> 12; 3362 3363 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3364 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3365 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3366 WLAN_BACK_RECIPIENT, 3367 WLAN_REASON_QSTA_REQUIRE_SETUP); 3368 3369 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3370 if (!tid_agg_rx) 3371 return RX_DROP; 3372 3373 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3374 event.u.ba.tid = tid; 3375 event.u.ba.ssn = start_seq_num; 3376 event.u.ba.sta = &rx->sta->sta; 3377 3378 /* reset session timer */ 3379 if (tid_agg_rx->timeout) 3380 mod_timer(&tid_agg_rx->session_timer, 3381 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3382 3383 spin_lock(&tid_agg_rx->reorder_lock); 3384 /* release stored frames up to start of BAR */ 3385 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3386 start_seq_num, frames); 3387 spin_unlock(&tid_agg_rx->reorder_lock); 3388 3389 drv_event_callback(rx->local, rx->sdata, &event); 3390 3391 kfree_skb(skb); 3392 return RX_QUEUED; 3393 } 3394 3395 return RX_DROP; 3396 } 3397 3398 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3399 struct ieee80211_mgmt *mgmt, 3400 size_t len) 3401 { 3402 struct ieee80211_local *local = sdata->local; 3403 struct sk_buff *skb; 3404 struct ieee80211_mgmt *resp; 3405 3406 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3407 /* Not to own unicast address */ 3408 return; 3409 } 3410 3411 if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) || 3412 !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { 3413 /* Not from the current AP or not associated yet. 
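	 *
	 * (Editorial note, added for context: SA Query requests are only
	 * meaningful from the AP we are currently associated with, since
	 * the response built below is what demonstrates to that AP that
	 * this association is still live.)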
*/ 3414 return; 3415 } 3416 3417 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3418 /* Too short SA Query request frame */ 3419 return; 3420 } 3421 3422 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3423 if (skb == NULL) 3424 return; 3425 3426 skb_reserve(skb, local->hw.extra_tx_headroom); 3427 resp = skb_put_zero(skb, 24); 3428 memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN); 3429 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3430 memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); 3431 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3432 IEEE80211_STYPE_ACTION); 3433 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3434 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3435 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3436 memcpy(resp->u.action.u.sa_query.trans_id, 3437 mgmt->u.action.u.sa_query.trans_id, 3438 WLAN_SA_QUERY_TR_ID_LEN); 3439 3440 ieee80211_tx_skb(sdata, skb); 3441 } 3442 3443 static void 3444 ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) 3445 { 3446 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3447 struct ieee80211_bss_conf *bss_conf; 3448 const struct element *ie; 3449 size_t baselen; 3450 3451 if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, 3452 NL80211_EXT_FEATURE_BSS_COLOR)) 3453 return; 3454 3455 if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) 3456 return; 3457 3458 bss_conf = rx->link->conf; 3459 if (bss_conf->csa_active || bss_conf->color_change_active || 3460 !bss_conf->he_bss_color.enabled) 3461 return; 3462 3463 baselen = mgmt->u.beacon.variable - rx->skb->data; 3464 if (baselen > rx->skb->len) 3465 return; 3466 3467 ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, 3468 mgmt->u.beacon.variable, 3469 rx->skb->len - baselen); 3470 if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && 3471 ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { 3472 const struct ieee80211_he_operation *he_oper; 3473 u8 color; 3474 3475 he_oper = (void *)(ie->data + 1); 3476 if (le32_get_bits(he_oper->he_oper_params, 3477 IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) 3478 return; 3479 3480 color = le32_get_bits(he_oper->he_oper_params, 3481 IEEE80211_HE_OPERATION_BSS_COLOR_MASK); 3482 if (color == bss_conf->he_bss_color.color) 3483 ieee80211_obss_color_collision_notify(&rx->sdata->vif, 3484 BIT_ULL(color), 3485 bss_conf->link_id); 3486 } 3487 } 3488 3489 static ieee80211_rx_result debug_noinline 3490 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3491 { 3492 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3493 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3494 3495 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3496 return RX_CONTINUE; 3497 3498 /* 3499 * From here on, look only at management frames. 3500 * Data and control frames are already handled, 3501 * and unknown (reserved) frames are useless. 
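	 *
	 * (Editorial note, added for clarity: the 24-byte minimum checked
	 * below corresponds to the fixed management frame header -- frame
	 * control, duration, the three address fields and sequence control,
	 * i.e. 2 + 2 + 3 * 6 + 2 octets.)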
3502 */ 3503 if (rx->skb->len < 24) 3504 return RX_DROP; 3505 3506 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3507 return RX_DROP; 3508 3509 /* drop too small action frames */ 3510 if (ieee80211_is_action(mgmt->frame_control) && 3511 rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 3512 return RX_DROP_U_RUNT_ACTION; 3513 3514 /* Drop non-broadcast Beacon frames */ 3515 if (ieee80211_is_beacon(mgmt->frame_control) && 3516 !is_broadcast_ether_addr(mgmt->da)) 3517 return RX_DROP; 3518 3519 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3520 ieee80211_is_beacon(mgmt->frame_control) && 3521 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3522 int sig = 0; 3523 3524 /* sw bss color collision detection */ 3525 ieee80211_rx_check_bss_color_collision(rx); 3526 3527 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3528 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3529 sig = status->signal; 3530 3531 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, 3532 rx->skb->data, rx->skb->len, 3533 ieee80211_rx_status_to_khz(status), 3534 sig); 3535 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3536 } 3537 3538 return ieee80211_drop_unencrypted_mgmt(rx); 3539 } 3540 3541 static bool 3542 ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) 3543 { 3544 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data; 3545 struct ieee80211_sub_if_data *sdata = rx->sdata; 3546 3547 /* TWT actions are only supported in AP for the moment */ 3548 if (sdata->vif.type != NL80211_IFTYPE_AP) 3549 return false; 3550 3551 if (!rx->local->ops->add_twt_setup) 3552 return false; 3553 3554 if (!sdata->vif.bss_conf.twt_responder) 3555 return false; 3556 3557 if (!rx->sta) 3558 return false; 3559 3560 switch (mgmt->u.action.u.s1g.action_code) { 3561 case WLAN_S1G_TWT_SETUP: { 3562 struct ieee80211_twt_setup *twt; 3563 3564 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 3565 1 + /* action code */ 3566 sizeof(struct ieee80211_twt_setup) + 3567 2 /* TWT req_type agrt */) 3568 break; 3569 3570 twt = (void *)mgmt->u.action.u.s1g.variable; 3571 if (twt->element_id != WLAN_EID_S1G_TWT) 3572 break; 3573 3574 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 3575 4 + /* action code + token + tlv */ 3576 twt->length) 3577 break; 3578 3579 return true; /* queue the frame */ 3580 } 3581 case WLAN_S1G_TWT_TEARDOWN: 3582 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2) 3583 break; 3584 3585 return true; /* queue the frame */ 3586 default: 3587 break; 3588 } 3589 3590 return false; 3591 } 3592 3593 static ieee80211_rx_result debug_noinline 3594 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3595 { 3596 struct ieee80211_local *local = rx->local; 3597 struct ieee80211_sub_if_data *sdata = rx->sdata; 3598 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3599 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3600 int len = rx->skb->len; 3601 3602 if (!ieee80211_is_action(mgmt->frame_control)) 3603 return RX_CONTINUE; 3604 3605 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3606 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3607 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3608 return RX_DROP_U_ACTION_UNKNOWN_SRC; 3609 3610 switch (mgmt->u.action.category) { 3611 case WLAN_CATEGORY_HT: 3612 /* reject HT action frames from stations not supporting HT 3613 * or not HE Capable 3614 */ 3615 if (!rx->link_sta->pub->ht_cap.ht_supported && 3616 !rx->link_sta->pub->he_cap.has_he) 3617 goto invalid; 3618 3619 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3620 
sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3621 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3622 sdata->vif.type != NL80211_IFTYPE_AP && 3623 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3624 break; 3625 3626 /* verify action & smps_control/chanwidth are present */ 3627 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3628 goto invalid; 3629 3630 switch (mgmt->u.action.u.ht_smps.action) { 3631 case WLAN_HT_ACTION_SMPS: { 3632 struct ieee80211_supported_band *sband; 3633 enum ieee80211_smps_mode smps_mode; 3634 struct sta_opmode_info sta_opmode = {}; 3635 3636 if (sdata->vif.type != NL80211_IFTYPE_AP && 3637 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3638 goto handled; 3639 3640 /* convert to HT capability */ 3641 switch (mgmt->u.action.u.ht_smps.smps_control) { 3642 case WLAN_HT_SMPS_CONTROL_DISABLED: 3643 smps_mode = IEEE80211_SMPS_OFF; 3644 break; 3645 case WLAN_HT_SMPS_CONTROL_STATIC: 3646 smps_mode = IEEE80211_SMPS_STATIC; 3647 break; 3648 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3649 smps_mode = IEEE80211_SMPS_DYNAMIC; 3650 break; 3651 default: 3652 goto invalid; 3653 } 3654 3655 /* if no change do nothing */ 3656 if (rx->link_sta->pub->smps_mode == smps_mode) 3657 goto handled; 3658 rx->link_sta->pub->smps_mode = smps_mode; 3659 sta_opmode.smps_mode = 3660 ieee80211_smps_mode_to_smps_mode(smps_mode); 3661 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3662 3663 sband = rx->local->hw.wiphy->bands[status->band]; 3664 3665 rate_control_rate_update(local, sband, rx->link_sta, 3666 IEEE80211_RC_SMPS_CHANGED); 3667 cfg80211_sta_opmode_change_notify(sdata->dev, 3668 rx->sta->addr, 3669 &sta_opmode, 3670 GFP_ATOMIC); 3671 goto handled; 3672 } 3673 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3674 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3675 3676 if (chanwidth != IEEE80211_HT_CHANWIDTH_20MHZ && 3677 chanwidth != IEEE80211_HT_CHANWIDTH_ANY) 3678 goto invalid; 3679 3680 /* If it doesn't support 40 MHz it can't change ... 
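	 *
	 * (Editorial note, not part of the original comment: a peer without
	 * IEEE80211_HT_CAP_SUP_WIDTH_20_40 has its notification acknowledged
	 * as handled here, while for a 40 MHz capable peer the frame is
	 * queued below so the new channel width can be processed.)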
*/ 3681 if (!(rx->link_sta->pub->ht_cap.cap & 3682 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3683 goto handled; 3684 3685 goto queue; 3686 } 3687 default: 3688 goto invalid; 3689 } 3690 3691 break; 3692 case WLAN_CATEGORY_PUBLIC: 3693 case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: 3694 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3695 goto invalid; 3696 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3697 break; 3698 if (!rx->sta) 3699 break; 3700 if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) 3701 break; 3702 if (mgmt->u.action.u.ext_chan_switch.action_code != 3703 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3704 break; 3705 if (len < offsetof(struct ieee80211_mgmt, 3706 u.action.u.ext_chan_switch.variable)) 3707 goto invalid; 3708 goto queue; 3709 case WLAN_CATEGORY_VHT: 3710 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3711 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3712 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3713 sdata->vif.type != NL80211_IFTYPE_AP && 3714 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3715 break; 3716 3717 /* verify action code is present */ 3718 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3719 goto invalid; 3720 3721 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3722 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3723 /* verify opmode is present */ 3724 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3725 goto invalid; 3726 goto queue; 3727 } 3728 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3729 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3730 goto invalid; 3731 goto queue; 3732 } 3733 default: 3734 break; 3735 } 3736 break; 3737 case WLAN_CATEGORY_BACK: 3738 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3739 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3740 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3741 sdata->vif.type != NL80211_IFTYPE_AP && 3742 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3743 break; 3744 3745 /* verify action_code is present */ 3746 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3747 break; 3748 3749 switch (mgmt->u.action.u.addba_req.action_code) { 3750 case WLAN_ACTION_ADDBA_REQ: 3751 if (len < (IEEE80211_MIN_ACTION_SIZE + 3752 sizeof(mgmt->u.action.u.addba_req))) 3753 goto invalid; 3754 break; 3755 case WLAN_ACTION_ADDBA_RESP: 3756 if (len < (IEEE80211_MIN_ACTION_SIZE + 3757 sizeof(mgmt->u.action.u.addba_resp))) 3758 goto invalid; 3759 break; 3760 case WLAN_ACTION_DELBA: 3761 if (len < (IEEE80211_MIN_ACTION_SIZE + 3762 sizeof(mgmt->u.action.u.delba))) 3763 goto invalid; 3764 break; 3765 default: 3766 goto invalid; 3767 } 3768 3769 goto queue; 3770 case WLAN_CATEGORY_SPECTRUM_MGMT: 3771 /* verify action_code is present */ 3772 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3773 break; 3774 3775 switch (mgmt->u.action.u.measurement.action_code) { 3776 case WLAN_ACTION_SPCT_MSR_REQ: 3777 if (status->band != NL80211_BAND_5GHZ) 3778 break; 3779 3780 if (len < (IEEE80211_MIN_ACTION_SIZE + 3781 sizeof(mgmt->u.action.u.measurement))) 3782 break; 3783 3784 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3785 break; 3786 3787 ieee80211_process_measurement_req(sdata, mgmt, len); 3788 goto handled; 3789 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3790 u8 *bssid; 3791 if (len < (IEEE80211_MIN_ACTION_SIZE + 3792 sizeof(mgmt->u.action.u.chan_switch))) 3793 break; 3794 3795 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3796 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3797 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3798 break; 3799 3800 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3801 bssid = sdata->deflink.u.mgd.bssid; 3802 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3803 
bssid = sdata->u.ibss.bssid; 3804 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3805 bssid = mgmt->sa; 3806 else 3807 break; 3808 3809 if (!ether_addr_equal(mgmt->bssid, bssid)) 3810 break; 3811 3812 goto queue; 3813 } 3814 } 3815 break; 3816 case WLAN_CATEGORY_SELF_PROTECTED: 3817 if (len < (IEEE80211_MIN_ACTION_SIZE + 3818 sizeof(mgmt->u.action.u.self_prot.action_code))) 3819 break; 3820 3821 switch (mgmt->u.action.u.self_prot.action_code) { 3822 case WLAN_SP_MESH_PEERING_OPEN: 3823 case WLAN_SP_MESH_PEERING_CLOSE: 3824 case WLAN_SP_MESH_PEERING_CONFIRM: 3825 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3826 goto invalid; 3827 if (sdata->u.mesh.user_mpm) 3828 /* userspace handles this frame */ 3829 break; 3830 goto queue; 3831 case WLAN_SP_MGK_INFORM: 3832 case WLAN_SP_MGK_ACK: 3833 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3834 goto invalid; 3835 break; 3836 } 3837 break; 3838 case WLAN_CATEGORY_MESH_ACTION: 3839 if (len < (IEEE80211_MIN_ACTION_SIZE + 3840 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3841 break; 3842 3843 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3844 break; 3845 if (mesh_action_is_path_sel(mgmt) && 3846 !mesh_path_sel_is_hwmp(sdata)) 3847 break; 3848 goto queue; 3849 case WLAN_CATEGORY_S1G: 3850 if (len < offsetofend(typeof(*mgmt), 3851 u.action.u.s1g.action_code)) 3852 break; 3853 3854 switch (mgmt->u.action.u.s1g.action_code) { 3855 case WLAN_S1G_TWT_SETUP: 3856 case WLAN_S1G_TWT_TEARDOWN: 3857 if (ieee80211_process_rx_twt_action(rx)) 3858 goto queue; 3859 break; 3860 default: 3861 break; 3862 } 3863 break; 3864 case WLAN_CATEGORY_PROTECTED_EHT: 3865 if (len < offsetofend(typeof(*mgmt), 3866 u.action.u.ttlm_req.action_code)) 3867 break; 3868 3869 switch (mgmt->u.action.u.ttlm_req.action_code) { 3870 case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ: 3871 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3872 break; 3873 3874 if (len < offsetofend(typeof(*mgmt), 3875 u.action.u.ttlm_req)) 3876 goto invalid; 3877 goto queue; 3878 case WLAN_PROTECTED_EHT_ACTION_TTLM_RES: 3879 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3880 break; 3881 3882 if (len < offsetofend(typeof(*mgmt), 3883 u.action.u.ttlm_res)) 3884 goto invalid; 3885 goto queue; 3886 case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN: 3887 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3888 break; 3889 3890 if (len < offsetofend(typeof(*mgmt), 3891 u.action.u.ttlm_tear_down)) 3892 goto invalid; 3893 goto queue; 3894 case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP: 3895 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3896 break; 3897 3898 /* The reconfiguration response action frame must contain at 3899 * least one 'Status Duple' entry (3 octets) 3900 */ 3901 if (len < 3902 offsetofend(typeof(*mgmt), 3903 u.action.u.ml_reconf_resp) + 3) 3904 goto invalid; 3905 goto queue; 3906 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP: 3907 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3908 break; 3909 3910 if (len < offsetofend(typeof(*mgmt), 3911 u.action.u.epcs) + 3912 IEEE80211_EPCS_ENA_RESP_BODY_LEN) 3913 goto invalid; 3914 goto queue; 3915 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN: 3916 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3917 break; 3918 3919 if (len < offsetofend(typeof(*mgmt), 3920 u.action.u.epcs)) 3921 goto invalid; 3922 goto queue; 3923 default: 3924 break; 3925 } 3926 break; 3927 } 3928 3929 return RX_CONTINUE; 3930 3931 invalid: 3932 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3933 /* will return in the next handlers */ 3934 return RX_CONTINUE; 3935 3936 handled: 3937 if (rx->sta) 3938
rx->link_sta->rx_stats.packets++; 3939 dev_kfree_skb(rx->skb); 3940 return RX_QUEUED; 3941 3942 queue: 3943 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 3944 return RX_QUEUED; 3945 } 3946 3947 static ieee80211_rx_result debug_noinline 3948 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3949 { 3950 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3951 struct cfg80211_rx_info info = { 3952 .freq = ieee80211_rx_status_to_khz(status), 3953 .buf = rx->skb->data, 3954 .len = rx->skb->len, 3955 .link_id = rx->link_id, 3956 .have_link_id = rx->link_id >= 0, 3957 }; 3958 3959 /* skip known-bad action frames and return them in the next handler */ 3960 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3961 return RX_CONTINUE; 3962 3963 /* 3964 * Getting here means the kernel doesn't know how to handle 3965 * it, but maybe userspace does ... include returned frames 3966 * so userspace can register for those to know whether ones 3967 * it transmitted were processed or returned. 3968 */ 3969 3970 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3971 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3972 info.sig_dbm = status->signal; 3973 3974 if (ieee80211_is_timing_measurement(rx->skb) || 3975 ieee80211_is_ftm(rx->skb)) { 3976 info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp); 3977 info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp); 3978 } 3979 3980 if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) { 3981 if (rx->sta) 3982 rx->link_sta->rx_stats.packets++; 3983 dev_kfree_skb(rx->skb); 3984 return RX_QUEUED; 3985 } 3986 3987 return RX_CONTINUE; 3988 } 3989 3990 static ieee80211_rx_result debug_noinline 3991 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 3992 { 3993 struct ieee80211_sub_if_data *sdata = rx->sdata; 3994 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3995 int len = rx->skb->len; 3996 3997 if (!ieee80211_is_action(mgmt->frame_control)) 3998 return RX_CONTINUE; 3999 4000 switch (mgmt->u.action.category) { 4001 case WLAN_CATEGORY_SA_QUERY: 4002 if (len < (IEEE80211_MIN_ACTION_SIZE + 4003 sizeof(mgmt->u.action.u.sa_query))) 4004 break; 4005 4006 switch (mgmt->u.action.u.sa_query.action) { 4007 case WLAN_ACTION_SA_QUERY_REQUEST: 4008 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4009 break; 4010 ieee80211_process_sa_query_req(sdata, mgmt, len); 4011 goto handled; 4012 } 4013 break; 4014 } 4015 4016 return RX_CONTINUE; 4017 4018 handled: 4019 if (rx->sta) 4020 rx->link_sta->rx_stats.packets++; 4021 dev_kfree_skb(rx->skb); 4022 return RX_QUEUED; 4023 } 4024 4025 static ieee80211_rx_result debug_noinline 4026 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 4027 { 4028 struct ieee80211_local *local = rx->local; 4029 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 4030 struct sk_buff *nskb; 4031 struct ieee80211_sub_if_data *sdata = rx->sdata; 4032 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4033 4034 if (!ieee80211_is_action(mgmt->frame_control)) 4035 return RX_CONTINUE; 4036 4037 /* 4038 * For AP mode, hostapd is responsible for handling any action 4039 * frames that we didn't handle, including returning unknown 4040 * ones. For all other modes we will return them to the sender, 4041 * setting the 0x80 bit in the action category, as required by 4042 * 802.11-2012 9.24.4. 4043 * Newer versions of hostapd use the management frame registration 4044 * mechanisms and old cooked monitor interface is no longer supported. 
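 * The returned copy built below therefore has the category MSB set,
 * the addresses swapped (DA taken from the original SA, SA set to our
 * own address) and is transmitted on TID 7 in the band the frame was
 * received on.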
4045 */ 4046 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 4047 (sdata->vif.type == NL80211_IFTYPE_AP || 4048 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 4049 return RX_DROP; 4050 4051 if (is_multicast_ether_addr(mgmt->da)) 4052 return RX_DROP; 4053 4054 /* do not return rejected action frames */ 4055 if (mgmt->u.action.category & 0x80) 4056 return RX_DROP_U_REJECTED_ACTION_RESPONSE; 4057 4058 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 4059 GFP_ATOMIC); 4060 if (nskb) { 4061 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 4062 4063 nmgmt->u.action.category |= 0x80; 4064 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 4065 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 4066 4067 memset(nskb->cb, 0, sizeof(nskb->cb)); 4068 4069 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 4070 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 4071 4072 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 4073 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 4074 IEEE80211_TX_CTL_NO_CCK_RATE; 4075 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 4076 info->hw_queue = 4077 local->hw.offchannel_tx_hw_queue; 4078 } 4079 4080 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1, 4081 status->band); 4082 } 4083 4084 return RX_DROP_U_UNKNOWN_ACTION_REJECTED; 4085 } 4086 4087 static ieee80211_rx_result debug_noinline 4088 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 4089 { 4090 struct ieee80211_sub_if_data *sdata = rx->sdata; 4091 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 4092 4093 if (!ieee80211_is_ext(hdr->frame_control)) 4094 return RX_CONTINUE; 4095 4096 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4097 return RX_DROP; 4098 4099 /* for now only beacons are ext, so queue them */ 4100 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4101 4102 return RX_QUEUED; 4103 } 4104 4105 static ieee80211_rx_result debug_noinline 4106 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 4107 { 4108 struct ieee80211_sub_if_data *sdata = rx->sdata; 4109 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 4110 __le16 stype; 4111 4112 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 4113 4114 if (!ieee80211_vif_is_mesh(&sdata->vif) && 4115 sdata->vif.type != NL80211_IFTYPE_ADHOC && 4116 sdata->vif.type != NL80211_IFTYPE_OCB && 4117 sdata->vif.type != NL80211_IFTYPE_STATION) 4118 return RX_DROP; 4119 4120 switch (stype) { 4121 case cpu_to_le16(IEEE80211_STYPE_AUTH): 4122 case cpu_to_le16(IEEE80211_STYPE_BEACON): 4123 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 4124 /* process for all: mesh, mlme, ibss */ 4125 break; 4126 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 4127 if (is_multicast_ether_addr(mgmt->da) && 4128 !is_broadcast_ether_addr(mgmt->da)) 4129 return RX_DROP; 4130 4131 /* process only for station/IBSS */ 4132 if (sdata->vif.type != NL80211_IFTYPE_STATION && 4133 sdata->vif.type != NL80211_IFTYPE_ADHOC) 4134 return RX_DROP; 4135 break; 4136 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 4137 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 4138 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 4139 if (is_multicast_ether_addr(mgmt->da) && 4140 !is_broadcast_ether_addr(mgmt->da)) 4141 return RX_DROP; 4142 4143 /* process only for station */ 4144 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4145 return RX_DROP; 4146 break; 4147 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 4148 /* process only for ibss and mesh */ 4149 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 4150 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 4151 return RX_DROP; 4152 break; 4153 
default: 4154 return RX_DROP; 4155 } 4156 4157 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4158 4159 return RX_QUEUED; 4160 } 4161 4162 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 4163 ieee80211_rx_result res) 4164 { 4165 if (res == RX_QUEUED) { 4166 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 4167 return; 4168 } 4169 4170 if (res != RX_CONTINUE) { 4171 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 4172 if (rx->sta) 4173 rx->link_sta->rx_stats.dropped++; 4174 } 4175 4176 kfree_skb_reason(rx->skb, (__force u32)res); 4177 } 4178 4179 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 4180 struct sk_buff_head *frames) 4181 { 4182 ieee80211_rx_result res = RX_DROP; 4183 struct sk_buff *skb; 4184 4185 #define CALL_RXH(rxh) \ 4186 do { \ 4187 res = rxh(rx); \ 4188 if (res != RX_CONTINUE) \ 4189 goto rxh_next; \ 4190 } while (0) 4191 4192 /* Lock here to avoid hitting all of the data used in the RX 4193 * path (e.g. key data, station data, ...) concurrently when 4194 * a frame is released from the reorder buffer due to timeout 4195 * from the timer, potentially concurrently with RX from the 4196 * driver. 4197 */ 4198 spin_lock_bh(&rx->local->rx_path_lock); 4199 4200 while ((skb = __skb_dequeue(frames))) { 4201 /* 4202 * all the other fields are valid across frames 4203 * that belong to an aMPDU since they are on the 4204 * same TID from the same station 4205 */ 4206 rx->skb = skb; 4207 4208 if (WARN_ON_ONCE(!rx->link)) 4209 goto rxh_next; 4210 4211 CALL_RXH(ieee80211_rx_h_check_more_data); 4212 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 4213 CALL_RXH(ieee80211_rx_h_sta_process); 4214 CALL_RXH(ieee80211_rx_h_decrypt); 4215 CALL_RXH(ieee80211_rx_h_defragment); 4216 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 4217 /* must be after MMIC verify so header is counted in MPDU mic */ 4218 CALL_RXH(ieee80211_rx_h_amsdu); 4219 CALL_RXH(ieee80211_rx_h_data); 4220 4221 /* special treatment -- needs the queue */ 4222 res = ieee80211_rx_h_ctrl(rx, frames); 4223 if (res != RX_CONTINUE) 4224 goto rxh_next; 4225 4226 CALL_RXH(ieee80211_rx_h_mgmt_check); 4227 CALL_RXH(ieee80211_rx_h_action); 4228 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 4229 CALL_RXH(ieee80211_rx_h_action_post_userspace); 4230 CALL_RXH(ieee80211_rx_h_action_return); 4231 CALL_RXH(ieee80211_rx_h_ext); 4232 CALL_RXH(ieee80211_rx_h_mgmt); 4233 4234 rxh_next: 4235 ieee80211_rx_handlers_result(rx, res); 4236 4237 #undef CALL_RXH 4238 } 4239 4240 spin_unlock_bh(&rx->local->rx_path_lock); 4241 } 4242 4243 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 4244 { 4245 struct sk_buff_head reorder_release; 4246 ieee80211_rx_result res = RX_DROP; 4247 4248 __skb_queue_head_init(&reorder_release); 4249 4250 #define CALL_RXH(rxh) \ 4251 do { \ 4252 res = rxh(rx); \ 4253 if (res != RX_CONTINUE) \ 4254 goto rxh_next; \ 4255 } while (0) 4256 4257 CALL_RXH(ieee80211_rx_h_check_dup); 4258 CALL_RXH(ieee80211_rx_h_check); 4259 4260 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 4261 4262 ieee80211_rx_handlers(rx, &reorder_release); 4263 return; 4264 4265 rxh_next: 4266 ieee80211_rx_handlers_result(rx, res); 4267 4268 #undef CALL_RXH 4269 } 4270 4271 static bool 4272 ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4273 { 4274 return !!(sta->valid_links & BIT(link_id)); 4275 } 4276 4277 static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, 4278 u8 link_id) 4279 { 4280 rx->link_id = link_id; 4281 rx->link = 
rcu_dereference(rx->sdata->link[link_id]); 4282 4283 if (!rx->sta) 4284 return rx->link; 4285 4286 if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) 4287 return false; 4288 4289 rx->link_sta = rcu_dereference(rx->sta->link[link_id]); 4290 4291 return rx->link && rx->link_sta; 4292 } 4293 4294 static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, 4295 struct sta_info *sta, int link_id) 4296 { 4297 rx->link_id = link_id; 4298 rx->sta = sta; 4299 4300 if (sta) { 4301 rx->local = sta->sdata->local; 4302 if (!rx->sdata) 4303 rx->sdata = sta->sdata; 4304 rx->link_sta = &sta->deflink; 4305 } else { 4306 rx->link_sta = NULL; 4307 } 4308 4309 if (link_id < 0) { 4310 if (ieee80211_vif_is_mld(&rx->sdata->vif) && 4311 sta && !sta->sta.valid_links) 4312 rx->link = 4313 rcu_dereference(rx->sdata->link[sta->deflink.link_id]); 4314 else 4315 rx->link = &rx->sdata->deflink; 4316 } else if (!ieee80211_rx_data_set_link(rx, link_id)) { 4317 return false; 4318 } 4319 4320 return true; 4321 } 4322 4323 /* 4324 * This function makes calls into the RX path, therefore 4325 * it has to be invoked under RCU read lock. 4326 */ 4327 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 4328 { 4329 struct sk_buff_head frames; 4330 struct ieee80211_rx_data rx = { 4331 /* This is OK -- must be QoS data frame */ 4332 .security_idx = tid, 4333 .seqno_idx = tid, 4334 }; 4335 struct tid_ampdu_rx *tid_agg_rx; 4336 int link_id = -1; 4337 4338 /* FIXME: statistics won't be right with this */ 4339 if (sta->sta.valid_links) 4340 link_id = ffs(sta->sta.valid_links) - 1; 4341 4342 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 4343 return; 4344 4345 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4346 if (!tid_agg_rx) 4347 return; 4348 4349 __skb_queue_head_init(&frames); 4350 4351 spin_lock(&tid_agg_rx->reorder_lock); 4352 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4353 spin_unlock(&tid_agg_rx->reorder_lock); 4354 4355 if (!skb_queue_empty(&frames)) { 4356 struct ieee80211_event event = { 4357 .type = BA_FRAME_TIMEOUT, 4358 .u.ba.tid = tid, 4359 .u.ba.sta = &sta->sta, 4360 }; 4361 drv_event_callback(rx.local, rx.sdata, &event); 4362 } 4363 4364 ieee80211_rx_handlers(&rx, &frames); 4365 } 4366 4367 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 4368 u16 ssn, u64 filtered, 4369 u16 received_mpdus) 4370 { 4371 struct ieee80211_local *local; 4372 struct sta_info *sta; 4373 struct tid_ampdu_rx *tid_agg_rx; 4374 struct sk_buff_head frames; 4375 struct ieee80211_rx_data rx = { 4376 /* This is OK -- must be QoS data frame */ 4377 .security_idx = tid, 4378 .seqno_idx = tid, 4379 }; 4380 int i, diff; 4381 4382 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 4383 return; 4384 4385 __skb_queue_head_init(&frames); 4386 4387 sta = container_of(pubsta, struct sta_info, sta); 4388 4389 local = sta->sdata->local; 4390 WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, 4391 "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", 4392 local->hw.max_rx_aggregation_subframes); 4393 4394 if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) 4395 return; 4396 4397 rcu_read_lock(); 4398 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4399 if (!tid_agg_rx) 4400 goto out; 4401 4402 spin_lock_bh(&tid_agg_rx->reorder_lock); 4403 4404 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 4405 int release; 4406 4407 /* release all frames in the reorder buffer */ 4408 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 
4409 IEEE80211_SN_MODULO; 4410 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 4411 release, &frames); 4412 /* update ssn to match received ssn */ 4413 tid_agg_rx->head_seq_num = ssn; 4414 } else { 4415 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 4416 &frames); 4417 } 4418 4419 /* handle the case that received ssn is behind the mac ssn. 4420 * it can be tid_agg_rx->buf_size behind and still be valid */ 4421 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 4422 if (diff >= tid_agg_rx->buf_size) { 4423 tid_agg_rx->reorder_buf_filtered = 0; 4424 goto release; 4425 } 4426 filtered = filtered >> diff; 4427 ssn += diff; 4428 4429 /* update bitmap */ 4430 for (i = 0; i < tid_agg_rx->buf_size; i++) { 4431 int index = (ssn + i) % tid_agg_rx->buf_size; 4432 4433 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 4434 if (filtered & BIT_ULL(i)) 4435 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 4436 } 4437 4438 /* now process also frames that the filter marking released */ 4439 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4440 4441 release: 4442 spin_unlock_bh(&tid_agg_rx->reorder_lock); 4443 4444 ieee80211_rx_handlers(&rx, &frames); 4445 4446 out: 4447 rcu_read_unlock(); 4448 } 4449 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 4450 4451 /* main receive path */ 4452 4453 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 4454 { 4455 return ether_addr_equal(raddr, addr) || 4456 is_broadcast_ether_addr(raddr); 4457 } 4458 4459 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 4460 { 4461 struct ieee80211_sub_if_data *sdata = rx->sdata; 4462 struct sk_buff *skb = rx->skb; 4463 struct ieee80211_hdr *hdr = (void *)skb->data; 4464 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4465 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 4466 bool multicast = is_multicast_ether_addr(hdr->addr1) || 4467 ieee80211_is_s1g_beacon(hdr->frame_control); 4468 4469 switch (sdata->vif.type) { 4470 case NL80211_IFTYPE_STATION: 4471 if (!bssid && !sdata->u.mgd.use_4addr) 4472 return false; 4473 if (ieee80211_is_first_frag(hdr->seq_ctrl) && 4474 ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 4475 return false; 4476 if (multicast) 4477 return true; 4478 return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id); 4479 case NL80211_IFTYPE_ADHOC: 4480 if (!bssid) 4481 return false; 4482 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4483 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || 4484 !is_valid_ether_addr(hdr->addr2)) 4485 return false; 4486 if (ieee80211_is_beacon(hdr->frame_control)) 4487 return true; 4488 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 4489 return false; 4490 if (!multicast && 4491 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4492 return false; 4493 if (!rx->sta) { 4494 int rate_idx; 4495 if (status->encoding != RX_ENC_LEGACY) 4496 rate_idx = 0; /* TODO: HT/VHT rates */ 4497 else 4498 rate_idx = status->rate_idx; 4499 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 4500 BIT(rate_idx)); 4501 } 4502 return true; 4503 case NL80211_IFTYPE_OCB: 4504 if (!bssid) 4505 return false; 4506 if (!ieee80211_is_data_present(hdr->frame_control)) 4507 return false; 4508 if (!is_broadcast_ether_addr(bssid)) 4509 return false; 4510 if (!multicast && 4511 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4512 return false; 4513 /* reject invalid/our STA address */ 4514 if (!is_valid_ether_addr(hdr->addr2) || 4515 ether_addr_equal(sdata->dev->dev_addr, 
hdr->addr2)) 4516 return false; 4517 if (!rx->sta) { 4518 int rate_idx; 4519 if (status->encoding != RX_ENC_LEGACY) 4520 rate_idx = 0; /* TODO: HT rates */ 4521 else 4522 rate_idx = status->rate_idx; 4523 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4524 BIT(rate_idx)); 4525 } 4526 return true; 4527 case NL80211_IFTYPE_MESH_POINT: 4528 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4529 return false; 4530 if (multicast) 4531 return true; 4532 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4533 case NL80211_IFTYPE_AP_VLAN: 4534 case NL80211_IFTYPE_AP: 4535 if (!bssid) 4536 return ieee80211_is_our_addr(sdata, hdr->addr1, 4537 &rx->link_id); 4538 4539 if (!is_broadcast_ether_addr(bssid) && 4540 !ieee80211_is_our_addr(sdata, bssid, NULL)) { 4541 /* 4542 * Accept public action frames even when the 4543 * BSSID doesn't match, this is used for P2P 4544 * and location updates. Note that mac80211 4545 * itself never looks at these frames. 4546 */ 4547 if (!multicast && 4548 !ieee80211_is_our_addr(sdata, hdr->addr1, 4549 &rx->link_id)) 4550 return false; 4551 if (ieee80211_is_public_action(hdr, skb->len)) 4552 return true; 4553 return ieee80211_is_beacon(hdr->frame_control); 4554 } 4555 4556 if (!ieee80211_has_tods(hdr->frame_control)) { 4557 /* ignore data frames to TDLS-peers */ 4558 if (ieee80211_is_data(hdr->frame_control)) 4559 return false; 4560 /* ignore action frames to TDLS-peers */ 4561 if (ieee80211_is_action(hdr->frame_control) && 4562 !is_broadcast_ether_addr(bssid) && 4563 !ether_addr_equal(bssid, hdr->addr1)) 4564 return false; 4565 } 4566 4567 /* 4568 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4569 * the BSSID - we've checked that already but may have accepted 4570 * the wildcard (ff:ff:ff:ff:ff:ff). 4571 * 4572 * It also says: 4573 * The BSSID of the Data frame is determined as follows: 4574 * a) If the STA is contained within an AP or is associated 4575 * with an AP, the BSSID is the address currently in use 4576 * by the STA contained in the AP. 4577 * 4578 * So we should not accept data frames with an address that's 4579 * multicast. 4580 * 4581 * Accepting it also opens a security problem because stations 4582 * could encrypt it with the GTK and inject traffic that way. 4583 */ 4584 if (ieee80211_is_data(hdr->frame_control) && multicast) 4585 return false; 4586 4587 return true; 4588 case NL80211_IFTYPE_P2P_DEVICE: 4589 return ieee80211_is_public_action(hdr, skb->len) || 4590 ieee80211_is_probe_req(hdr->frame_control) || 4591 ieee80211_is_probe_resp(hdr->frame_control) || 4592 ieee80211_is_beacon(hdr->frame_control) || 4593 (ieee80211_is_auth(hdr->frame_control) && 4594 ether_addr_equal(sdata->vif.addr, hdr->addr1)); 4595 case NL80211_IFTYPE_NAN: 4596 /* Accept only frames that are addressed to the NAN cluster 4597 * (based on the Cluster ID). From these frames, accept only 4598 * action frames or authentication frames that are addressed to 4599 * the local NAN interface. 
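 * The cluster ID is carried in A3, hence the hdr->addr3 comparison
 * below; authentication frames must additionally be addressed to our
 * own interface address.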
4600 */ 4601 return memcmp(sdata->wdev.u.nan.cluster_id, 4602 hdr->addr3, ETH_ALEN) == 0 && 4603 (ieee80211_is_public_action(hdr, skb->len) || 4604 (ieee80211_is_auth(hdr->frame_control) && 4605 ether_addr_equal(sdata->vif.addr, hdr->addr1))); 4606 default: 4607 break; 4608 } 4609 4610 WARN_ON_ONCE(1); 4611 return false; 4612 } 4613 4614 void ieee80211_check_fast_rx(struct sta_info *sta) 4615 { 4616 struct ieee80211_sub_if_data *sdata = sta->sdata; 4617 struct ieee80211_local *local = sdata->local; 4618 struct ieee80211_key *key; 4619 struct ieee80211_fast_rx fastrx = { 4620 .dev = sdata->dev, 4621 .vif_type = sdata->vif.type, 4622 .control_port_protocol = sdata->control_port_protocol, 4623 }, *old, *new = NULL; 4624 u32 offload_flags; 4625 bool set_offload = false; 4626 bool assign = false; 4627 bool offload; 4628 4629 /* use sparse to check that we don't return without updating */ 4630 __acquire(check_fast_rx); 4631 4632 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4633 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4634 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4635 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4636 4637 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4638 4639 /* fast-rx doesn't do reordering */ 4640 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4641 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4642 goto clear; 4643 4644 switch (sdata->vif.type) { 4645 case NL80211_IFTYPE_STATION: 4646 if (sta->sta.tdls) { 4647 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4648 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4649 fastrx.expected_ds_bits = 0; 4650 } else { 4651 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4652 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4653 fastrx.expected_ds_bits = 4654 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4655 } 4656 4657 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4658 fastrx.expected_ds_bits |= 4659 cpu_to_le16(IEEE80211_FCTL_TODS); 4660 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4661 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4662 } 4663 4664 if (!sdata->u.mgd.powersave) 4665 break; 4666 4667 /* software powersave is a huge mess, avoid all of it */ 4668 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4669 goto clear; 4670 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4671 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4672 goto clear; 4673 break; 4674 case NL80211_IFTYPE_AP_VLAN: 4675 case NL80211_IFTYPE_AP: 4676 /* parallel-rx requires this, at least with calls to 4677 * ieee80211_sta_ps_transition() 4678 */ 4679 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4680 goto clear; 4681 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4682 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4683 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4684 4685 fastrx.internal_forward = 4686 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4687 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4688 !sdata->u.vlan.sta); 4689 4690 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4691 sdata->u.vlan.sta) { 4692 fastrx.expected_ds_bits |= 4693 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4694 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4695 fastrx.internal_forward = 0; 4696 } 4697 4698 break; 4699 case NL80211_IFTYPE_MESH_POINT: 4700 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS | 4701 IEEE80211_FCTL_TODS); 4702 fastrx.da_offs = offsetof(struct 
ieee80211_hdr, addr3); 4703 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4704 break; 4705 default: 4706 goto clear; 4707 } 4708 4709 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4710 goto clear; 4711 4712 rcu_read_lock(); 4713 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4714 if (!key) 4715 key = rcu_dereference(sdata->default_unicast_key); 4716 if (key) { 4717 switch (key->conf.cipher) { 4718 case WLAN_CIPHER_SUITE_TKIP: 4719 /* we don't want to deal with MMIC in fast-rx */ 4720 goto clear_rcu; 4721 case WLAN_CIPHER_SUITE_CCMP: 4722 case WLAN_CIPHER_SUITE_CCMP_256: 4723 case WLAN_CIPHER_SUITE_GCMP: 4724 case WLAN_CIPHER_SUITE_GCMP_256: 4725 break; 4726 default: 4727 /* We also don't want to deal with 4728 * WEP or cipher scheme. 4729 */ 4730 goto clear_rcu; 4731 } 4732 4733 fastrx.key = true; 4734 fastrx.icv_len = key->conf.icv_len; 4735 } 4736 4737 assign = true; 4738 clear_rcu: 4739 rcu_read_unlock(); 4740 clear: 4741 __release(check_fast_rx); 4742 4743 if (assign) 4744 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4745 4746 offload_flags = get_bss_sdata(sdata)->vif.offload_flags; 4747 offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED; 4748 4749 if (assign && offload) 4750 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4751 else 4752 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4753 4754 if (set_offload) 4755 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); 4756 4757 spin_lock_bh(&sta->lock); 4758 old = rcu_dereference_protected(sta->fast_rx, true); 4759 rcu_assign_pointer(sta->fast_rx, new); 4760 spin_unlock_bh(&sta->lock); 4761 4762 if (old) 4763 kfree_rcu(old, rcu_head); 4764 } 4765 4766 void ieee80211_clear_fast_rx(struct sta_info *sta) 4767 { 4768 struct ieee80211_fast_rx *old; 4769 4770 spin_lock_bh(&sta->lock); 4771 old = rcu_dereference_protected(sta->fast_rx, true); 4772 RCU_INIT_POINTER(sta->fast_rx, NULL); 4773 spin_unlock_bh(&sta->lock); 4774 4775 if (old) 4776 kfree_rcu(old, rcu_head); 4777 } 4778 4779 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4780 { 4781 struct ieee80211_local *local = sdata->local; 4782 struct sta_info *sta; 4783 4784 lockdep_assert_wiphy(local->hw.wiphy); 4785 4786 list_for_each_entry(sta, &local->sta_list, list) { 4787 if (sdata != sta->sdata && 4788 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4789 continue; 4790 ieee80211_check_fast_rx(sta); 4791 } 4792 } 4793 4794 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4795 { 4796 struct ieee80211_local *local = sdata->local; 4797 4798 lockdep_assert_wiphy(local->hw.wiphy); 4799 4800 __ieee80211_check_fast_rx_iface(sdata); 4801 } 4802 4803 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4804 struct ieee80211_fast_rx *fast_rx, 4805 int orig_len) 4806 { 4807 struct ieee80211_sta_rx_stats *stats; 4808 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4809 struct sta_info *sta = rx->sta; 4810 struct link_sta_info *link_sta; 4811 struct sk_buff *skb = rx->skb; 4812 void *sa = skb->data + ETH_ALEN; 4813 void *da = skb->data; 4814 4815 if (rx->link_id >= 0) { 4816 link_sta = rcu_dereference(sta->link[rx->link_id]); 4817 if (WARN_ON_ONCE(!link_sta)) { 4818 dev_kfree_skb(rx->skb); 4819 return; 4820 } 4821 } else { 4822 link_sta = &sta->deflink; 4823 } 4824 4825 stats = &link_sta->rx_stats; 4826 if (fast_rx->uses_rss) 4827 stats = this_cpu_ptr(link_sta->pcpu_rx_stats); 4828 4829 /* statistics part of ieee80211_rx_h_sta_process() */ 4830 if 
(!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4831 stats->last_signal = status->signal; 4832 if (!fast_rx->uses_rss) 4833 ewma_signal_add(&link_sta->rx_stats_avg.signal, 4834 -status->signal); 4835 } 4836 4837 if (status->chains) { 4838 int i; 4839 4840 stats->chains = status->chains; 4841 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4842 int signal = status->chain_signal[i]; 4843 4844 if (!(status->chains & BIT(i))) 4845 continue; 4846 4847 stats->chain_signal_last[i] = signal; 4848 if (!fast_rx->uses_rss) 4849 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], 4850 -signal); 4851 } 4852 } 4853 /* end of statistics */ 4854 4855 stats->last_rx = jiffies; 4856 stats->last_rate = sta_stats_encode_rate(status); 4857 4858 stats->fragments++; 4859 stats->packets++; 4860 4861 skb->dev = fast_rx->dev; 4862 4863 dev_sw_netstats_rx_add(fast_rx->dev, skb->len); 4864 4865 /* The seqno index has the same property as needed 4866 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4867 * for non-QoS-data frames. Here we know it's a data 4868 * frame, so count MSDUs. 4869 */ 4870 u64_stats_update_begin(&stats->syncp); 4871 stats->msdu[rx->seqno_idx]++; 4872 stats->bytes += orig_len; 4873 u64_stats_update_end(&stats->syncp); 4874 4875 if (fast_rx->internal_forward) { 4876 struct sk_buff *xmit_skb = NULL; 4877 if (is_multicast_ether_addr(da)) { 4878 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4879 } else if (!ether_addr_equal(da, sa) && 4880 sta_info_get(rx->sdata, da)) { 4881 xmit_skb = skb; 4882 skb = NULL; 4883 } 4884 4885 if (xmit_skb) { 4886 /* 4887 * Send to wireless media and increase priority by 256 4888 * to keep the received priority instead of 4889 * reclassifying the frame (see cfg80211_classify8021d). 4890 */ 4891 xmit_skb->priority += 256; 4892 xmit_skb->protocol = htons(ETH_P_802_3); 4893 skb_reset_network_header(xmit_skb); 4894 skb_reset_mac_header(xmit_skb); 4895 dev_queue_xmit(xmit_skb); 4896 } 4897 4898 if (!skb) 4899 return; 4900 } 4901 4902 /* deliver to local stack */ 4903 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4904 ieee80211_deliver_skb_to_local_stack(skb, rx); 4905 } 4906 4907 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4908 struct ieee80211_fast_rx *fast_rx) 4909 { 4910 struct sk_buff *skb = rx->skb; 4911 struct ieee80211_hdr *hdr = (void *)skb->data; 4912 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4913 static ieee80211_rx_result res; 4914 int orig_len = skb->len; 4915 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4916 int snap_offs = hdrlen; 4917 struct { 4918 u8 snap[sizeof(rfc1042_header)]; 4919 __be16 proto; 4920 } *payload __aligned(2); 4921 struct { 4922 u8 da[ETH_ALEN]; 4923 u8 sa[ETH_ALEN]; 4924 } addrs __aligned(2); 4925 struct ieee80211_sta_rx_stats *stats; 4926 4927 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4928 * to a common data structure; drivers can implement that per queue 4929 * but we don't have that information in mac80211 4930 */ 4931 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4932 return false; 4933 4934 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4935 4936 /* If using encryption, we also need to have: 4937 * - PN_VALIDATED: similar, but the implementation is tricky 4938 * - DECRYPTED: necessary for PN_VALIDATED 4939 */ 4940 if (fast_rx->key && 4941 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4942 return false; 4943 4944 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4945 return false; 4946 4947 if 
(unlikely(ieee80211_is_frag(hdr))) 4948 return false; 4949 4950 /* Since our interface address cannot be multicast, this 4951 * implicitly also rejects multicast frames without the 4952 * explicit check. 4953 * 4954 * We shouldn't get any *data* frames not addressed to us 4955 * (AP mode will accept multicast *management* frames), but 4956 * punting here will make it go through the full checks in 4957 * ieee80211_accept_frame(). 4958 */ 4959 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4960 return false; 4961 4962 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4963 IEEE80211_FCTL_TODS)) != 4964 fast_rx->expected_ds_bits) 4965 return false; 4966 4967 /* assign the key to drop unencrypted frames (later) 4968 * and strip the IV/MIC if necessary 4969 */ 4970 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4971 /* GCMP header length is the same */ 4972 snap_offs += IEEE80211_CCMP_HDR_LEN; 4973 } 4974 4975 if (!ieee80211_vif_is_mesh(&rx->sdata->vif) && 4976 !(status->rx_flags & IEEE80211_RX_AMSDU)) { 4977 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4978 return false; 4979 4980 payload = (void *)(skb->data + snap_offs); 4981 4982 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4983 return false; 4984 4985 /* Don't handle these here since they require special code. 4986 * Accept AARP and IPX even though they should come with a 4987 * bridge-tunnel header - but if we get them this way then 4988 * there's little point in discarding them. 4989 */ 4990 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4991 payload->proto == fast_rx->control_port_protocol)) 4992 return false; 4993 } 4994 4995 /* after this point, don't punt to the slowpath! */ 4996 4997 if (fast_rx->uses_rss) 4998 stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); 4999 else 5000 stats = &rx->link_sta->rx_stats; 5001 5002 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 5003 pskb_trim(skb, skb->len - fast_rx->icv_len)) 5004 goto drop; 5005 5006 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 5007 goto drop; 5008 5009 if (status->rx_flags & IEEE80211_RX_AMSDU) { 5010 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 5011 RX_QUEUED) 5012 goto drop; 5013 5014 return true; 5015 } 5016 5017 /* do the header conversion - first grab the addresses */ 5018 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 5019 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 5020 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) { 5021 skb_pull(skb, snap_offs - 2); 5022 put_unaligned_be16(skb->len - 2, skb->data); 5023 } else { 5024 skb_postpull_rcsum(skb, skb->data + snap_offs, 5025 sizeof(rfc1042_header) + 2); 5026 5027 /* remove the SNAP but leave the ethertype */ 5028 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 5029 } 5030 /* push the addresses in front */ 5031 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 5032 5033 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 5034 switch (res) { 5035 case RX_QUEUED: 5036 stats->last_rx = jiffies; 5037 stats->last_rate = sta_stats_encode_rate(status); 5038 return true; 5039 case RX_CONTINUE: 5040 break; 5041 default: 5042 goto drop; 5043 } 5044 5045 ieee80211_rx_8023(rx, fast_rx, orig_len); 5046 5047 return true; 5048 drop: 5049 dev_kfree_skb(skb); 5050 5051 stats->dropped++; 5052 return true; 5053 } 5054 5055 /* 5056 * This function returns whether or not the SKB 5057 * was destined for RX processing or not, which, 5058 * if consume is true, is equivalent to whether 5059 * or not the skb 
was consumed. 5060 */ 5061 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 5062 struct sk_buff *skb, bool consume) 5063 { 5064 struct ieee80211_local *local = rx->local; 5065 struct ieee80211_sub_if_data *sdata = rx->sdata; 5066 struct ieee80211_hdr *hdr = (void *)skb->data; 5067 struct link_sta_info *link_sta = rx->link_sta; 5068 struct ieee80211_link_data *link = rx->link; 5069 5070 rx->skb = skb; 5071 5072 /* See if we can do fast-rx; if we have to copy we already lost, 5073 * so punt in that case. We should never have to deliver a data 5074 * frame to multiple interfaces anyway. 5075 * 5076 * We skip the ieee80211_accept_frame() call and do the necessary 5077 * checking inside ieee80211_invoke_fast_rx(). 5078 */ 5079 if (consume && rx->sta) { 5080 struct ieee80211_fast_rx *fast_rx; 5081 5082 fast_rx = rcu_dereference(rx->sta->fast_rx); 5083 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 5084 return true; 5085 } 5086 5087 if (!ieee80211_accept_frame(rx)) 5088 return false; 5089 5090 if (!consume) { 5091 struct skb_shared_hwtstamps *shwt; 5092 5093 rx->skb = skb_copy(skb, GFP_ATOMIC); 5094 if (!rx->skb) { 5095 if (net_ratelimit()) 5096 wiphy_debug(local->hw.wiphy, 5097 "failed to copy skb for %s\n", 5098 sdata->name); 5099 return true; 5100 } 5101 5102 /* skb_copy() does not copy the hw timestamps, so copy it 5103 * explicitly 5104 */ 5105 shwt = skb_hwtstamps(rx->skb); 5106 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; 5107 5108 /* Update the hdr pointer to the new skb for translation below */ 5109 hdr = (struct ieee80211_hdr *)rx->skb->data; 5110 } 5111 5112 if (unlikely(rx->sta && rx->sta->sta.mlo) && 5113 is_unicast_ether_addr(hdr->addr1) && 5114 !ieee80211_is_probe_resp(hdr->frame_control) && 5115 !ieee80211_is_beacon(hdr->frame_control)) { 5116 /* translate to MLD addresses */ 5117 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 5118 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); 5119 if (ether_addr_equal(link_sta->addr, hdr->addr2)) 5120 ether_addr_copy(hdr->addr2, rx->sta->addr); 5121 /* translate A3 only if it's the BSSID */ 5122 if (!ieee80211_has_tods(hdr->frame_control) && 5123 !ieee80211_has_fromds(hdr->frame_control)) { 5124 if (ether_addr_equal(link_sta->addr, hdr->addr3)) 5125 ether_addr_copy(hdr->addr3, rx->sta->addr); 5126 else if (ether_addr_equal(link->conf->addr, hdr->addr3)) 5127 ether_addr_copy(hdr->addr3, rx->sdata->vif.addr); 5128 } 5129 /* not needed for A4 since it can only carry the SA */ 5130 } 5131 5132 ieee80211_invoke_rx_handlers(rx); 5133 return true; 5134 } 5135 5136 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, 5137 struct ieee80211_sta *pubsta, 5138 struct sk_buff *skb, 5139 struct list_head *list) 5140 { 5141 struct ieee80211_local *local = hw_to_local(hw); 5142 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5143 struct ieee80211_fast_rx *fast_rx; 5144 struct ieee80211_rx_data rx; 5145 struct sta_info *sta; 5146 int link_id = -1; 5147 5148 memset(&rx, 0, sizeof(rx)); 5149 rx.skb = skb; 5150 rx.local = local; 5151 rx.list = list; 5152 rx.link_id = -1; 5153 5154 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 5155 5156 /* drop frame if too short for header */ 5157 if (skb->len < sizeof(struct ethhdr)) 5158 goto drop; 5159 5160 if (!pubsta) 5161 goto drop; 5162 5163 if (status->link_valid) 5164 link_id = status->link_id; 5165 5166 /* 5167 * TODO: Should the frame be dropped if the right link_id is not 5168 * available? 
Or maybe it is fine in the current form to proceed with 5169 * the frame processing because with the frame being in 802.3 format, 5170 * link_id is used only for stats purposes and updating the stats on 5171 * the deflink is fine? 5172 */ 5173 sta = container_of(pubsta, struct sta_info, sta); 5174 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 5175 goto drop; 5176 5177 fast_rx = rcu_dereference(rx.sta->fast_rx); 5178 if (!fast_rx) 5179 goto drop; 5180 5181 ieee80211_rx_8023(&rx, fast_rx, skb->len); 5182 return; 5183 5184 drop: 5185 dev_kfree_skb(skb); 5186 } 5187 5188 static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx, 5189 struct sk_buff *skb, bool consume) 5190 { 5191 struct link_sta_info *link_sta; 5192 struct ieee80211_hdr *hdr = (void *)skb->data; 5193 struct sta_info *sta; 5194 int link_id = -1; 5195 5196 /* 5197 * Look up the link station first, in case it has a 5198 * link address that is identical to the MLD address; 5199 * that way we'll have the link information if 5200 * needed. 5201 */ 5202 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2); 5203 if (link_sta) { 5204 sta = link_sta->sta; 5205 link_id = link_sta->link_id; 5206 } else { 5207 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5208 5209 sta = sta_info_get_bss(rx->sdata, hdr->addr2); 5210 if (status->link_valid) { 5211 link_id = status->link_id; 5212 } else if (ieee80211_vif_is_mld(&rx->sdata->vif) && 5213 status->freq) { 5214 struct ieee80211_link_data *link; 5215 struct ieee80211_chanctx_conf *conf; 5216 5217 for_each_link_data_rcu(rx->sdata, link) { 5218 conf = rcu_dereference(link->conf->chanctx_conf); 5219 if (!conf || !conf->def.chan) 5220 continue; 5221 5222 if (status->freq == conf->def.chan->center_freq) { 5223 link_id = link->link_id; 5224 break; 5225 } 5226 } 5227 } 5228 } 5229 5230 if (!ieee80211_rx_data_set_sta(rx, sta, link_id)) 5231 return false; 5232 5233 return ieee80211_prepare_and_rx_handle(rx, skb, consume); 5234 } 5235 5236 /* 5237 * This is the actual Rx frames handler. As it belongs to the Rx path it must 5238 * be called with rcu_read_lock protection.
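 * Data frames are dispatched via the station lookup below; all other
 * frames (and data frames without a matching station) are offered to
 * each running interface except monitor and AP_VLAN ones, with the skb
 * copied for every match but the last.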
5239 */ 5240 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 5241 struct ieee80211_sta *pubsta, 5242 struct sk_buff *skb, 5243 struct list_head *list) 5244 { 5245 struct ieee80211_local *local = hw_to_local(hw); 5246 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5247 struct ieee80211_sub_if_data *sdata; 5248 struct ieee80211_hdr *hdr; 5249 __le16 fc; 5250 struct ieee80211_rx_data rx; 5251 struct ieee80211_sub_if_data *prev; 5252 struct rhlist_head *tmp; 5253 int err = 0; 5254 5255 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 5256 memset(&rx, 0, sizeof(rx)); 5257 rx.skb = skb; 5258 rx.local = local; 5259 rx.list = list; 5260 rx.link_id = -1; 5261 5262 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 5263 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 5264 5265 if (ieee80211_is_mgmt(fc)) { 5266 /* drop frame if too short for header */ 5267 if (skb->len < ieee80211_hdrlen(fc)) 5268 err = -ENOBUFS; 5269 else 5270 err = skb_linearize(skb); 5271 } else { 5272 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 5273 } 5274 5275 if (err) { 5276 dev_kfree_skb(skb); 5277 return; 5278 } 5279 5280 hdr = (struct ieee80211_hdr *)skb->data; 5281 ieee80211_parse_qos(&rx); 5282 ieee80211_verify_alignment(&rx); 5283 5284 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 5285 ieee80211_is_beacon(hdr->frame_control) || 5286 ieee80211_is_s1g_beacon(hdr->frame_control))) 5287 ieee80211_scan_rx(local, skb); 5288 5289 if (ieee80211_is_data(fc)) { 5290 struct sta_info *sta, *prev_sta; 5291 int link_id = -1; 5292 5293 if (status->link_valid) 5294 link_id = status->link_id; 5295 5296 if (pubsta) { 5297 sta = container_of(pubsta, struct sta_info, sta); 5298 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 5299 goto out; 5300 5301 /* 5302 * In MLO connection, fetch the link_id using addr2 5303 * when the driver does not pass link_id in status. 5304 * When the address translation is already performed by 5305 * driver/hw, the valid link_id must be passed in 5306 * status. 
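 * If no link station can be found for addr2 in that case, the frame
 * is dropped.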
5307 */ 5308 5309 if (!status->link_valid && pubsta->mlo) { 5310 struct link_sta_info *link_sta; 5311 5312 link_sta = link_sta_info_get_bss(rx.sdata, 5313 hdr->addr2); 5314 if (!link_sta) 5315 goto out; 5316 5317 ieee80211_rx_data_set_link(&rx, link_sta->link_id); 5318 } 5319 5320 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5321 return; 5322 goto out; 5323 } 5324 5325 prev_sta = NULL; 5326 5327 for_each_sta_info(local, hdr->addr2, sta, tmp) { 5328 if (!prev_sta) { 5329 prev_sta = sta; 5330 continue; 5331 } 5332 5333 rx.sdata = prev_sta->sdata; 5334 if (!status->link_valid && prev_sta->sta.mlo) { 5335 struct link_sta_info *link_sta; 5336 5337 link_sta = link_sta_info_get_bss(rx.sdata, 5338 hdr->addr2); 5339 if (!link_sta) 5340 continue; 5341 5342 link_id = link_sta->link_id; 5343 } 5344 5345 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5346 goto out; 5347 5348 ieee80211_prepare_and_rx_handle(&rx, skb, false); 5349 5350 prev_sta = sta; 5351 } 5352 5353 if (prev_sta) { 5354 rx.sdata = prev_sta->sdata; 5355 if (!status->link_valid && prev_sta->sta.mlo) { 5356 struct link_sta_info *link_sta; 5357 5358 link_sta = link_sta_info_get_bss(rx.sdata, 5359 hdr->addr2); 5360 if (!link_sta) 5361 goto out; 5362 5363 link_id = link_sta->link_id; 5364 } 5365 5366 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5367 goto out; 5368 5369 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5370 return; 5371 goto out; 5372 } 5373 } 5374 5375 prev = NULL; 5376 5377 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 5378 if (!ieee80211_sdata_running(sdata)) 5379 continue; 5380 5381 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 5382 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 5383 continue; 5384 5385 /* 5386 * frame is destined for this interface, but if it's 5387 * not also for the previous one we handle that after 5388 * the loop to avoid copying the SKB once too much 5389 */ 5390 5391 if (!prev) { 5392 prev = sdata; 5393 continue; 5394 } 5395 5396 rx.sdata = prev; 5397 ieee80211_rx_for_interface(&rx, skb, false); 5398 5399 prev = sdata; 5400 } 5401 5402 if (prev) { 5403 rx.sdata = prev; 5404 5405 if (ieee80211_rx_for_interface(&rx, skb, true)) 5406 return; 5407 } 5408 5409 out: 5410 dev_kfree_skb(skb); 5411 } 5412 5413 /* 5414 * This is the receive path handler. It is called by a low level driver when an 5415 * 802.11 MPDU is received from the hardware. 5416 */ 5417 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5418 struct sk_buff *skb, struct list_head *list) 5419 { 5420 struct ieee80211_local *local = hw_to_local(hw); 5421 struct ieee80211_rate *rate = NULL; 5422 struct ieee80211_supported_band *sband; 5423 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5424 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5425 5426 WARN_ON_ONCE(softirq_count() == 0); 5427 5428 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 5429 goto drop; 5430 5431 sband = local->hw.wiphy->bands[status->band]; 5432 if (WARN_ON(!sband)) 5433 goto drop; 5434 5435 /* 5436 * If we're suspending, it is possible although not too likely 5437 * that we'd be receiving frames after having already partially 5438 * quiesced the stack. We can't process such frames then since 5439 * that might, for example, cause stations to be added or other 5440 * driver callbacks be invoked. 
5441 */ 5442 if (unlikely(local->quiescing || local->suspended)) 5443 goto drop; 5444 5445 /* We might be in the middle of a HW reconfig; prevent Rx for the same reason */ 5446 if (unlikely(local->in_reconfig)) 5447 goto drop; 5448 5449 /* 5450 * The same happens when we're not even started, 5451 * but that's worth a warning. 5452 */ 5453 if (WARN_ON(!local->started)) 5454 goto drop; 5455 5456 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC) && 5457 !(status->flag & RX_FLAG_NO_PSDU && 5458 status->zero_length_psdu_type == 5459 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED))) { 5460 /* 5461 * Validate the rate, unless there was a PLCP error which may 5462 * have an invalid rate or the PSDU was not captured and may be 5463 * missing rate information. 5464 */ 5465 5466 switch (status->encoding) { 5467 case RX_ENC_HT: 5468 /* 5469 * rate_idx is MCS index, which can be [0-76] 5470 * as documented on: 5471 * 5472 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n 5473 * 5474 * Anything else would be some sort of driver or 5475 * hardware error. The driver should catch hardware 5476 * errors. 5477 */ 5478 if (WARN(status->rate_idx > 76, 5479 "Rate marked as an HT rate but passed " 5480 "status->rate_idx is not " 5481 "an MCS index [0-76]: %d (0x%02x)\n", 5482 status->rate_idx, 5483 status->rate_idx)) 5484 goto drop; 5485 break; 5486 case RX_ENC_VHT: 5487 if (WARN_ONCE(status->rate_idx > 11 || 5488 !status->nss || 5489 status->nss > 8, 5490 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 5491 status->rate_idx, status->nss)) 5492 goto drop; 5493 break; 5494 case RX_ENC_HE: 5495 if (WARN_ONCE(status->rate_idx > 11 || 5496 !status->nss || 5497 status->nss > 8, 5498 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 5499 status->rate_idx, status->nss)) 5500 goto drop; 5501 break; 5502 case RX_ENC_EHT: 5503 if (WARN_ONCE(status->rate_idx > 15 || 5504 !status->nss || 5505 status->nss > 8 || 5506 status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2, 5507 "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n", 5508 status->rate_idx, status->nss, status->eht.gi)) 5509 goto drop; 5510 break; 5511 default: 5512 WARN_ON_ONCE(1); 5513 fallthrough; 5514 case RX_ENC_LEGACY: 5515 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 5516 goto drop; 5517 rate = &sband->bitrates[status->rate_idx]; 5518 } 5519 } 5520 5521 if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED)) 5522 goto drop; 5523 5524 status->rx_flags = 0; 5525 5526 kcov_remote_start_common(skb_get_kcov_handle(skb)); 5527 5528 /* 5529 * Frames with failed FCS/PLCP checksum are not returned, 5530 * all other frames are returned without radiotap header 5531 * if it was previously present. 5532 * Also, frames with less than 16 bytes are dropped.
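 * Frames already decapsulated to 802.3 (RX_FLAG_8023) bypass the
 * monitor path and are handed to the 802.3 receive handler instead.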
5533 */ 5534 if (!(status->flag & RX_FLAG_8023)) 5535 skb = ieee80211_rx_monitor(local, skb, rate); 5536 if (skb) { 5537 if ((status->flag & RX_FLAG_8023) || 5538 ieee80211_is_data_present(hdr->frame_control)) 5539 ieee80211_tpt_led_trig_rx(local, skb->len); 5540 5541 if (status->flag & RX_FLAG_8023) 5542 __ieee80211_rx_handle_8023(hw, pubsta, skb, list); 5543 else 5544 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 5545 } 5546 5547 kcov_remote_stop(); 5548 return; 5549 drop: 5550 kfree_skb(skb); 5551 } 5552 EXPORT_SYMBOL(ieee80211_rx_list); 5553 5554 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5555 struct sk_buff *skb, struct napi_struct *napi) 5556 { 5557 struct sk_buff *tmp; 5558 LIST_HEAD(list); 5559 5560 5561 /* 5562 * key references and virtual interfaces are protected using RCU 5563 * and this requires that we are in a read-side RCU section during 5564 * receive processing 5565 */ 5566 rcu_read_lock(); 5567 ieee80211_rx_list(hw, pubsta, skb, &list); 5568 rcu_read_unlock(); 5569 5570 if (!napi) { 5571 netif_receive_skb_list(&list); 5572 return; 5573 } 5574 5575 list_for_each_entry_safe(skb, tmp, &list, list) { 5576 skb_list_del_init(skb); 5577 napi_gro_receive(napi, skb); 5578 } 5579 } 5580 EXPORT_SYMBOL(ieee80211_rx_napi); 5581 5582 /* This is a version of the rx handler that can be called from hard irq 5583 * context. Post the skb on the queue and schedule the tasklet */ 5584 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 5585 { 5586 struct ieee80211_local *local = hw_to_local(hw); 5587 5588 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 5589 5590 skb->pkt_type = IEEE80211_RX_MSG; 5591 skb_queue_tail(&local->skb_queue, skb); 5592 tasklet_schedule(&local->tasklet); 5593 } 5594 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 5595