// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2002-2005, Instant802 Networks, Inc.
 * Copyright 2005-2006, Devicescape Software, Inc.
 * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
 * Copyright 2007-2010	Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2013-2014  Intel Mobile Communications GmbH
 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2025 Intel Corporation
 */

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/kcov.h>
#include <linux/bitops.h>
#include <kunit/visibility.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <linux/unaligned.h>

#include "ieee80211_i.h"
#include "driver-ops.h"
#include "led.h"
#include "mesh.h"
#include "wep.h"
#include "wpa.h"
#include "tkip.h"
#include "wme.h"
#include "rate.h"

/*
 * monitor mode reception
 *
 * This function cleans up the SKB, i.e. it removes all the stuff
 * only useful for monitoring.
 */
static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
					   unsigned int present_fcs_len,
					   unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen;
	__le16 fc;

	if (present_fcs_len)
		__pskb_trim(skb, skb->len - present_fcs_len);
	pskb_pull(skb, rtap_space);

	/* After pulling radiotap header, clear all flags that indicate
	 * info in skb->data.
	 */
	status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END |
			  RX_FLAG_RADIOTAP_LSIG |
			  RX_FLAG_RADIOTAP_HE_MU |
			  RX_FLAG_RADIOTAP_HE |
			  RX_FLAG_RADIOTAP_VHT);

	hdr = (void *)skb->data;
	fc = hdr->frame_control;

	/*
	 * Remove the HT-Control field (if present) on management
	 * frames after we've sent the frame to monitoring. We
	 * (currently) don't need it, and don't properly parse
	 * frames with it present, due to the assumption of a
	 * fixed management header length.
	 */
	if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
		return skb;

	hdrlen = ieee80211_hdrlen(fc);
	hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);

	if (!pskb_may_pull(skb, hdrlen)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
		hdrlen - IEEE80211_HT_CTL_LEN);
	pskb_pull(skb, IEEE80211_HT_CTL_LEN);

	return skb;
}

static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
				     unsigned int rtap_space)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr;

	hdr = (void *)(skb->data + rtap_space);

	if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
			    RX_FLAG_FAILED_PLCP_CRC |
			    RX_FLAG_ONLY_MONITOR |
			    RX_FLAG_NO_PSDU))
		return true;

	if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
		return true;

	if (ieee80211_is_ctl(hdr->frame_control) &&
	    !ieee80211_is_pspoll(hdr->frame_control) &&
	    !ieee80211_is_back_req(hdr->frame_control))
		return true;

	return false;
}

static int
ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
			     struct ieee80211_rx_status *status,
			     struct sk_buff *skb)
{
	int len;

	/* always present fields */
	len = sizeof(struct ieee80211_radiotap_header) + 8;

	/* allocate extra bitmaps */
	if (status->chains)
		len += 4 * hweight8(status->chains);

	if (ieee80211_have_rx_timestamp(status)) {
		len = ALIGN(len, 8);
		len += 8;
	}
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
		len += 1;

	/* antenna field, if we don't have per-chain info */
	if (!status->chains)
		len += 1;

	/* padding for RX_FLAGS if necessary */
	len = ALIGN(len, 2);

	if (status->encoding == RX_ENC_HT) /* HT info */
		len += 3;

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		len = ALIGN(len, 4);
		len += 8;
	}

	if (status->encoding == RX_ENC_VHT) {
		/* Included even if RX_FLAG_RADIOTAP_VHT is not set */
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12);
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		len = ALIGN(len, 8);
		len += 12;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		len = ALIGN(len, 2);
		len += 12;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
	}

	if (status->flag & RX_FLAG_NO_PSDU)
		len += 1;

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		len = ALIGN(len, 2);
		len += 4;
		BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
	}

	if (status->chains) {
		/* antenna and antenna signal fields */
		len += 2 * hweight8(status->chains);
	}

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
		int tlv_offset = 0;

		/*
		 * The position to look at depends on the existence (or non-
		 * existence) of other elements, so take that into account...
		 */
		if (status->flag & RX_FLAG_RADIOTAP_VHT)
			tlv_offset +=
				sizeof(struct ieee80211_radiotap_vht);
		if (status->flag & RX_FLAG_RADIOTAP_HE)
			tlv_offset +=
				sizeof(struct ieee80211_radiotap_he);
		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
			tlv_offset +=
				sizeof(struct ieee80211_radiotap_he_mu);
		if (status->flag & RX_FLAG_RADIOTAP_LSIG)
			tlv_offset +=
				sizeof(struct ieee80211_radiotap_lsig);

		/* ensure 4 byte alignment for TLV */
		len = ALIGN(len, 4);

		/* TLVs until the mac header */
		len += skb_mac_header(skb) - &skb->data[tlv_offset];
	}

	return len;
}

static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
					   int link_id,
					   struct sta_info *sta,
					   struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	if (link_id >= 0) {
		status->link_valid = 1;
		status->link_id = link_id;
	} else {
		status->link_valid = 0;
	}

	skb_queue_tail(&sdata->skb_queue, skb);
	wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
	if (sta) {
		struct link_sta_info *link_sta_info;

		if (link_id >= 0) {
			link_sta_info = rcu_dereference(sta->link[link_id]);
			if (!link_sta_info)
				return;
		} else {
			link_sta_info = &sta->deflink;
		}

		link_sta_info->rx_stats.packets++;
	}
}

static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
					 int link_id,
					 struct sta_info *sta,
					 struct sk_buff *skb)
{
	skb->protocol = 0;
	__ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb);
}

static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
					 struct sk_buff *skb,
					 int rtap_space)
{
	struct {
		struct ieee80211_hdr_3addr hdr;
		u8 category;
		u8 action_code;
	} __packed __aligned(2) action;

	if (!sdata)
		return;

	BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);

	if (skb->len < rtap_space + sizeof(action) +
		       VHT_MUMIMO_GROUPS_DATA_LEN)
		return;

	if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
		return;

	skb_copy_bits(skb, rtap_space, &action, sizeof(action));

	if (!ieee80211_is_action(action.hdr.frame_control))
		return;

	if (action.category != WLAN_CATEGORY_VHT)
		return;

	if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
		return;

	if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
		return;

	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return;

	ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb);
}

/*
 * ieee80211_add_rx_radiotap_header - add radiotap header
 *
 * add a radiotap header containing all the fields which the hardware provided.
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	u32 tlvs_len = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_radiotap_vht vht = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_VHT) {
		vht = *(struct ieee80211_radiotap_vht *)skb->data;
		skb_pull(skb, sizeof(vht));
		WARN_ON_ONCE(status->encoding != RX_ENC_VHT);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
		/* data now points at the TLVs; all other info was pulled off */
		tlvs_len = skb_mac_header(skb) - skb->data;
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len - tlvs_len);
	memset(rthdr, 0, rtap_len - tlvs_len);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
		it_present_val |= BIT(IEEE80211_RADIOTAP_TLV);

	put_unaligned_le32(it_present_val, it_present);

	/* This references through an offset into it_optional[] rather
	 * than via it_present otherwise later uses of pos will cause
	 * the compiler to think we have walked past the end of the
	 * struct member.
	 */
	pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];

	/* the order of the following fields is important */

	/* IEEE80211_RADIOTAP_TSFT */
	if (ieee80211_have_rx_timestamp(status)) {
		/* padding */
		while ((pos - (u8 *)rthdr) & 7)
			*pos++ = 0;
		put_unaligned_le64(
			ieee80211_calculate_rx_timestamp(local, status,
							 mpdulen, 0),
			pos);
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT));
		pos += 8;
	}

	/* IEEE80211_RADIOTAP_FLAGS */
	if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
		*pos |= IEEE80211_RADIOTAP_F_FCS;
	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
	if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
		*pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
	pos++;

	/* IEEE80211_RADIOTAP_RATE */
	if (!rate || status->encoding != RX_ENC_LEGACY) {
		/*
		 * Without rate information don't add it. If we have,
		 * MCS information is a separate field in radiotap,
		 * added below. The byte here is needed as padding
		 * for the channel though, so initialise it to 0.
		 */
		*pos = 0;
	} else {
		int shift = 0;
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
		if (status->bw == RATE_INFO_BW_10)
			shift = 1;
		else if (status->bw == RATE_INFO_BW_5)
			shift = 2;
		*pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
	}
	pos++;

	/* IEEE80211_RADIOTAP_CHANNEL */
	/* TODO: frequency offset in KHz */
	put_unaligned_le16(status->freq, pos);
	pos += 2;
	if (status->bw == RATE_INFO_BW_10)
		channel_flags |= IEEE80211_CHAN_HALF;
	else if (status->bw == RATE_INFO_BW_5)
		channel_flags |= IEEE80211_CHAN_QUARTER;

	if (status->band == NL80211_BAND_5GHZ ||
	    status->band == NL80211_BAND_6GHZ)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
	else if (status->encoding != RX_ENC_LEGACY)
		channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
	else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
		channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
	else if (rate)
		channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
	else
		channel_flags |= IEEE80211_CHAN_2GHZ;
	put_unaligned_le16(channel_flags, pos);
	pos += 2;

	/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
	if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
	    !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
		*pos = status->signal;
		rthdr->it_present |=
			cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL));
		pos++;
	}

	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */

	if (!status->chains) {
		/* IEEE80211_RADIOTAP_ANTENNA */
		*pos = status->antenna;
		pos++;
	}

	/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */

	/* IEEE80211_RADIOTAP_RX_FLAGS */
	/* ensure 2 byte alignment for the 2 byte field as required */
	if ((pos - (u8 *)rthdr) & 1)
		*pos++ = 0;
	if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
		rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
	put_unaligned_le16(rx_flags, pos);
	pos += 2;

	if (status->encoding == RX_ENC_HT) {
		unsigned int stbc;

		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
		*pos = local->hw.radiotap_mcs_details;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC;
		pos++;
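		/* second byte of the MCS field: flags (BW, GI, format, FEC, STBC) */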
		*pos = 0;
		if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			*pos |= IEEE80211_RADIOTAP_MCS_SGI;
		if (status->bw == RATE_INFO_BW_40)
			*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
		if (status->enc_flags & RX_ENC_FLAG_HT_GF)
			*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
		if (status->enc_flags & RX_ENC_FLAG_LDPC)
			*pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
		stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
		*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
		pos++;
		*pos++ = status->rate_idx;
	}

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		u16 flags = 0;

		/* ensure 4 byte alignment */
		while ((pos - (u8 *)rthdr) & 3)
			pos++;
		rthdr->it_present |=
			cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS));
		put_unaligned_le32(status->ampdu_reference, pos);
		pos += 4;
		if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_IS_LAST)
			flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
		if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
			flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
		if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
			flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
		put_unaligned_le16(flags, pos);
		pos += 2;
		*pos++ = 0;
		*pos++ = 0;
	}

	if (status->encoding == RX_ENC_VHT) {
		u16 fill = local->hw.radiotap_vht_details;

		/* Leave driver filled fields alone */
		fill &= ~le16_to_cpu(vht.known);
		vht.known |= cpu_to_le16(fill);

		if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI &&
		    status->enc_flags & RX_ENC_FLAG_SHORT_GI)
			vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
		/* in VHT, STBC is binary */
		if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC &&
		    status->enc_flags & RX_ENC_FLAG_STBC_MASK)
			vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
		if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED &&
		    status->enc_flags & RX_ENC_FLAG_BF)
			vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;

		if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
			switch (status->bw) {
			case RATE_INFO_BW_40:
				vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40;
				break;
			case RATE_INFO_BW_80:
				vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80;
				break;
			case RATE_INFO_BW_160:
				vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160;
				break;
			default:
				vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20;
				break;
			}
		}

		/*
		 * If the driver filled in mcs_nss[0], then do not touch it.
		 *
		 * Otherwise, put some information about MCS/NSS into the
		 * user 0 field. Note that this is not technically correct for
		 * an MU frame as we might have decoded a different user.
		 */
		if (!vht.mcs_nss[0]) {
			vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss;

			/* coding field */
			if (status->enc_flags & RX_ENC_FLAG_LDPC)
				vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
		memcpy(pos, &vht, sizeof(vht));
		pos += sizeof(vht);
	}

	if (local->hw.radiotap_timestamp.units_pos >= 0) {
		u16 accuracy = 0;
		u8 flags;
		u64 ts;

		rthdr->it_present |=
			cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP));

		/* ensure 8 byte alignment */
		while ((pos - (u8 *)rthdr) & 7)
			pos++;

		if (status->flag & RX_FLAG_MACTIME_IS_RTAP_TS64) {
			flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT;
			ts = status->mactime;
		} else {
			flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
			ts = status->device_timestamp;
		}

		put_unaligned_le64(ts, pos);
		pos += sizeof(u64);

		if (local->hw.radiotap_timestamp.accuracy >= 0) {
			accuracy = local->hw.radiotap_timestamp.accuracy;
			flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
		}
		put_unaligned_le16(accuracy, pos);
		pos += sizeof(u16);

		*pos++ = local->hw.radiotap_timestamp.units_pos;
		*pos++ = flags;
	}

	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE) {
#define HE_PREP(f, val)	le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)

		if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
			he.data6 |= HE_PREP(DATA6_NSTS,
					    FIELD_GET(RX_ENC_FLAG_STBC_MASK,
						      status->enc_flags));
			he.data3 |= HE_PREP(DATA3_STBC, 1);
		} else {
			he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
		}

#define CHECK_GI(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
		     (int)NL80211_RATE_INFO_HE_GI_##s)

		CHECK_GI(0_8);
		CHECK_GI(1_6);
		CHECK_GI(3_2);

		he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
		he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
		he.data3 |= HE_PREP(DATA3_CODING,
				    !!(status->enc_flags & RX_ENC_FLAG_LDPC));

		he.data5 |= HE_PREP(DATA5_GI, status->he_gi);

		switch (status->bw) {
		case RATE_INFO_BW_20:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
			break;
		case RATE_INFO_BW_40:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
			break;
		case RATE_INFO_BW_80:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
			break;
		case RATE_INFO_BW_160:
			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
			break;
		case RATE_INFO_BW_HE_RU:
#define CHECK_RU_ALLOC(s) \
	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
		     NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)

			CHECK_RU_ALLOC(26);
			CHECK_RU_ALLOC(52);
			CHECK_RU_ALLOC(106);
			CHECK_RU_ALLOC(242);
			CHECK_RU_ALLOC(484);
			CHECK_RU_ALLOC(996);
			CHECK_RU_ALLOC(2x996);

			he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
					    status->he_ru + 4);
			break;
		default:
			WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
		}

		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
		memcpy(pos, &he, sizeof(he));
		pos += sizeof(he);
	}

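	/* IEEE80211_RADIOTAP_HE_MU */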
	if (status->encoding == RX_ENC_HE &&
	    status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU));
		memcpy(pos, &he_mu, sizeof(he_mu));
		pos += sizeof(he_mu);
	}

	if (status->flag & RX_FLAG_NO_PSDU) {
		rthdr->it_present |=
			cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU));
		*pos++ = status->zero_length_psdu_type;
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		/* ensure 2 byte alignment */
		while ((pos - (u8 *)rthdr) & 1)
			pos++;
		rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG));
		memcpy(pos, &lsig, sizeof(lsig));
		pos += sizeof(lsig);
	}

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		*pos++ = status->chain_signal[chain];
		*pos++ = chain;
	}
}

static struct sk_buff *
ieee80211_make_monitor_skb(struct ieee80211_local *local,
			   struct sk_buff **origskb,
			   struct ieee80211_rate *rate,
			   int rtap_space, bool use_origskb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
	int rt_hdrlen, needed_headroom;
	struct sk_buff *skb;

	/* room for the radiotap header based on driver features */
	rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
	needed_headroom = rt_hdrlen - rtap_space;

	if (use_origskb) {
		/* only need to expand headroom if necessary */
		skb = *origskb;
		*origskb = NULL;

		/*
		 * This shouldn't trigger often because most devices have an
		 * RX header they pull before we get here, and that should
		 * be big enough for our radiotap information. We should
		 * probably export the length to drivers so that we can have
		 * them allocate enough headroom to start with.
		 */
		if (skb_headroom(skb) < needed_headroom &&
		    pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			return NULL;
		}
	} else {
		/*
		 * Need to make a copy and possibly remove radiotap header
		 * and FCS from the original.
		 */
		skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
				      0, GFP_ATOMIC);

		if (!skb)
			return NULL;
	}

	/* prepend radiotap information */
	ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);

	skb_reset_mac_header(skb);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->pkt_type = PACKET_OTHERHOST;
	skb->protocol = htons(ETH_P_802_2);

	return skb;
}

static bool
ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata,
				 struct ieee80211_local *local,
				 struct ieee80211_rx_status *status)
{
	struct wiphy *wiphy = local->hw.wiphy;
	int i, freq, bw;

	if (!wiphy->n_radio)
		return true;

	switch (status->bw) {
	case RATE_INFO_BW_20:
		bw = 20000;
		break;
	case RATE_INFO_BW_40:
		bw = 40000;
		break;
	case RATE_INFO_BW_80:
		bw = 80000;
		break;
	case RATE_INFO_BW_160:
		bw = 160000;
		break;
	case RATE_INFO_BW_320:
		bw = 320000;
		break;
	default:
		return false;
	}

	freq = MHZ_TO_KHZ(status->freq);

	for (i = 0; i < wiphy->n_radio; i++) {
		if (!(sdata->wdev.radio_mask & BIT(i)))
			continue;

		if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw))
			continue;

		return true;
	}
	return false;
}

/*
 * This function copies a received frame to all monitor interfaces and
 * returns a cleaned-up SKB that no longer includes the FCS nor the
 * radiotap header the driver might have added.
 */
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
		     struct ieee80211_rate *rate)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
	struct ieee80211_sub_if_data *sdata, *prev_sdata = NULL;
	struct sk_buff *skb, *monskb = NULL;
	int present_fcs_len = 0;
	unsigned int rtap_space = 0;
	struct ieee80211_sub_if_data *monitor_sdata =
		rcu_dereference(local->monitor_sdata);
	bool only_monitor = false;
	unsigned int min_head_len;

	if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END &&
			 !skb_mac_header_was_set(origskb))) {
		/* with this skb no way to know where frame payload starts */
		dev_kfree_skb(origskb);
		return NULL;
	}

	if (status->flag & RX_FLAG_RADIOTAP_VHT)
		rtap_space += sizeof(struct ieee80211_radiotap_vht);

	if (status->flag & RX_FLAG_RADIOTAP_HE)
		rtap_space += sizeof(struct ieee80211_radiotap_he);

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
		rtap_space += sizeof(struct ieee80211_radiotap_he_mu);

	if (status->flag & RX_FLAG_RADIOTAP_LSIG)
		rtap_space += sizeof(struct ieee80211_radiotap_lsig);

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
		rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space];

	min_head_len = rtap_space;

	/*
	 * First, we may need to make a copy of the skb because
	 * (1) we need to modify it for radiotap (if not present), and
	 * (2) the other RX handlers will modify the skb we got.
	 *
	 * We don't need to, of course, if we aren't going to return
	 * the SKB because it has a bad FCS/PLCP checksum.
	 */

	if (!(status->flag & RX_FLAG_NO_PSDU)) {
		if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
			if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
				/* driver bug */
				WARN_ON(1);
				dev_kfree_skb(origskb);
				return NULL;
			}
			present_fcs_len = FCS_LEN;
		}

		/* also consider the hdr->frame_control */
		min_head_len += 2;
	}

	/* ensure that the expected data elements are in skb head */
	if (!pskb_may_pull(origskb, min_head_len)) {
		dev_kfree_skb(origskb);
		return NULL;
	}

	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);

	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
		if (only_monitor) {
			dev_kfree_skb(origskb);
			return NULL;
		}

		return ieee80211_clean_skb(origskb, present_fcs_len,
					   rtap_space);
	}

	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);

	list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
		struct cfg80211_chan_def *chandef;

		chandef = &sdata->vif.bss_conf.chanreq.oper;
		if (chandef->chan &&
		    chandef->chan->center_freq != status->freq)
			continue;

		if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) &&
		    !ieee80211_validate_monitor_radio(sdata, local, status))
			continue;

		if (!prev_sdata) {
			prev_sdata = sdata;
			continue;
		}

		if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
			ieee80211_handle_mu_mimo_mon(sdata, origskb, rtap_space);

		if (!monskb)
			monskb = ieee80211_make_monitor_skb(local, &origskb,
							    rate, rtap_space,
							    false);
		if (!monskb)
			continue;

		skb = skb_clone(monskb, GFP_ATOMIC);
		if (!skb)
			continue;

		skb->dev = prev_sdata->dev;
		dev_sw_netstats_rx_add(skb->dev, skb->len);
		netif_receive_skb(skb);
		prev_sdata = sdata;
	}

	if (prev_sdata) {
		if (monskb)
			skb = monskb;
		else
			skb = ieee80211_make_monitor_skb(local, &origskb,
							 rate, rtap_space,
							 only_monitor);
		if (skb) {
			skb->dev = prev_sdata->dev;
			dev_sw_netstats_rx_add(skb->dev, skb->len);
			netif_receive_skb(skb);
		}
	}

	if (!origskb)
		return NULL;

	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
}

static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
	int tid, seqno_idx, security_idx;

	/* does the frame have a qos control field? */
	if (ieee80211_is_data_qos(hdr->frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		/* frame has qos control */
		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
			status->rx_flags |= IEEE80211_RX_AMSDU;

		seqno_idx = tid;
		security_idx = tid;
	} else {
		/*
		 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
		 *
		 *	Sequence numbers for management frames, QoS data
		 *	frames with a broadcast/multicast address in the
		 *	Address 1 field, and all non-QoS data frames sent
		 *	by QoS STAs are assigned using an additional single
		 *	modulo-4096 counter, [...]
		 *
		 * We also use that counter for non-QoS STAs.
		 */
		seqno_idx = IEEE80211_NUM_TIDS;
		security_idx = 0;
		if (ieee80211_is_mgmt(hdr->frame_control))
			security_idx = IEEE80211_NUM_TIDS;
		tid = 0;
	}

	rx->seqno_idx = seqno_idx;
	rx->security_idx = security_idx;
	/* Set skb->priority to 1d tag if highest order bit of TID is not set.
	 * For now, set skb->priority to 0 for other cases. */
	rx->skb->priority = (tid > 7) ? 0 : tid;
}

/**
 * DOC: Packet alignment
 *
 * Drivers always need to pass packets that are aligned to two-byte boundaries
 * to the stack.
 *
 * Additionally, they should, if possible, align the payload data in a way that
 * guarantees that the contained IP header is aligned to a four-byte
 * boundary. In the case of regular frames, this simply means aligning the
 * payload to a four-byte boundary (because either the IP header is directly
 * contained, or IV/RFC1042 headers that have a length divisible by four are
 * in front of it). If the payload data is not properly aligned and the
 * architecture doesn't support efficient unaligned operations, mac80211
 * will align the data.
 *
 * With A-MSDU frames, however, the payload data address must yield two modulo
 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
 * push the IP header further back to a multiple of four again. Thankfully, the
 * specs were sane enough this time around to require padding each A-MSDU
 * subframe to a length that is a multiple of four.
 *
 * Padding like Atheros hardware adds which is between the 802.11 header and
 * the payload is not supported; the driver is required to move the 802.11
 * header to be directly in front of the payload in that case.
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_keyid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	int hdrlen = ieee80211_hdrlen(fc);
	u8 keyid;

	/* WEP, TKIP, CCMP and GCMP */
	if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + 3, &keyid, 1);

	keyid >>= 6;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
	 */

	if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
		struct ieee80211_mgmt *mgmt;

		if (!ieee80211_is_mgmt(hdr->frame_control))
			return RX_DROP;

		if (ieee80211_is_action(hdr->frame_control)) {
			u8 category;

			/* make sure category field is present */
			if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
				return RX_DROP;

			mgmt = (struct ieee80211_mgmt *)hdr;
			category = mgmt->u.action.category;
			if (category != WLAN_CATEGORY_MESH_ACTION &&
			    category != WLAN_CATEGORY_SELF_PROTECTED)
				return RX_DROP;
			return RX_CONTINUE;
		}

		if (ieee80211_is_probe_req(hdr->frame_control) ||
		    ieee80211_is_probe_resp(hdr->frame_control) ||
		    ieee80211_is_beacon(hdr->frame_control) ||
		    ieee80211_is_auth(hdr->frame_control))
			return RX_CONTINUE;

		return RX_DROP;
	}

	return RX_CONTINUE;
}

static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
					      int index)
{
	struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *tail = skb_peek_tail(frames);
	struct ieee80211_rx_status *status;

	if (tid_agg_rx->reorder_buf_filtered &&
	    tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
		return true;

	if (!tail)
		return false;

	status = IEEE80211_SKB_RXCB(tail);
	if (status->flag & RX_FLAG_AMSDU_MORE)
		return false;

	return true;
}

static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
					    struct tid_ampdu_rx *tid_agg_rx,
					    int index,
					    struct sk_buff_head *frames)
{
	struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
	struct sk_buff *skb;
	struct ieee80211_rx_status *status;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	if (skb_queue_empty(skb_list))
		goto no_frame;

	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		__skb_queue_purge(skb_list);
		goto no_frame;
	}

	/* release frames from the reorder ring buffer */
	tid_agg_rx->stored_mpdu_num--;
	while ((skb = __skb_dequeue(skb_list))) {
		status = IEEE80211_SKB_RXCB(skb);
		status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
		__skb_queue_tail(frames, skb);
	}

 no_frame:
	if (tid_agg_rx->reorder_buf_filtered)
		tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
}

static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     u16 head_seq_num,
					     struct sk_buff_head *frames)
{
	int index;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
		index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
	}
}

/*
 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
 * the skb was added to the buffer longer than this time ago, the earlier
 * frames that have not yet been received are assumed to be lost and the skb
 * can be released for processing. This may also release other skb's from the
 * reorder buffer if there are no additional gaps between the frames.
 *
 * Callers must hold tid_agg_rx->reorder_lock.
 */
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)

static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
					  struct tid_ampdu_rx *tid_agg_rx,
					  struct sk_buff_head *frames)
{
	int index, i, j;

	lockdep_assert_held(&tid_agg_rx->reorder_lock);

	/* release the buffer until next missing frame */
	index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
	    tid_agg_rx->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
				skipped++;
				continue;
			}
			if (skipped &&
			    !time_after(jiffies, tid_agg_rx->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT))
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
			     i = (i + 1) % tid_agg_rx->buf_size)
				__skb_queue_purge(&tid_agg_rx->reorder_buf[i]);

			ht_dbg_ratelimited(sdata,
					   "release an RX reorder frame due to timeout on earlier frames\n");
			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
							frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			tid_agg_rx->head_seq_num =
				(tid_agg_rx->head_seq_num +
				 skipped) & IEEE80211_SN_MASK;
			skipped = 0;
		}
	} else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
						frames);
		index =	tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
	}

	if (tid_agg_rx->stored_mpdu_num) {
		j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;

		for (; j != (index - 1) % tid_agg_rx->buf_size;
		     j = (j + 1) % tid_agg_rx->buf_size) {
			if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
				break;
		}

 set_release_timer:

		if (!tid_agg_rx->removed)
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] + 1 +
				  HT_RX_REORDER_BUF_TIMEOUT);
	} else {
		timer_delete(&tid_agg_rx->reorder_timer);
	}
}

/*
 * As this function belongs to the RX path it must be under
 * rcu_read_lock protection. It returns false if the frame
 * can be processed immediately, true if it was consumed.
 */
static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
					     struct tid_ampdu_rx *tid_agg_rx,
					     struct sk_buff *skb,
					     struct sk_buff_head *frames)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	u16 mpdu_seq_num = ieee80211_get_sn(hdr);
	u16 head_seq_num, buf_size;
	int index;
	bool ret = true;

	spin_lock(&tid_agg_rx->reorder_lock);

	/*
	 * Offloaded BA sessions have no known starting sequence number so pick
	 * one from first Rxed frame for this tid after BA was started.
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
 * true if the MPDU was buffered, false if it should be processed.
 */
static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
				       struct sk_buff_head *frames)
{
	struct sk_buff *skb = rx->skb;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct sta_info *sta = rx->sta;
	struct tid_ampdu_rx *tid_agg_rx;
	u16 sc;
	u8 tid, ack_policy;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		goto dont_reorder;

	/*
	 * filter the QoS data rx stream according to
	 * STA/TID and check if this STA/TID is on aggregation
	 */

	if (!sta)
		goto dont_reorder;

	ack_policy = *ieee80211_get_qos_ctl(hdr) &
		     IEEE80211_QOS_CTL_ACK_POLICY_MASK;
	tid = ieee80211_get_tid(hdr);

	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (!tid_agg_rx) {
		if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
		    !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
		    !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
			ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
					     WLAN_BACK_RECIPIENT,
					     WLAN_REASON_QSTA_REQUIRE_SETUP);
		goto dont_reorder;
	}

	/* qos null data frames are excluded */
	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
		goto dont_reorder;

	/* not part of a BA session */
	if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
		goto dont_reorder;

	/* new, potentially un-ordered, ampdu frame - process it */

	/* reset session timer */
	if (tid_agg_rx->timeout)
		tid_agg_rx->last_rx = jiffies;

	/* if this mpdu is fragmented - terminate rx aggregation session */
	sc = le16_to_cpu(hdr->seq_ctrl);
	if (sc & IEEE80211_SCTL_FRAG) {
		ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb);
		return;
	}

	/*
	 * No locking needed -- we will only ever process one
	 * RX packet at a time, and thus own tid_agg_rx. All
	 * other code manipulating it needs to (and does) make
	 * sure that we cannot get to it any more before doing
	 * anything with it.
	 */
	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
					     frames))
		return;

 dont_reorder:
	__skb_queue_tail(frames, skb);
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (status->flag & RX_FLAG_DUP_VALIDATED)
		return RX_CONTINUE;

	/*
	 * Drop duplicate 802.11 retransmissions
	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
	 */

	if (rx->skb->len < 24)
		return RX_CONTINUE;

	if (ieee80211_is_ctl(hdr->frame_control) ||
	    ieee80211_is_any_nullfunc(hdr->frame_control))
		return RX_CONTINUE;

	if (!rx->sta)
		return RX_CONTINUE;

	if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
		struct ieee80211_sub_if_data *sdata = rx->sdata;
		u16 sn = ieee80211_get_sn(hdr);

		if (!ieee80211_is_data_present(hdr->frame_control))
			return RX_CONTINUE;

		if (!ieee80211_vif_is_mld(&sdata->vif) ||
		    sdata->vif.type != NL80211_IFTYPE_STATION)
			return RX_CONTINUE;

		if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
		    ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
			return RX_DROP_U_DUP;

		sdata->u.mgd.mcast_seq_last = sn;
		return RX_CONTINUE;
	}

	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
		     rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
		I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
		rx->link_sta->rx_stats.num_duplicates++;
		return RX_DROP_U_DUP;
	} else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;

	/* Drop disallowed frame classes based on STA auth/assoc state;
	 * IEEE 802.11, Chap 5.5.
	 *
	 * mac80211 filters only based on association state, i.e. it drops
	 * Class 3 frames from not associated stations. hostapd sends
	 * deauth/disassoc frames when needed. In addition, hostapd is
	 * responsible for filtering on both auth and assoc states.
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2,
					       rx->link_id, GFP_ATOMIC))
			return RX_DROP_U_SPURIOUS;

		return RX_DROP;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];
		struct txq_info *txqi = to_txq_info(txq);

		spin_lock(&local->active_txq_lock[txq->ac]);
		if (!list_empty(&txqi->schedule_order))
			list_del_init(&txqi->schedule_order);
		spin_unlock(&local->active_txq_lock[txq->ac]);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
		/*
		 * Clear the flag only if the other one is still set
		 * so that the TX path won't start TX'ing new frames
		 * directly ... In the case that the driver flag isn't
		 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
		 */
		clear_sta_flag(sta, WLAN_STA_PS_STA);
		ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
		       sta->sta.addr, sta->sta.aid);
		return;
	}

	set_sta_flag(sta, WLAN_STA_PS_DELIVER);
	clear_sta_flag(sta, WLAN_STA_PS_STA);
	ieee80211_sta_ps_deliver_wakeup(sta);
}

int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	bool in_ps;

	WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));

	/* Don't let the same PS state be set twice */
	in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
	if ((start && in_ps) || (!start && !in_ps))
		return -EINVAL;

	if (start)
		sta_ps_start(sta);
	else
		sta_ps_end(sta);

	return 0;
}
EXPORT_SYMBOL(ieee80211_sta_ps_transition);

void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);

	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_poll_response(sta);
	else
		set_sta_flag(sta, WLAN_STA_PSPOLL);
}
EXPORT_SYMBOL(ieee80211_sta_pspoll);

void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	int ac = ieee80211_ac_from_tid(tid);

	/*
	 * If this AC is not trigger-enabled do nothing unless the
	 * driver is calling us after it already checked.
	 *
	 * NB: This could/should check a separate bitmap of trigger-
	 * enabled queues, but for now we only implement uAPSD w/o
	 * TSPEC changes to the ACs, so they're always the same.
	 */
	if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
	    tid != IEEE80211_NUM_TIDS)
		return;

	/* if we are in a service period, do nothing */
	if (test_sta_flag(sta, WLAN_STA_SP))
		return;

	if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
		ieee80211_sta_ps_deliver_uapsd(sta);
	else
		set_sta_flag(sta, WLAN_STA_UAPSD);
}
EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
{
	struct ieee80211_sub_if_data *sdata = rx->sdata;
	struct ieee80211_hdr *hdr = (void *)rx->skb->data;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);

	if (!rx->sta)
		return RX_CONTINUE;

	if (sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
		return RX_CONTINUE;

	/*
	 * The device handles station powersave, so don't do anything about
	 * uAPSD and PS-Poll frames (the latter shouldn't even come up from
	 * it to mac80211 since they're handled.)
	 */
	if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
		return RX_CONTINUE;

	/*
	 * Don't do anything if the station isn't already asleep. In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct link_sta_info *link_sta = rx->link_sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta || !link_sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for station already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			link_sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data_present(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				link_sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		link_sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
1874		 */
1875		link_sta->rx_stats.last_rx = jiffies;
1876		if (ieee80211_is_data_present(hdr->frame_control))
1877			link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1878	}
1879
1880	link_sta->rx_stats.fragments++;
1881
1882	u64_stats_update_begin(&link_sta->rx_stats.syncp);
1883	link_sta->rx_stats.bytes += rx->skb->len;
1884	u64_stats_update_end(&link_sta->rx_stats.syncp);
1885
1886	if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1887		link_sta->rx_stats.last_signal = status->signal;
1888		ewma_signal_add(&link_sta->rx_stats_avg.signal,
1889				-status->signal);
1890	}
1891
1892	if (status->chains) {
1893		link_sta->rx_stats.chains = status->chains;
1894		for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1895			int signal = status->chain_signal[i];
1896
1897			if (!(status->chains & BIT(i)))
1898				continue;
1899
1900			link_sta->rx_stats.chain_signal_last[i] = signal;
1901			ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
1902					-signal);
1903		}
1904	}
1905
1906	if (ieee80211_is_s1g_beacon(hdr->frame_control))
1907		return RX_CONTINUE;
1908
1909	/*
1910	 * Change STA power saving mode only at the end of a frame
1911	 * exchange sequence, and only for a data or management
1912	 * frame as specified in IEEE 802.11-2016 11.2.3.2
1913	 */
1914	if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1915	    !ieee80211_has_morefrags(hdr->frame_control) &&
1916	    !is_multicast_ether_addr(hdr->addr1) &&
1917	    (ieee80211_is_mgmt(hdr->frame_control) ||
1918	     ieee80211_is_data(hdr->frame_control)) &&
1919	    !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1920	    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1921	     rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1922		if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1923			if (!ieee80211_has_pm(hdr->frame_control))
1924				sta_ps_end(sta);
1925		} else {
1926			if (ieee80211_has_pm(hdr->frame_control))
1927				sta_ps_start(sta);
1928		}
1929	}
1930
1931	/* mesh power save support */
1932	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1933		ieee80211_mps_rx_h_sta_process(sta, hdr);
1934
1935	/*
1936	 * Drop (qos-)data::nullfunc frames silently, since they
1937	 * are used only to control station power saving mode.
1938	 */
1939	if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1940		I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1941
1942		/*
1943		 * If we receive a 4-addr nullfunc frame from a STA
1944		 * that was not yet moved to a 4-addr STA VLAN, send
1945		 * the event to userspace; for older hostapd, drop
1946		 * the frame to the monitor interface.
1947		 */
1948		if (ieee80211_has_a4(hdr->frame_control) &&
1949		    (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1950		     (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1951		      !rx->sdata->u.vlan.sta))) {
1952			if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1953				cfg80211_rx_unexpected_4addr_frame(
1954					rx->sdata->dev, sta->sta.addr,
1955					rx->link_id, GFP_ATOMIC);
1956			return RX_DROP_U_UNEXPECTED_4ADDR_FRAME;
1957		}
1958		/*
1959		 * Update counter and free packet here to avoid
1960		 * counting this as a dropped packet.
1961 */ 1962 link_sta->rx_stats.packets++; 1963 dev_kfree_skb(rx->skb); 1964 return RX_QUEUED; 1965 } 1966 1967 return RX_CONTINUE; 1968 } /* ieee80211_rx_h_sta_process */ 1969 1970 static struct ieee80211_key * 1971 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1972 { 1973 struct ieee80211_key *key = NULL; 1974 int idx2; 1975 1976 /* Make sure key gets set if either BIGTK key index is set so that 1977 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1978 * Beacon frames and Beacon frames that claim to use another BIGTK key 1979 * index (i.e., a key that we do not have). 1980 */ 1981 1982 if (idx < 0) { 1983 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1984 idx2 = idx + 1; 1985 } else { 1986 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1987 idx2 = idx + 1; 1988 else 1989 idx2 = idx - 1; 1990 } 1991 1992 if (rx->link_sta) 1993 key = rcu_dereference(rx->link_sta->gtk[idx]); 1994 if (!key) 1995 key = rcu_dereference(rx->link->gtk[idx]); 1996 if (!key && rx->link_sta) 1997 key = rcu_dereference(rx->link_sta->gtk[idx2]); 1998 if (!key) 1999 key = rcu_dereference(rx->link->gtk[idx2]); 2000 2001 return key; 2002 } 2003 2004 static ieee80211_rx_result debug_noinline 2005 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 2006 { 2007 struct sk_buff *skb = rx->skb; 2008 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2010 int keyidx; 2011 ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL; 2012 struct ieee80211_key *sta_ptk = NULL; 2013 struct ieee80211_key *ptk_idx = NULL; 2014 int mmie_keyidx = -1; 2015 __le16 fc; 2016 2017 if (ieee80211_is_ext(hdr->frame_control)) 2018 return RX_CONTINUE; 2019 2020 /* 2021 * Key selection 101 2022 * 2023 * There are five types of keys: 2024 * - GTK (group keys) 2025 * - IGTK (group keys for management frames) 2026 * - BIGTK (group keys for Beacon frames) 2027 * - PTK (pairwise keys) 2028 * - STK (station-to-station pairwise keys) 2029 * 2030 * When selecting a key, we have to distinguish between multicast 2031 * (including broadcast) and unicast frames, the latter can only 2032 * use PTKs and STKs while the former always use GTKs, IGTKs, and 2033 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 2034 * then unicast frames can also use key indices like GTKs. Hence, if we 2035 * don't have a PTK/STK we check the key index for a WEP key. 2036 * 2037 * Note that in a regular BSS, multicast frames are sent by the 2038 * AP only, associated stations unicast the frame to the AP first 2039 * which then multicasts it on their behalf. 2040 * 2041 * There is also a slight problem in IBSS mode: GTKs are negotiated 2042 * with each station, that is something we don't currently handle. 2043 * The spec seems to expect that one negotiates the same key with 2044 * every station but there's no such requirement; VLANs could be 2045 * possible. 
2046 */ 2047 2048 /* start without a key */ 2049 rx->key = NULL; 2050 fc = hdr->frame_control; 2051 2052 if (rx->sta) { 2053 int keyid = rx->sta->ptk_idx; 2054 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 2055 2056 if (ieee80211_has_protected(fc) && 2057 !(status->flag & RX_FLAG_IV_STRIPPED)) { 2058 keyid = ieee80211_get_keyid(rx->skb); 2059 2060 if (unlikely(keyid < 0)) 2061 return RX_DROP_U_NO_KEY_ID; 2062 2063 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 2064 } 2065 } 2066 2067 if (!ieee80211_has_protected(fc)) 2068 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 2069 2070 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 2071 rx->key = ptk_idx ? ptk_idx : sta_ptk; 2072 if ((status->flag & RX_FLAG_DECRYPTED) && 2073 (status->flag & RX_FLAG_IV_STRIPPED)) 2074 return RX_CONTINUE; 2075 /* Skip decryption if the frame is not protected. */ 2076 if (!ieee80211_has_protected(fc)) 2077 return RX_CONTINUE; 2078 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 2079 /* Broadcast/multicast robust management frame / BIP */ 2080 if ((status->flag & RX_FLAG_DECRYPTED) && 2081 (status->flag & RX_FLAG_IV_STRIPPED)) 2082 return RX_CONTINUE; 2083 2084 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 2085 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 2086 NUM_DEFAULT_BEACON_KEYS) { 2087 if (rx->sdata->dev) 2088 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2089 skb->data, 2090 skb->len); 2091 return RX_DROP_U_BAD_BCN_KEYIDX; 2092 } 2093 2094 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 2095 if (!rx->key) 2096 return RX_CONTINUE; /* Beacon protection not in use */ 2097 } else if (mmie_keyidx >= 0) { 2098 /* Broadcast/multicast robust management frame / BIP */ 2099 if ((status->flag & RX_FLAG_DECRYPTED) && 2100 (status->flag & RX_FLAG_IV_STRIPPED)) 2101 return RX_CONTINUE; 2102 2103 if (mmie_keyidx < NUM_DEFAULT_KEYS || 2104 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 2105 return RX_DROP_U_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ 2106 if (rx->link_sta) { 2107 if (ieee80211_is_group_privacy_action(skb) && 2108 test_sta_flag(rx->sta, WLAN_STA_MFP)) 2109 return RX_DROP; 2110 2111 rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); 2112 } 2113 if (!rx->key) 2114 rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); 2115 } else if (!ieee80211_has_protected(fc)) { 2116 /* 2117 * The frame was not protected, so skip decryption. However, we 2118 * need to set rx->key if there is a key that could have been 2119 * used so that the frame may be dropped if encryption would 2120 * have been expected. 2121 */ 2122 struct ieee80211_key *key = NULL; 2123 int i; 2124 2125 if (ieee80211_is_beacon(fc)) { 2126 key = ieee80211_rx_get_bigtk(rx, -1); 2127 } else if (ieee80211_is_mgmt(fc) && 2128 is_multicast_ether_addr(hdr->addr1)) { 2129 key = rcu_dereference(rx->link->default_mgmt_key); 2130 } else { 2131 if (rx->link_sta) { 2132 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2133 key = rcu_dereference(rx->link_sta->gtk[i]); 2134 if (key) 2135 break; 2136 } 2137 } 2138 if (!key) { 2139 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2140 key = rcu_dereference(rx->link->gtk[i]); 2141 if (key) 2142 break; 2143 } 2144 } 2145 } 2146 if (key) 2147 rx->key = key; 2148 return RX_CONTINUE; 2149 } else { 2150 /* 2151 * The device doesn't give us the IV so we won't be 2152 * able to look up the key. That's ok though, we 2153 * don't need to decrypt the frame, we just won't 2154 * be able to keep statistics accurate. 
2155 * Except for key threshold notifications, should 2156 * we somehow allow the driver to tell us which key 2157 * the hardware used if this flag is set? 2158 */ 2159 if ((status->flag & RX_FLAG_DECRYPTED) && 2160 (status->flag & RX_FLAG_IV_STRIPPED)) 2161 return RX_CONTINUE; 2162 2163 keyidx = ieee80211_get_keyid(rx->skb); 2164 2165 if (unlikely(keyidx < 0)) 2166 return RX_DROP_U_NO_KEY_ID; 2167 2168 /* check per-station GTK first, if multicast packet */ 2169 if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) 2170 rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); 2171 2172 /* if not found, try default key */ 2173 if (!rx->key) { 2174 if (is_multicast_ether_addr(hdr->addr1)) 2175 rx->key = rcu_dereference(rx->link->gtk[keyidx]); 2176 if (!rx->key) 2177 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2178 2179 /* 2180 * RSNA-protected unicast frames should always be 2181 * sent with pairwise or station-to-station keys, 2182 * but for WEP we allow using a key index as well. 2183 */ 2184 if (rx->key && 2185 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2186 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2187 !is_multicast_ether_addr(hdr->addr1)) 2188 rx->key = NULL; 2189 } 2190 } 2191 2192 if (rx->key) { 2193 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2194 return RX_DROP; 2195 2196 /* TODO: add threshold stuff again */ 2197 } else { 2198 return RX_DROP; 2199 } 2200 2201 switch (rx->key->conf.cipher) { 2202 case WLAN_CIPHER_SUITE_WEP40: 2203 case WLAN_CIPHER_SUITE_WEP104: 2204 result = ieee80211_crypto_wep_decrypt(rx); 2205 break; 2206 case WLAN_CIPHER_SUITE_TKIP: 2207 result = ieee80211_crypto_tkip_decrypt(rx); 2208 break; 2209 case WLAN_CIPHER_SUITE_CCMP: 2210 result = ieee80211_crypto_ccmp_decrypt( 2211 rx, IEEE80211_CCMP_MIC_LEN); 2212 break; 2213 case WLAN_CIPHER_SUITE_CCMP_256: 2214 result = ieee80211_crypto_ccmp_decrypt( 2215 rx, IEEE80211_CCMP_256_MIC_LEN); 2216 break; 2217 case WLAN_CIPHER_SUITE_AES_CMAC: 2218 result = ieee80211_crypto_aes_cmac_decrypt(rx); 2219 break; 2220 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2221 result = ieee80211_crypto_aes_cmac_256_decrypt(rx); 2222 break; 2223 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2224 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2225 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2226 break; 2227 case WLAN_CIPHER_SUITE_GCMP: 2228 case WLAN_CIPHER_SUITE_GCMP_256: 2229 result = ieee80211_crypto_gcmp_decrypt(rx); 2230 break; 2231 default: 2232 result = RX_DROP_U_BAD_CIPHER; 2233 } 2234 2235 /* the hdr variable is invalid after the decrypt handlers */ 2236 2237 /* either the frame has been decrypted or will be dropped */ 2238 status->flag |= RX_FLAG_DECRYPTED; 2239 2240 if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) && 2241 rx->sdata->dev)) 2242 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2243 skb->data, skb->len); 2244 2245 return result; 2246 } 2247 2248 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) 2249 { 2250 int i; 2251 2252 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2253 skb_queue_head_init(&cache->entries[i].skb_list); 2254 } 2255 2256 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) 2257 { 2258 int i; 2259 2260 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2261 __skb_queue_purge(&cache->entries[i].skb_list); 2262 } 2263 2264 static inline struct ieee80211_fragment_entry * 2265 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, 2266 unsigned int frag, unsigned int seq, int rx_queue, 2267 struct sk_buff **skb) 2268 { 2269 
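	/*
	 * Note: the fragment cache below behaves as a small ring buffer:
	 * 'next' selects the slot to (re)use and wraps at
	 * IEEE80211_FRAGMENT_MAX, so starting a new reassembly silently
	 * discards the oldest incomplete one when all slots are busy.
	 */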
struct ieee80211_fragment_entry *entry; 2270 2271 entry = &cache->entries[cache->next++]; 2272 if (cache->next >= IEEE80211_FRAGMENT_MAX) 2273 cache->next = 0; 2274 2275 __skb_queue_purge(&entry->skb_list); 2276 2277 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2278 *skb = NULL; 2279 entry->first_frag_time = jiffies; 2280 entry->seq = seq; 2281 entry->rx_queue = rx_queue; 2282 entry->last_frag = frag; 2283 entry->check_sequential_pn = false; 2284 entry->extra_len = 0; 2285 2286 return entry; 2287 } 2288 2289 static inline struct ieee80211_fragment_entry * 2290 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, 2291 unsigned int frag, unsigned int seq, 2292 int rx_queue, struct ieee80211_hdr *hdr) 2293 { 2294 struct ieee80211_fragment_entry *entry; 2295 int i, idx; 2296 2297 idx = cache->next; 2298 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2299 struct ieee80211_hdr *f_hdr; 2300 struct sk_buff *f_skb; 2301 2302 idx--; 2303 if (idx < 0) 2304 idx = IEEE80211_FRAGMENT_MAX - 1; 2305 2306 entry = &cache->entries[idx]; 2307 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2308 entry->rx_queue != rx_queue || 2309 entry->last_frag + 1 != frag) 2310 continue; 2311 2312 f_skb = __skb_peek(&entry->skb_list); 2313 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2314 2315 /* 2316 * Check ftype and addresses are equal, else check next fragment 2317 */ 2318 if (((hdr->frame_control ^ f_hdr->frame_control) & 2319 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2320 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2321 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2322 continue; 2323 2324 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2325 __skb_queue_purge(&entry->skb_list); 2326 continue; 2327 } 2328 return entry; 2329 } 2330 2331 return NULL; 2332 } 2333 2334 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) 2335 { 2336 return rx->key && 2337 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2338 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2339 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2340 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2341 ieee80211_has_protected(fc); 2342 } 2343 2344 static ieee80211_rx_result debug_noinline 2345 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2346 { 2347 struct ieee80211_fragment_cache *cache = &rx->sdata->frags; 2348 struct ieee80211_hdr *hdr; 2349 u16 sc; 2350 __le16 fc; 2351 unsigned int frag, seq; 2352 struct ieee80211_fragment_entry *entry; 2353 struct sk_buff *skb; 2354 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2355 2356 hdr = (struct ieee80211_hdr *)rx->skb->data; 2357 fc = hdr->frame_control; 2358 2359 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2360 return RX_CONTINUE; 2361 2362 sc = le16_to_cpu(hdr->seq_ctrl); 2363 frag = sc & IEEE80211_SCTL_FRAG; 2364 2365 if (rx->sta) 2366 cache = &rx->sta->frags; 2367 2368 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2369 goto out; 2370 2371 if (is_multicast_ether_addr(hdr->addr1)) 2372 return RX_DROP; 2373 2374 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2375 2376 if (skb_linearize(rx->skb)) 2377 return RX_DROP_U_OOM; 2378 2379 /* 2380 * skb_linearize() might change the skb->data and 2381 * previously cached variables (in this case, hdr) need to 2382 * be refreshed with the new data. 2383 */ 2384 hdr = (struct ieee80211_hdr *)rx->skb->data; 2385 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2386 2387 if (frag == 0) { 2388 /* This is the first fragment of a new frame. 
*/ 2389 entry = ieee80211_reassemble_add(cache, frag, seq, 2390 rx->seqno_idx, &(rx->skb)); 2391 if (requires_sequential_pn(rx, fc)) { 2392 int queue = rx->security_idx; 2393 2394 /* Store CCMP/GCMP PN so that we can verify that the 2395 * next fragment has a sequential PN value. 2396 */ 2397 entry->check_sequential_pn = true; 2398 entry->is_protected = true; 2399 entry->key_color = rx->key->color; 2400 memcpy(entry->last_pn, 2401 rx->key->u.ccmp.rx_pn[queue], 2402 IEEE80211_CCMP_PN_LEN); 2403 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2404 u.ccmp.rx_pn) != 2405 offsetof(struct ieee80211_key, 2406 u.gcmp.rx_pn)); 2407 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2408 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2409 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2410 IEEE80211_GCMP_PN_LEN); 2411 } else if (rx->key && 2412 (ieee80211_has_protected(fc) || 2413 (status->flag & RX_FLAG_DECRYPTED))) { 2414 entry->is_protected = true; 2415 entry->key_color = rx->key->color; 2416 } 2417 return RX_QUEUED; 2418 } 2419 2420 /* This is a fragment for a frame that should already be pending in 2421 * fragment cache. Add this fragment to the end of the pending entry. 2422 */ 2423 entry = ieee80211_reassemble_find(cache, frag, seq, 2424 rx->seqno_idx, hdr); 2425 if (!entry) { 2426 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2427 return RX_DROP; 2428 } 2429 2430 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2431 * MPDU PN values are not incrementing in steps of 1." 2432 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2433 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2434 */ 2435 if (entry->check_sequential_pn) { 2436 int i; 2437 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2438 2439 if (!requires_sequential_pn(rx, fc)) 2440 return RX_DROP_U_NONSEQ_PN; 2441 2442 /* Prevent mixed key and fragment cache attacks */ 2443 if (entry->key_color != rx->key->color) 2444 return RX_DROP_U_BAD_KEY_COLOR; 2445 2446 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2447 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2448 pn[i]++; 2449 if (pn[i]) 2450 break; 2451 } 2452 2453 rpn = rx->ccm_gcm.pn; 2454 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2455 return RX_DROP_U_REPLAY; 2456 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2457 } else if (entry->is_protected && 2458 (!rx->key || 2459 (!ieee80211_has_protected(fc) && 2460 !(status->flag & RX_FLAG_DECRYPTED)) || 2461 rx->key->color != entry->key_color)) { 2462 /* Drop this as a mixed key or fragment cache attack, even 2463 * if for TKIP Michael MIC should protect us, and WEP is a 2464 * lost cause anyway. 
2465 */ 2466 return RX_DROP_U_EXPECT_DEFRAG_PROT; 2467 } else if (entry->is_protected && rx->key && 2468 entry->key_color != rx->key->color && 2469 (status->flag & RX_FLAG_DECRYPTED)) { 2470 return RX_DROP_U_BAD_KEY_COLOR; 2471 } 2472 2473 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2474 __skb_queue_tail(&entry->skb_list, rx->skb); 2475 entry->last_frag = frag; 2476 entry->extra_len += rx->skb->len; 2477 if (ieee80211_has_morefrags(fc)) { 2478 rx->skb = NULL; 2479 return RX_QUEUED; 2480 } 2481 2482 rx->skb = __skb_dequeue(&entry->skb_list); 2483 if (skb_tailroom(rx->skb) < entry->extra_len) { 2484 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2485 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2486 GFP_ATOMIC))) { 2487 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2488 __skb_queue_purge(&entry->skb_list); 2489 return RX_DROP_U_OOM; 2490 } 2491 } 2492 while ((skb = __skb_dequeue(&entry->skb_list))) { 2493 skb_put_data(rx->skb, skb->data, skb->len); 2494 dev_kfree_skb(skb); 2495 } 2496 2497 out: 2498 ieee80211_led_rx(rx->local); 2499 if (rx->sta) 2500 rx->link_sta->rx_stats.packets++; 2501 return RX_CONTINUE; 2502 } 2503 2504 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2505 { 2506 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2507 return -EACCES; 2508 2509 return 0; 2510 } 2511 2512 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2513 { 2514 struct sk_buff *skb = rx->skb; 2515 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2516 2517 /* 2518 * Pass through unencrypted frames if the hardware has 2519 * decrypted them already. 2520 */ 2521 if (status->flag & RX_FLAG_DECRYPTED) 2522 return 0; 2523 2524 /* Drop unencrypted frames if key is set. */ 2525 if (unlikely(!ieee80211_has_protected(fc) && 2526 !ieee80211_is_any_nullfunc(fc) && 2527 ieee80211_is_data(fc) && rx->key)) 2528 return -EACCES; 2529 2530 return 0; 2531 } 2532 2533 VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result 2534 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2535 { 2536 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2537 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2538 __le16 fc = mgmt->frame_control; 2539 2540 /* 2541 * Pass through unencrypted frames if the hardware has 2542 * decrypted them already. 2543 */ 2544 if (status->flag & RX_FLAG_DECRYPTED) 2545 return RX_CONTINUE; 2546 2547 /* drop unicast protected dual (that wasn't protected) */ 2548 if (ieee80211_is_action(fc) && 2549 mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) 2550 return RX_DROP_U_UNPROT_DUAL; 2551 2552 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2553 if (unlikely(!ieee80211_has_protected(fc) && 2554 ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { 2555 if (ieee80211_is_deauth(fc) || 2556 ieee80211_is_disassoc(fc)) { 2557 /* 2558 * Permit unprotected deauth/disassoc frames 2559 * during 4-way-HS (key is installed after HS). 
2560				 */
2561				if (!rx->key)
2562					return RX_CONTINUE;
2563
2564				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2565							     rx->skb->data,
2566							     rx->skb->len);
2567			}
2568			return RX_DROP_U_UNPROT_UCAST_MGMT;
2569		}
2570		/* BIP does not use Protected field, so need to check MMIE */
2571		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2572			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2573			if (ieee80211_is_deauth(fc) ||
2574			    ieee80211_is_disassoc(fc))
2575				cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2576							     rx->skb->data,
2577							     rx->skb->len);
2578			return RX_DROP_U_UNPROT_MCAST_MGMT;
2579		}
2580		if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2581			     ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2582			cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2583						     rx->skb->data,
2584						     rx->skb->len);
2585			return RX_DROP_U_UNPROT_BEACON;
2586		}
2587		/*
2588		 * When using MFP, Action frames are not allowed prior to
2589		 * having configured keys.
2590		 */
2591		if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2592			     ieee80211_is_robust_mgmt_frame(rx->skb)))
2593			return RX_DROP_U_UNPROT_ACTION;
2594
2595		/* drop unicast public action frames when using MFP */
2596		if (is_unicast_ether_addr(mgmt->da) &&
2597		    ieee80211_is_protected_dual_of_public_action(rx->skb))
2598			return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION;
2599	}
2600
2601	/*
2602	 * Drop robust action frames before association regardless of MFP
2603	 * state; after association we have decided whether MFP is in use.
2604	 */
2605	if (ieee80211_is_action(fc) &&
2606	    ieee80211_is_robust_mgmt_frame(rx->skb) &&
2607	    (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))
2608		return RX_DROP_U_UNPROT_ROBUST_ACTION;
2609
2610	return RX_CONTINUE;
2611 }
2612 EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt);
2613
2614 static ieee80211_rx_result
2615 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2616 {
2617	struct ieee80211_sub_if_data *sdata = rx->sdata;
2618	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2619	bool check_port_control = false;
2620	struct ethhdr *ehdr;
2621	int ret;
2622
2623	*port_control = false;
2624	if (ieee80211_has_a4(hdr->frame_control) &&
2625	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2626		return RX_DROP_U_UNEXPECTED_VLAN_4ADDR;
2627
2628	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2629	    !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2630		if (!sdata->u.mgd.use_4addr)
2631			return RX_DROP_U_UNEXPECTED_STA_4ADDR;
2632		else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2633			check_port_control = true;
2634	}
2635
2636	if (is_multicast_ether_addr(hdr->addr1) &&
2637	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2638		return RX_DROP_U_UNEXPECTED_VLAN_MCAST;
2639
2640	ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2641	if (ret < 0)
2642		return RX_DROP_U_INVALID_8023;
2643
2644	ehdr = (struct ethhdr *) rx->skb->data;
2645	if (ehdr->h_proto == rx->sdata->control_port_protocol)
2646		*port_control = true;
2647	else if (check_port_control)
2648		return RX_DROP_U_NOT_PORT_CONTROL;
2649
2650	return RX_CONTINUE;
2651 }
2652
2653 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata,
2654			    const u8 *addr, int *out_link_id)
2655 {
2656	unsigned int link_id;
2657
2658	/* non-MLO, or MLD address replaced by hardware */
2659	if (ether_addr_equal(sdata->vif.addr, addr))
2660		return true;
2661
2662	if (!ieee80211_vif_is_mld(&sdata->vif))
2663		return false;
2664
2665	for (link_id = 0; link_id <
ARRAY_SIZE(sdata->vif.link_conf); link_id++) { 2666 struct ieee80211_bss_conf *conf; 2667 2668 conf = rcu_dereference(sdata->vif.link_conf[link_id]); 2669 2670 if (!conf) 2671 continue; 2672 if (ether_addr_equal(conf->addr, addr)) { 2673 if (out_link_id) 2674 *out_link_id = link_id; 2675 return true; 2676 } 2677 } 2678 2679 return false; 2680 } 2681 2682 /* 2683 * requires that rx->skb is a frame with ethernet header 2684 */ 2685 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2686 { 2687 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2688 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2689 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2690 2691 /* 2692 * Allow EAPOL frames to us/the PAE group address regardless of 2693 * whether the frame was encrypted or not, and always disallow 2694 * all other destination addresses for them. 2695 */ 2696 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) 2697 return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) || 2698 ether_addr_equal(ehdr->h_dest, pae_group_addr); 2699 2700 if (ieee80211_802_1x_port_control(rx) || 2701 ieee80211_drop_unencrypted(rx, fc)) 2702 return false; 2703 2704 return true; 2705 } 2706 2707 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2708 struct ieee80211_rx_data *rx) 2709 { 2710 struct ieee80211_sub_if_data *sdata = rx->sdata; 2711 struct net_device *dev = sdata->dev; 2712 2713 if (unlikely((skb->protocol == sdata->control_port_protocol || 2714 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2715 !sdata->control_port_no_preauth)) && 2716 sdata->control_port_over_nl80211)) { 2717 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2718 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2719 2720 cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id); 2721 dev_kfree_skb(skb); 2722 } else { 2723 struct ethhdr *ehdr = (void *)skb_mac_header(skb); 2724 2725 memset(skb->cb, 0, sizeof(skb->cb)); 2726 2727 /* 2728 * 802.1X over 802.11 requires that the authenticator address 2729 * be used for EAPOL frames. However, 802.1X allows the use of 2730 * the PAE group address instead. If the interface is part of 2731 * a bridge and we pass the frame with the PAE group address, 2732 * then the bridge will forward it to the network (even if the 2733 * client was not associated yet), which isn't supposed to 2734 * happen. 2735 * To avoid that, rewrite the destination address to our own 2736 * address, so that the authenticator (e.g. hostapd) will see 2737 * the frame, but bridge won't forward it anywhere else. Note 2738 * that due to earlier filtering, the only other address can 2739 * be the PAE group address, unless the hardware allowed them 2740 * through in 802.3 offloaded mode. 
2741 */ 2742 if (unlikely(skb->protocol == sdata->control_port_protocol && 2743 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) 2744 ether_addr_copy(ehdr->h_dest, sdata->vif.addr); 2745 2746 /* deliver to local stack */ 2747 if (rx->list) 2748 list_add_tail(&skb->list, rx->list); 2749 else 2750 netif_receive_skb(skb); 2751 } 2752 } 2753 2754 /* 2755 * requires that rx->skb is a frame with ethernet header 2756 */ 2757 static void 2758 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2759 { 2760 struct ieee80211_sub_if_data *sdata = rx->sdata; 2761 struct net_device *dev = sdata->dev; 2762 struct sk_buff *skb, *xmit_skb; 2763 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2764 struct sta_info *dsta; 2765 2766 skb = rx->skb; 2767 xmit_skb = NULL; 2768 2769 dev_sw_netstats_rx_add(dev, skb->len); 2770 2771 if (rx->sta) { 2772 /* The seqno index has the same property as needed 2773 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2774 * for non-QoS-data frames. Here we know it's a data 2775 * frame, so count MSDUs. 2776 */ 2777 u64_stats_update_begin(&rx->link_sta->rx_stats.syncp); 2778 rx->link_sta->rx_stats.msdu[rx->seqno_idx]++; 2779 u64_stats_update_end(&rx->link_sta->rx_stats.syncp); 2780 } 2781 2782 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2783 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2784 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2785 ehdr->h_proto != rx->sdata->control_port_protocol && 2786 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2787 if (is_multicast_ether_addr(ehdr->h_dest) && 2788 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2789 /* 2790 * send multicast frames both to higher layers in 2791 * local net stack and back to the wireless medium 2792 */ 2793 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2794 if (!xmit_skb) 2795 net_info_ratelimited("%s: failed to clone multicast frame\n", 2796 dev->name); 2797 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2798 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2799 dsta = sta_info_get(sdata, ehdr->h_dest); 2800 if (dsta) { 2801 /* 2802 * The destination station is associated to 2803 * this AP (in this VLAN), so send the frame 2804 * directly to it and do not pass it to local 2805 * net stack. 2806 */ 2807 xmit_skb = skb; 2808 skb = NULL; 2809 } 2810 } 2811 } 2812 2813 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2814 if (skb) { 2815 /* 'align' will only take the values 0 or 2 here since all 2816 * frames are required to be aligned to 2-byte boundaries 2817 * when being passed to mac80211; the code here works just 2818 * as well if that isn't true, but mac80211 assumes it can 2819 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2820 */ 2821 int align; 2822 2823 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2824 if (align) { 2825 if (WARN_ON(skb_headroom(skb) < 3)) { 2826 dev_kfree_skb(skb); 2827 skb = NULL; 2828 } else { 2829 u8 *data = skb->data; 2830 size_t len = skb_headlen(skb); 2831 skb->data -= align; 2832 memmove(skb->data, data, len); 2833 skb_set_tail_pointer(skb, len); 2834 } 2835 } 2836 } 2837 #endif 2838 2839 if (skb) { 2840 skb->protocol = eth_type_trans(skb, dev); 2841 ieee80211_deliver_skb_to_local_stack(skb, rx); 2842 } 2843 2844 if (xmit_skb) { 2845 /* 2846 * Send to wireless media and increase priority by 256 to 2847 * keep the received priority instead of reclassifying 2848 * the frame (see cfg80211_classify8021d). 
2849 */ 2850 xmit_skb->priority += 256; 2851 xmit_skb->protocol = htons(ETH_P_802_3); 2852 skb_reset_network_header(xmit_skb); 2853 skb_reset_mac_header(xmit_skb); 2854 dev_queue_xmit(xmit_skb); 2855 } 2856 } 2857 2858 #ifdef CONFIG_MAC80211_MESH 2859 static bool 2860 ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata, 2861 struct sk_buff *skb, int hdrlen) 2862 { 2863 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2864 struct ieee80211_mesh_fast_tx_key key = { 2865 .type = MESH_FAST_TX_TYPE_FORWARDED 2866 }; 2867 struct ieee80211_mesh_fast_tx *entry; 2868 struct ieee80211s_hdr *mesh_hdr; 2869 struct tid_ampdu_tx *tid_tx; 2870 struct sta_info *sta; 2871 struct ethhdr eth; 2872 u8 tid; 2873 2874 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth)); 2875 if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2876 ether_addr_copy(key.addr, mesh_hdr->eaddr1); 2877 else if (!(mesh_hdr->flags & MESH_FLAGS_AE)) 2878 ether_addr_copy(key.addr, skb->data); 2879 else 2880 return false; 2881 2882 entry = mesh_fast_tx_get(sdata, &key); 2883 if (!entry) 2884 return false; 2885 2886 sta = rcu_dereference(entry->mpath->next_hop); 2887 if (!sta) 2888 return false; 2889 2890 if (skb_linearize(skb)) 2891 return false; 2892 2893 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 2894 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); 2895 if (tid_tx) { 2896 if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) 2897 return false; 2898 2899 if (tid_tx->timeout) 2900 tid_tx->last_tx = jiffies; 2901 } 2902 2903 ieee80211_aggr_check(sdata, sta, skb); 2904 2905 if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen, 2906 &skb->protocol)) 2907 hdrlen += ETH_ALEN; 2908 else 2909 skb->protocol = htons(skb->len - hdrlen); 2910 skb_set_network_header(skb, hdrlen + 2); 2911 2912 skb->dev = sdata->dev; 2913 memcpy(ð, skb->data, ETH_HLEN - 2); 2914 skb_pull(skb, 2); 2915 __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx, 2916 eth.h_dest, eth.h_source); 2917 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2918 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2919 2920 return true; 2921 } 2922 #endif 2923 2924 static ieee80211_rx_result 2925 ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, 2926 struct sk_buff *skb) 2927 { 2928 #ifdef CONFIG_MAC80211_MESH 2929 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2930 struct ieee80211_local *local = sdata->local; 2931 uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA; 2932 struct ieee80211_hdr hdr = { 2933 .frame_control = cpu_to_le16(fc) 2934 }; 2935 struct ieee80211_hdr *fwd_hdr; 2936 struct ieee80211s_hdr *mesh_hdr; 2937 struct ieee80211_tx_info *info; 2938 struct sk_buff *fwd_skb; 2939 struct ethhdr *eth; 2940 bool multicast; 2941 int tailroom = 0; 2942 int hdrlen, mesh_hdrlen; 2943 u8 *qos; 2944 2945 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2946 return RX_CONTINUE; 2947 2948 if (!pskb_may_pull(skb, sizeof(*eth) + 6)) 2949 return RX_DROP; 2950 2951 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth)); 2952 mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr); 2953 2954 if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen)) 2955 return RX_DROP; 2956 2957 eth = (struct ethhdr *)skb->data; 2958 multicast = is_multicast_ether_addr(eth->h_dest); 2959 2960 mesh_hdr = (struct ieee80211s_hdr *)(eth + 1); 2961 if (!mesh_hdr->ttl) 2962 return RX_DROP; 2963 2964 /* frame is in RMC, don't forward */ 2965 if (is_multicast_ether_addr(eth->h_dest) && 2966 mesh_rmc_check(sdata, 
eth->h_source, mesh_hdr)) 2967 return RX_DROP; 2968 2969 /* forward packet */ 2970 if (sdata->crypto_tx_tailroom_needed_cnt) 2971 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2972 2973 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2974 struct mesh_path *mppath; 2975 char *proxied_addr; 2976 bool update = false; 2977 2978 if (multicast) 2979 proxied_addr = mesh_hdr->eaddr1; 2980 else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2981 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2982 proxied_addr = mesh_hdr->eaddr2; 2983 else 2984 return RX_DROP; 2985 2986 rcu_read_lock(); 2987 mppath = mpp_path_lookup(sdata, proxied_addr); 2988 if (!mppath) { 2989 mpp_path_add(sdata, proxied_addr, eth->h_source); 2990 } else { 2991 spin_lock_bh(&mppath->state_lock); 2992 if (!ether_addr_equal(mppath->mpp, eth->h_source)) { 2993 memcpy(mppath->mpp, eth->h_source, ETH_ALEN); 2994 update = true; 2995 } 2996 mppath->exp_time = jiffies; 2997 spin_unlock_bh(&mppath->state_lock); 2998 } 2999 3000 /* flush fast xmit cache if the address path changed */ 3001 if (update) 3002 mesh_fast_tx_flush_addr(sdata, proxied_addr); 3003 3004 rcu_read_unlock(); 3005 } 3006 3007 /* Frame has reached destination. Don't forward */ 3008 if (ether_addr_equal(sdata->vif.addr, eth->h_dest)) 3009 goto rx_accept; 3010 3011 if (!--mesh_hdr->ttl) { 3012 if (multicast) 3013 goto rx_accept; 3014 3015 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 3016 return RX_DROP; 3017 } 3018 3019 if (!ifmsh->mshcfg.dot11MeshForwarding) { 3020 if (is_multicast_ether_addr(eth->h_dest)) 3021 goto rx_accept; 3022 3023 return RX_DROP; 3024 } 3025 3026 skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); 3027 3028 if (!multicast && 3029 ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen)) 3030 return RX_QUEUED; 3031 3032 ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control, 3033 eth->h_dest, eth->h_source); 3034 hdrlen = ieee80211_hdrlen(hdr.frame_control); 3035 if (multicast) { 3036 int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth); 3037 3038 fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head + 3039 IEEE80211_ENCRYPT_HEADROOM, 3040 tailroom, GFP_ATOMIC); 3041 if (!fwd_skb) 3042 goto rx_accept; 3043 } else { 3044 fwd_skb = skb; 3045 skb = NULL; 3046 3047 if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr))) 3048 return RX_DROP_U_OOM; 3049 3050 if (skb_linearize(fwd_skb)) 3051 return RX_DROP_U_OOM; 3052 } 3053 3054 fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr)); 3055 memcpy(fwd_hdr, &hdr, hdrlen - 2); 3056 qos = ieee80211_get_qos_ctl(fwd_hdr); 3057 qos[0] = qos[1] = 0; 3058 3059 skb_reset_mac_header(fwd_skb); 3060 hdrlen += mesh_hdrlen; 3061 if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen, 3062 &fwd_skb->protocol)) 3063 hdrlen += ETH_ALEN; 3064 else 3065 fwd_skb->protocol = htons(fwd_skb->len - hdrlen); 3066 skb_set_network_header(fwd_skb, hdrlen + 2); 3067 3068 info = IEEE80211_SKB_CB(fwd_skb); 3069 memset(info, 0, sizeof(*info)); 3070 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 3071 info->control.vif = &sdata->vif; 3072 info->control.jiffies = jiffies; 3073 fwd_skb->dev = sdata->dev; 3074 if (multicast) { 3075 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 3076 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 3077 /* update power mode indication when forwarding */ 3078 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr); 3079 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 3080 /* mesh power mode flags updated in mesh_nexthop_lookup */ 3081 
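		/*
		 * mesh_nexthop_lookup() returned 0: the next hop is known and
		 * the frame's mesh power mode flags were already updated in
		 * that helper, so just account the forwarded unicast frame.
		 */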
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 3082 } else { 3083 /* unable to resolve next hop */ 3084 if (sta) 3085 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 3086 hdr.addr3, 0, 3087 WLAN_REASON_MESH_PATH_NOFORWARD, 3088 sta->sta.addr); 3089 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 3090 kfree_skb(fwd_skb); 3091 goto rx_accept; 3092 } 3093 3094 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 3095 ieee80211_set_qos_hdr(sdata, fwd_skb); 3096 ieee80211_add_pending_skb(local, fwd_skb); 3097 3098 rx_accept: 3099 if (!skb) 3100 return RX_QUEUED; 3101 3102 ieee80211_strip_8023_mesh_hdr(skb); 3103 #endif 3104 3105 return RX_CONTINUE; 3106 } 3107 3108 static ieee80211_rx_result debug_noinline 3109 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 3110 { 3111 struct net_device *dev = rx->sdata->dev; 3112 struct sk_buff *skb = rx->skb; 3113 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3114 __le16 fc = hdr->frame_control; 3115 struct sk_buff_head frame_list; 3116 struct ethhdr ethhdr; 3117 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 3118 3119 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 3120 check_da = NULL; 3121 check_sa = NULL; 3122 } else switch (rx->sdata->vif.type) { 3123 case NL80211_IFTYPE_AP: 3124 case NL80211_IFTYPE_AP_VLAN: 3125 check_da = NULL; 3126 break; 3127 case NL80211_IFTYPE_STATION: 3128 if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 3129 check_sa = NULL; 3130 break; 3131 case NL80211_IFTYPE_MESH_POINT: 3132 check_sa = NULL; 3133 check_da = NULL; 3134 break; 3135 default: 3136 break; 3137 } 3138 3139 skb->dev = dev; 3140 __skb_queue_head_init(&frame_list); 3141 3142 if (ieee80211_data_to_8023_exthdr(skb, ðhdr, 3143 rx->sdata->vif.addr, 3144 rx->sdata->vif.type, 3145 data_offset, true)) 3146 return RX_DROP_U_BAD_AMSDU; 3147 3148 if (rx->sta->amsdu_mesh_control < 0) { 3149 s8 valid = -1; 3150 int i; 3151 3152 for (i = 0; i <= 2; i++) { 3153 if (!ieee80211_is_valid_amsdu(skb, i)) 3154 continue; 3155 3156 if (valid >= 0) { 3157 /* ambiguous */ 3158 valid = -1; 3159 break; 3160 } 3161 3162 valid = i; 3163 } 3164 3165 rx->sta->amsdu_mesh_control = valid; 3166 } 3167 3168 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 3169 rx->sdata->vif.type, 3170 rx->local->hw.extra_tx_headroom, 3171 check_da, check_sa, 3172 rx->sta->amsdu_mesh_control); 3173 3174 while (!skb_queue_empty(&frame_list)) { 3175 rx->skb = __skb_dequeue(&frame_list); 3176 3177 switch (ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb)) { 3178 case RX_QUEUED: 3179 break; 3180 case RX_CONTINUE: 3181 if (ieee80211_frame_allowed(rx, fc)) { 3182 ieee80211_deliver_skb(rx); 3183 break; 3184 } 3185 fallthrough; 3186 default: 3187 dev_kfree_skb(rx->skb); 3188 } 3189 } 3190 3191 return RX_QUEUED; 3192 } 3193 3194 static ieee80211_rx_result debug_noinline 3195 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 3196 { 3197 struct sk_buff *skb = rx->skb; 3198 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3199 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3200 __le16 fc = hdr->frame_control; 3201 3202 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 3203 return RX_CONTINUE; 3204 3205 if (unlikely(!ieee80211_is_data(fc))) 3206 return RX_CONTINUE; 3207 3208 if (unlikely(!ieee80211_is_data_present(fc))) 3209 return RX_DROP; 3210 3211 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 3212 switch (rx->sdata->vif.type) { 3213 case NL80211_IFTYPE_AP_VLAN: 3214 if (!rx->sdata->u.vlan.sta) 3215 
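			/*
			 * Not a 4-addr (WDS) station VLAN, so a 4-address
			 * A-MSDU is not acceptable here and is dropped.
			 */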
return RX_DROP_U_BAD_4ADDR; 3216 break; 3217 case NL80211_IFTYPE_STATION: 3218 if (!rx->sdata->u.mgd.use_4addr) 3219 return RX_DROP_U_BAD_4ADDR; 3220 break; 3221 case NL80211_IFTYPE_MESH_POINT: 3222 break; 3223 default: 3224 return RX_DROP_U_BAD_4ADDR; 3225 } 3226 } 3227 3228 if (is_multicast_ether_addr(hdr->addr1) || !rx->sta) 3229 return RX_DROP_U_BAD_AMSDU; 3230 3231 if (rx->key) { 3232 /* 3233 * We should not receive A-MSDUs on pre-HT connections, 3234 * and HT connections cannot use old ciphers. Thus drop 3235 * them, as in those cases we couldn't even have SPP 3236 * A-MSDUs or such. 3237 */ 3238 switch (rx->key->conf.cipher) { 3239 case WLAN_CIPHER_SUITE_WEP40: 3240 case WLAN_CIPHER_SUITE_WEP104: 3241 case WLAN_CIPHER_SUITE_TKIP: 3242 return RX_DROP_U_BAD_AMSDU_CIPHER; 3243 default: 3244 break; 3245 } 3246 } 3247 3248 return __ieee80211_rx_h_amsdu(rx, 0); 3249 } 3250 3251 static ieee80211_rx_result debug_noinline 3252 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 3253 { 3254 struct ieee80211_sub_if_data *sdata = rx->sdata; 3255 struct ieee80211_local *local = rx->local; 3256 struct net_device *dev = sdata->dev; 3257 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 3258 __le16 fc = hdr->frame_control; 3259 ieee80211_rx_result res; 3260 bool port_control; 3261 3262 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 3263 return RX_CONTINUE; 3264 3265 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 3266 return RX_DROP; 3267 3268 /* Send unexpected-4addr-frame event to hostapd */ 3269 if (ieee80211_has_a4(hdr->frame_control) && 3270 sdata->vif.type == NL80211_IFTYPE_AP) { 3271 if (rx->sta && 3272 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 3273 cfg80211_rx_unexpected_4addr_frame( 3274 rx->sdata->dev, rx->sta->sta.addr, rx->link_id, 3275 GFP_ATOMIC); 3276 return RX_DROP; 3277 } 3278 3279 res = __ieee80211_data_to_8023(rx, &port_control); 3280 if (unlikely(res != RX_CONTINUE)) 3281 return res; 3282 3283 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 3284 if (res != RX_CONTINUE) 3285 return res; 3286 3287 if (!ieee80211_frame_allowed(rx, fc)) 3288 return RX_DROP; 3289 3290 /* directly handle TDLS channel switch requests/responses */ 3291 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 3292 cpu_to_be16(ETH_P_TDLS))) { 3293 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 3294 3295 if (pskb_may_pull(rx->skb, 3296 offsetof(struct ieee80211_tdls_data, u)) && 3297 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 3298 tf->category == WLAN_CATEGORY_TDLS && 3299 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 3300 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 3301 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); 3302 __ieee80211_queue_skb_to_iface(sdata, rx->link_id, 3303 rx->sta, rx->skb); 3304 return RX_QUEUED; 3305 } 3306 } 3307 3308 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 3309 unlikely(port_control) && sdata->bss) { 3310 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 3311 u.ap); 3312 dev = sdata->dev; 3313 rx->sdata = sdata; 3314 } 3315 3316 rx->skb->dev = dev; 3317 3318 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 3319 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 3320 !is_multicast_ether_addr( 3321 ((struct ethhdr *)rx->skb->data)->h_dest) && 3322 (!local->scanning && 3323 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 3324 mod_timer(&local->dynamic_ps_timer, jiffies + 3325 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 3326 3327 
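	/*
	 * All data-path checks have passed: hand the converted 802.3 frame
	 * to the local stack and/or bridge it back to the wireless medium
	 * (see ieee80211_deliver_skb() above for the details).
	 */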
ieee80211_deliver_skb(rx); 3328 3329 return RX_QUEUED; 3330 } 3331 3332 static ieee80211_rx_result debug_noinline 3333 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 3334 { 3335 struct sk_buff *skb = rx->skb; 3336 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 3337 struct tid_ampdu_rx *tid_agg_rx; 3338 u16 start_seq_num; 3339 u16 tid; 3340 3341 if (likely(!ieee80211_is_ctl(bar->frame_control))) 3342 return RX_CONTINUE; 3343 3344 if (ieee80211_is_back_req(bar->frame_control)) { 3345 struct { 3346 __le16 control, start_seq_num; 3347 } __packed bar_data; 3348 struct ieee80211_event event = { 3349 .type = BAR_RX_EVENT, 3350 }; 3351 3352 if (!rx->sta) 3353 return RX_DROP; 3354 3355 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 3356 &bar_data, sizeof(bar_data))) 3357 return RX_DROP; 3358 3359 tid = le16_to_cpu(bar_data.control) >> 12; 3360 3361 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3362 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3363 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3364 WLAN_BACK_RECIPIENT, 3365 WLAN_REASON_QSTA_REQUIRE_SETUP); 3366 3367 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3368 if (!tid_agg_rx) 3369 return RX_DROP; 3370 3371 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3372 event.u.ba.tid = tid; 3373 event.u.ba.ssn = start_seq_num; 3374 event.u.ba.sta = &rx->sta->sta; 3375 3376 /* reset session timer */ 3377 if (tid_agg_rx->timeout) 3378 mod_timer(&tid_agg_rx->session_timer, 3379 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3380 3381 spin_lock(&tid_agg_rx->reorder_lock); 3382 /* release stored frames up to start of BAR */ 3383 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3384 start_seq_num, frames); 3385 spin_unlock(&tid_agg_rx->reorder_lock); 3386 3387 drv_event_callback(rx->local, rx->sdata, &event); 3388 3389 kfree_skb(skb); 3390 return RX_QUEUED; 3391 } 3392 3393 return RX_DROP; 3394 } 3395 3396 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3397 struct ieee80211_mgmt *mgmt, 3398 size_t len) 3399 { 3400 struct ieee80211_local *local = sdata->local; 3401 struct sk_buff *skb; 3402 struct ieee80211_mgmt *resp; 3403 3404 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3405 /* Not to own unicast address */ 3406 return; 3407 } 3408 3409 if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) || 3410 !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { 3411 /* Not from the current AP or not associated yet. 
*/ 3412 return; 3413 } 3414 3415 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3416 /* Too short SA Query request frame */ 3417 return; 3418 } 3419 3420 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3421 if (skb == NULL) 3422 return; 3423 3424 skb_reserve(skb, local->hw.extra_tx_headroom); 3425 resp = skb_put_zero(skb, 24); 3426 memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN); 3427 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3428 memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); 3429 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3430 IEEE80211_STYPE_ACTION); 3431 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3432 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3433 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3434 memcpy(resp->u.action.u.sa_query.trans_id, 3435 mgmt->u.action.u.sa_query.trans_id, 3436 WLAN_SA_QUERY_TR_ID_LEN); 3437 3438 ieee80211_tx_skb(sdata, skb); 3439 } 3440 3441 static void 3442 ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) 3443 { 3444 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3445 struct ieee80211_bss_conf *bss_conf; 3446 const struct element *ie; 3447 size_t baselen; 3448 3449 if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, 3450 NL80211_EXT_FEATURE_BSS_COLOR)) 3451 return; 3452 3453 if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) 3454 return; 3455 3456 bss_conf = rx->link->conf; 3457 if (bss_conf->csa_active || bss_conf->color_change_active || 3458 !bss_conf->he_bss_color.enabled) 3459 return; 3460 3461 baselen = mgmt->u.beacon.variable - rx->skb->data; 3462 if (baselen > rx->skb->len) 3463 return; 3464 3465 ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, 3466 mgmt->u.beacon.variable, 3467 rx->skb->len - baselen); 3468 if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && 3469 ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { 3470 const struct ieee80211_he_operation *he_oper; 3471 u8 color; 3472 3473 he_oper = (void *)(ie->data + 1); 3474 if (le32_get_bits(he_oper->he_oper_params, 3475 IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) 3476 return; 3477 3478 color = le32_get_bits(he_oper->he_oper_params, 3479 IEEE80211_HE_OPERATION_BSS_COLOR_MASK); 3480 if (color == bss_conf->he_bss_color.color) 3481 ieee80211_obss_color_collision_notify(&rx->sdata->vif, 3482 BIT_ULL(color), 3483 bss_conf->link_id); 3484 } 3485 } 3486 3487 static ieee80211_rx_result debug_noinline 3488 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3489 { 3490 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3491 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3492 3493 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3494 return RX_CONTINUE; 3495 3496 /* 3497 * From here on, look only at management frames. 3498 * Data and control frames are already handled, 3499 * and unknown (reserved) frames are useless. 
3500	 */
3501	if (rx->skb->len < 24)
3502		return RX_DROP;
3503
3504	if (!ieee80211_is_mgmt(mgmt->frame_control))
3505		return RX_DROP;
3506
3507	/* drop too small action frames */
3508	if (ieee80211_is_action(mgmt->frame_control) &&
3509	    rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
3510		return RX_DROP_U_RUNT_ACTION;
3511
3512	if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3513	    ieee80211_is_beacon(mgmt->frame_control) &&
3514	    !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3515		int sig = 0;
3516
3517		/* sw bss color collision detection */
3518		ieee80211_rx_check_bss_color_collision(rx);
3519
3520		if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3521		    !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3522			sig = status->signal;
3523
3524		cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3525						rx->skb->data, rx->skb->len,
3526						ieee80211_rx_status_to_khz(status),
3527						sig);
3528		rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3529	}
3530
3531	return ieee80211_drop_unencrypted_mgmt(rx);
3532 }
3533
3534 static bool
3535 ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx)
3536 {
3537	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data;
3538	struct ieee80211_sub_if_data *sdata = rx->sdata;
3539
3540	/* TWT actions are only supported in AP for the moment */
3541	if (sdata->vif.type != NL80211_IFTYPE_AP)
3542		return false;
3543
3544	if (!rx->local->ops->add_twt_setup)
3545		return false;
3546
3547	if (!sdata->vif.bss_conf.twt_responder)
3548		return false;
3549
3550	if (!rx->sta)
3551		return false;
3552
3553	switch (mgmt->u.action.u.s1g.action_code) {
3554	case WLAN_S1G_TWT_SETUP: {
3555		struct ieee80211_twt_setup *twt;
3556
3557		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3558				   1 + /* action code */
3559				   sizeof(struct ieee80211_twt_setup) +
3560				   2 /* TWT req_type agrt */)
3561			break;
3562
3563		twt = (void *)mgmt->u.action.u.s1g.variable;
3564		if (twt->element_id != WLAN_EID_S1G_TWT)
3565			break;
3566
3567		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3568				   4 + /* action code + token + tlv */
3569				   twt->length)
3570			break;
3571
3572		return true; /* queue the frame */
3573	}
3574	case WLAN_S1G_TWT_TEARDOWN:
3575		if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2)
3576			break;
3577
3578		return true; /* queue the frame */
3579	default:
3580		break;
3581	}
3582
3583	return false;
3584 }
3585
3586 static ieee80211_rx_result debug_noinline
3587 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3588 {
3589	struct ieee80211_local *local = rx->local;
3590	struct ieee80211_sub_if_data *sdata = rx->sdata;
3591	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3592	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3593	int len = rx->skb->len;
3594
3595	if (!ieee80211_is_action(mgmt->frame_control))
3596		return RX_CONTINUE;
3597
3598	if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3599	    mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3600	    mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3601		return RX_DROP_U_ACTION_UNKNOWN_SRC;
3602
3603	switch (mgmt->u.action.category) {
3604	case WLAN_CATEGORY_HT:
3605		/* reject HT action frames from stations that support
3606		 * neither HT nor HE
3607		 */
3608		if (!rx->link_sta->pub->ht_cap.ht_supported &&
3609		    !rx->link_sta->pub->he_cap.has_he)
3610			goto invalid;
3611
3612		if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3613		    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3614		    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3615		    sdata->vif.type != NL80211_IFTYPE_AP &&
3616		    sdata->vif.type !=
NL80211_IFTYPE_ADHOC) 3617 break; 3618 3619 /* verify action & smps_control/chanwidth are present */ 3620 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3621 goto invalid; 3622 3623 switch (mgmt->u.action.u.ht_smps.action) { 3624 case WLAN_HT_ACTION_SMPS: { 3625 struct ieee80211_supported_band *sband; 3626 enum ieee80211_smps_mode smps_mode; 3627 struct sta_opmode_info sta_opmode = {}; 3628 3629 if (sdata->vif.type != NL80211_IFTYPE_AP && 3630 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3631 goto handled; 3632 3633 /* convert to HT capability */ 3634 switch (mgmt->u.action.u.ht_smps.smps_control) { 3635 case WLAN_HT_SMPS_CONTROL_DISABLED: 3636 smps_mode = IEEE80211_SMPS_OFF; 3637 break; 3638 case WLAN_HT_SMPS_CONTROL_STATIC: 3639 smps_mode = IEEE80211_SMPS_STATIC; 3640 break; 3641 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3642 smps_mode = IEEE80211_SMPS_DYNAMIC; 3643 break; 3644 default: 3645 goto invalid; 3646 } 3647 3648 /* if no change do nothing */ 3649 if (rx->link_sta->pub->smps_mode == smps_mode) 3650 goto handled; 3651 rx->link_sta->pub->smps_mode = smps_mode; 3652 sta_opmode.smps_mode = 3653 ieee80211_smps_mode_to_smps_mode(smps_mode); 3654 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3655 3656 sband = rx->local->hw.wiphy->bands[status->band]; 3657 3658 rate_control_rate_update(local, sband, rx->link_sta, 3659 IEEE80211_RC_SMPS_CHANGED); 3660 cfg80211_sta_opmode_change_notify(sdata->dev, 3661 rx->sta->addr, 3662 &sta_opmode, 3663 GFP_ATOMIC); 3664 goto handled; 3665 } 3666 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3667 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3668 3669 if (chanwidth != IEEE80211_HT_CHANWIDTH_20MHZ && 3670 chanwidth != IEEE80211_HT_CHANWIDTH_ANY) 3671 goto invalid; 3672 3673 /* If it doesn't support 40 MHz it can't change ... 
*/ 3674 if (!(rx->link_sta->pub->ht_cap.cap & 3675 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3676 goto handled; 3677 3678 goto queue; 3679 } 3680 default: 3681 goto invalid; 3682 } 3683 3684 break; 3685 case WLAN_CATEGORY_PUBLIC: 3686 case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: 3687 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3688 goto invalid; 3689 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3690 break; 3691 if (!rx->sta) 3692 break; 3693 if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) 3694 break; 3695 if (mgmt->u.action.u.ext_chan_switch.action_code != 3696 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3697 break; 3698 if (len < offsetof(struct ieee80211_mgmt, 3699 u.action.u.ext_chan_switch.variable)) 3700 goto invalid; 3701 goto queue; 3702 case WLAN_CATEGORY_VHT: 3703 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3704 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3705 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3706 sdata->vif.type != NL80211_IFTYPE_AP && 3707 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3708 break; 3709 3710 /* verify action code is present */ 3711 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3712 goto invalid; 3713 3714 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3715 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3716 /* verify opmode is present */ 3717 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3718 goto invalid; 3719 goto queue; 3720 } 3721 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3722 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3723 goto invalid; 3724 goto queue; 3725 } 3726 default: 3727 break; 3728 } 3729 break; 3730 case WLAN_CATEGORY_BACK: 3731 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3732 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3733 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3734 sdata->vif.type != NL80211_IFTYPE_AP && 3735 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3736 break; 3737 3738 /* verify action_code is present */ 3739 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3740 break; 3741 3742 switch (mgmt->u.action.u.addba_req.action_code) { 3743 case WLAN_ACTION_ADDBA_REQ: 3744 if (len < (IEEE80211_MIN_ACTION_SIZE + 3745 sizeof(mgmt->u.action.u.addba_req))) 3746 goto invalid; 3747 break; 3748 case WLAN_ACTION_ADDBA_RESP: 3749 if (len < (IEEE80211_MIN_ACTION_SIZE + 3750 sizeof(mgmt->u.action.u.addba_resp))) 3751 goto invalid; 3752 break; 3753 case WLAN_ACTION_DELBA: 3754 if (len < (IEEE80211_MIN_ACTION_SIZE + 3755 sizeof(mgmt->u.action.u.delba))) 3756 goto invalid; 3757 break; 3758 default: 3759 goto invalid; 3760 } 3761 3762 goto queue; 3763 case WLAN_CATEGORY_SPECTRUM_MGMT: 3764 /* verify action_code is present */ 3765 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3766 break; 3767 3768 switch (mgmt->u.action.u.measurement.action_code) { 3769 case WLAN_ACTION_SPCT_MSR_REQ: 3770 if (status->band != NL80211_BAND_5GHZ) 3771 break; 3772 3773 if (len < (IEEE80211_MIN_ACTION_SIZE + 3774 sizeof(mgmt->u.action.u.measurement))) 3775 break; 3776 3777 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3778 break; 3779 3780 ieee80211_process_measurement_req(sdata, mgmt, len); 3781 goto handled; 3782 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3783 u8 *bssid; 3784 if (len < (IEEE80211_MIN_ACTION_SIZE + 3785 sizeof(mgmt->u.action.u.chan_switch))) 3786 break; 3787 3788 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3789 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3790 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3791 break; 3792 3793 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3794 bssid = sdata->deflink.u.mgd.bssid; 3795 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3796 
bssid = sdata->u.ibss.bssid;
3797 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3798 bssid = mgmt->sa;
3799 else
3800 break;
3801
3802 if (!ether_addr_equal(mgmt->bssid, bssid))
3803 break;
3804
3805 goto queue;
3806 }
3807 }
3808 break;
3809 case WLAN_CATEGORY_SELF_PROTECTED:
3810 if (len < (IEEE80211_MIN_ACTION_SIZE +
3811 sizeof(mgmt->u.action.u.self_prot.action_code)))
3812 break;
3813
3814 switch (mgmt->u.action.u.self_prot.action_code) {
3815 case WLAN_SP_MESH_PEERING_OPEN:
3816 case WLAN_SP_MESH_PEERING_CLOSE:
3817 case WLAN_SP_MESH_PEERING_CONFIRM:
3818 if (!ieee80211_vif_is_mesh(&sdata->vif))
3819 goto invalid;
3820 if (sdata->u.mesh.user_mpm)
3821 /* userspace handles this frame */
3822 break;
3823 goto queue;
3824 case WLAN_SP_MGK_INFORM:
3825 case WLAN_SP_MGK_ACK:
3826 if (!ieee80211_vif_is_mesh(&sdata->vif))
3827 goto invalid;
3828 break;
3829 }
3830 break;
3831 case WLAN_CATEGORY_MESH_ACTION:
3832 if (len < (IEEE80211_MIN_ACTION_SIZE +
3833 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3834 break;
3835
3836 if (!ieee80211_vif_is_mesh(&sdata->vif))
3837 break;
3838 if (mesh_action_is_path_sel(mgmt) &&
3839 !mesh_path_sel_is_hwmp(sdata))
3840 break;
3841 goto queue;
3842 case WLAN_CATEGORY_S1G:
3843 if (len < offsetofend(typeof(*mgmt),
3844 u.action.u.s1g.action_code))
3845 break;
3846
3847 switch (mgmt->u.action.u.s1g.action_code) {
3848 case WLAN_S1G_TWT_SETUP:
3849 case WLAN_S1G_TWT_TEARDOWN:
3850 if (ieee80211_process_rx_twt_action(rx))
3851 goto queue;
3852 break;
3853 default:
3854 break;
3855 }
3856 break;
3857 case WLAN_CATEGORY_PROTECTED_EHT:
3858 if (len < offsetofend(typeof(*mgmt),
3859 u.action.u.ttlm_req.action_code))
3860 break;
3861
3862 switch (mgmt->u.action.u.ttlm_req.action_code) {
3863 case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
3864 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3865 break;
3866
3867 if (len < offsetofend(typeof(*mgmt),
3868 u.action.u.ttlm_req))
3869 goto invalid;
3870 goto queue;
3871 case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
3872 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3873 break;
3874
3875 if (len < offsetofend(typeof(*mgmt),
3876 u.action.u.ttlm_res))
3877 goto invalid;
3878 goto queue;
3879 case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN:
3880 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3881 break;
3882
3883 if (len < offsetofend(typeof(*mgmt),
3884 u.action.u.ttlm_tear_down))
3885 goto invalid;
3886 goto queue;
3887 case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
3888 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3889 break;
3890
3891 /* The reconfiguration response action frame must contain
3892 * at least one 'Status Duple' entry (3 octets)
3893 */
3894 if (len <
3895 offsetofend(typeof(*mgmt),
3896 u.action.u.ml_reconf_resp) + 3)
3897 goto invalid;
3898 goto queue;
3899 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP:
3900 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3901 break;
3902
3903 if (len < offsetofend(typeof(*mgmt),
3904 u.action.u.epcs) +
3905 IEEE80211_EPCS_ENA_RESP_BODY_LEN)
3906 goto invalid;
3907 goto queue;
3908 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN:
3909 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3910 break;
3911
3912 if (len < offsetofend(typeof(*mgmt),
3913 u.action.u.epcs))
3914 goto invalid;
3915 goto queue;
3916 default:
3917 break;
3918 }
3919 break;
3920 }
3921
3922 return RX_CONTINUE;
3923
3924 invalid:
3925 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3926 /* will return in the next handlers */
3927 return RX_CONTINUE;
3928
3929 handled:
3930 if (rx->sta) 3931
rx->link_sta->rx_stats.packets++; 3932 dev_kfree_skb(rx->skb); 3933 return RX_QUEUED; 3934 3935 queue: 3936 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 3937 return RX_QUEUED; 3938 } 3939 3940 static ieee80211_rx_result debug_noinline 3941 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3942 { 3943 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3944 struct cfg80211_rx_info info = { 3945 .freq = ieee80211_rx_status_to_khz(status), 3946 .buf = rx->skb->data, 3947 .len = rx->skb->len, 3948 .link_id = rx->link_id, 3949 .have_link_id = rx->link_id >= 0, 3950 }; 3951 3952 /* skip known-bad action frames and return them in the next handler */ 3953 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3954 return RX_CONTINUE; 3955 3956 /* 3957 * Getting here means the kernel doesn't know how to handle 3958 * it, but maybe userspace does ... include returned frames 3959 * so userspace can register for those to know whether ones 3960 * it transmitted were processed or returned. 3961 */ 3962 3963 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3964 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3965 info.sig_dbm = status->signal; 3966 3967 if (ieee80211_is_timing_measurement(rx->skb) || 3968 ieee80211_is_ftm(rx->skb)) { 3969 info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp); 3970 info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp); 3971 } 3972 3973 if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) { 3974 if (rx->sta) 3975 rx->link_sta->rx_stats.packets++; 3976 dev_kfree_skb(rx->skb); 3977 return RX_QUEUED; 3978 } 3979 3980 return RX_CONTINUE; 3981 } 3982 3983 static ieee80211_rx_result debug_noinline 3984 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 3985 { 3986 struct ieee80211_sub_if_data *sdata = rx->sdata; 3987 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3988 int len = rx->skb->len; 3989 3990 if (!ieee80211_is_action(mgmt->frame_control)) 3991 return RX_CONTINUE; 3992 3993 switch (mgmt->u.action.category) { 3994 case WLAN_CATEGORY_SA_QUERY: 3995 if (len < (IEEE80211_MIN_ACTION_SIZE + 3996 sizeof(mgmt->u.action.u.sa_query))) 3997 break; 3998 3999 switch (mgmt->u.action.u.sa_query.action) { 4000 case WLAN_ACTION_SA_QUERY_REQUEST: 4001 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4002 break; 4003 ieee80211_process_sa_query_req(sdata, mgmt, len); 4004 goto handled; 4005 } 4006 break; 4007 } 4008 4009 return RX_CONTINUE; 4010 4011 handled: 4012 if (rx->sta) 4013 rx->link_sta->rx_stats.packets++; 4014 dev_kfree_skb(rx->skb); 4015 return RX_QUEUED; 4016 } 4017 4018 static ieee80211_rx_result debug_noinline 4019 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 4020 { 4021 struct ieee80211_local *local = rx->local; 4022 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 4023 struct sk_buff *nskb; 4024 struct ieee80211_sub_if_data *sdata = rx->sdata; 4025 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4026 4027 if (!ieee80211_is_action(mgmt->frame_control)) 4028 return RX_CONTINUE; 4029 4030 /* 4031 * For AP mode, hostapd is responsible for handling any action 4032 * frames that we didn't handle, including returning unknown 4033 * ones. For all other modes we will return them to the sender, 4034 * setting the 0x80 bit in the action category, as required by 4035 * 802.11-2012 9.24.4. 4036 * Newer versions of hostapd use the management frame registration 4037 * mechanisms and old cooked monitor interface is no longer supported. 
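 * The returned copy built below has bit 7 of the category field set, its
 * DA set to the original sender and its SA to our own address, and is
 * transmitted on TID 7 on the band the frame was received on.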
4038 */ 4039 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 4040 (sdata->vif.type == NL80211_IFTYPE_AP || 4041 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 4042 return RX_DROP; 4043 4044 if (is_multicast_ether_addr(mgmt->da)) 4045 return RX_DROP; 4046 4047 /* do not return rejected action frames */ 4048 if (mgmt->u.action.category & 0x80) 4049 return RX_DROP_U_REJECTED_ACTION_RESPONSE; 4050 4051 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 4052 GFP_ATOMIC); 4053 if (nskb) { 4054 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 4055 4056 nmgmt->u.action.category |= 0x80; 4057 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 4058 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 4059 4060 memset(nskb->cb, 0, sizeof(nskb->cb)); 4061 4062 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 4063 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 4064 4065 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 4066 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 4067 IEEE80211_TX_CTL_NO_CCK_RATE; 4068 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 4069 info->hw_queue = 4070 local->hw.offchannel_tx_hw_queue; 4071 } 4072 4073 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1, 4074 status->band); 4075 } 4076 4077 return RX_DROP_U_UNKNOWN_ACTION_REJECTED; 4078 } 4079 4080 static ieee80211_rx_result debug_noinline 4081 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 4082 { 4083 struct ieee80211_sub_if_data *sdata = rx->sdata; 4084 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 4085 4086 if (!ieee80211_is_ext(hdr->frame_control)) 4087 return RX_CONTINUE; 4088 4089 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4090 return RX_DROP; 4091 4092 /* for now only beacons are ext, so queue them */ 4093 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4094 4095 return RX_QUEUED; 4096 } 4097 4098 static ieee80211_rx_result debug_noinline 4099 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 4100 { 4101 struct ieee80211_sub_if_data *sdata = rx->sdata; 4102 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 4103 __le16 stype; 4104 4105 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 4106 4107 if (!ieee80211_vif_is_mesh(&sdata->vif) && 4108 sdata->vif.type != NL80211_IFTYPE_ADHOC && 4109 sdata->vif.type != NL80211_IFTYPE_OCB && 4110 sdata->vif.type != NL80211_IFTYPE_STATION) 4111 return RX_DROP; 4112 4113 switch (stype) { 4114 case cpu_to_le16(IEEE80211_STYPE_AUTH): 4115 case cpu_to_le16(IEEE80211_STYPE_BEACON): 4116 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 4117 /* process for all: mesh, mlme, ibss */ 4118 break; 4119 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 4120 if (is_multicast_ether_addr(mgmt->da) && 4121 !is_broadcast_ether_addr(mgmt->da)) 4122 return RX_DROP; 4123 4124 /* process only for station/IBSS */ 4125 if (sdata->vif.type != NL80211_IFTYPE_STATION && 4126 sdata->vif.type != NL80211_IFTYPE_ADHOC) 4127 return RX_DROP; 4128 break; 4129 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 4130 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 4131 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 4132 if (is_multicast_ether_addr(mgmt->da) && 4133 !is_broadcast_ether_addr(mgmt->da)) 4134 return RX_DROP; 4135 4136 /* process only for station */ 4137 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4138 return RX_DROP; 4139 break; 4140 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 4141 /* process only for ibss and mesh */ 4142 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 4143 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 4144 return RX_DROP; 4145 break; 4146 
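/* any other management frame subtype is not handled here and is dropped */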
default: 4147 return RX_DROP; 4148 } 4149 4150 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4151 4152 return RX_QUEUED; 4153 } 4154 4155 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 4156 ieee80211_rx_result res) 4157 { 4158 if (res == RX_QUEUED) { 4159 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 4160 return; 4161 } 4162 4163 if (res != RX_CONTINUE) { 4164 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 4165 if (rx->sta) 4166 rx->link_sta->rx_stats.dropped++; 4167 } 4168 4169 kfree_skb_reason(rx->skb, (__force u32)res); 4170 } 4171 4172 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 4173 struct sk_buff_head *frames) 4174 { 4175 ieee80211_rx_result res = RX_DROP; 4176 struct sk_buff *skb; 4177 4178 #define CALL_RXH(rxh) \ 4179 do { \ 4180 res = rxh(rx); \ 4181 if (res != RX_CONTINUE) \ 4182 goto rxh_next; \ 4183 } while (0) 4184 4185 /* Lock here to avoid hitting all of the data used in the RX 4186 * path (e.g. key data, station data, ...) concurrently when 4187 * a frame is released from the reorder buffer due to timeout 4188 * from the timer, potentially concurrently with RX from the 4189 * driver. 4190 */ 4191 spin_lock_bh(&rx->local->rx_path_lock); 4192 4193 while ((skb = __skb_dequeue(frames))) { 4194 /* 4195 * all the other fields are valid across frames 4196 * that belong to an aMPDU since they are on the 4197 * same TID from the same station 4198 */ 4199 rx->skb = skb; 4200 4201 if (WARN_ON_ONCE(!rx->link)) 4202 goto rxh_next; 4203 4204 CALL_RXH(ieee80211_rx_h_check_more_data); 4205 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 4206 CALL_RXH(ieee80211_rx_h_sta_process); 4207 CALL_RXH(ieee80211_rx_h_decrypt); 4208 CALL_RXH(ieee80211_rx_h_defragment); 4209 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 4210 /* must be after MMIC verify so header is counted in MPDU mic */ 4211 CALL_RXH(ieee80211_rx_h_amsdu); 4212 CALL_RXH(ieee80211_rx_h_data); 4213 4214 /* special treatment -- needs the queue */ 4215 res = ieee80211_rx_h_ctrl(rx, frames); 4216 if (res != RX_CONTINUE) 4217 goto rxh_next; 4218 4219 CALL_RXH(ieee80211_rx_h_mgmt_check); 4220 CALL_RXH(ieee80211_rx_h_action); 4221 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 4222 CALL_RXH(ieee80211_rx_h_action_post_userspace); 4223 CALL_RXH(ieee80211_rx_h_action_return); 4224 CALL_RXH(ieee80211_rx_h_ext); 4225 CALL_RXH(ieee80211_rx_h_mgmt); 4226 4227 rxh_next: 4228 ieee80211_rx_handlers_result(rx, res); 4229 4230 #undef CALL_RXH 4231 } 4232 4233 spin_unlock_bh(&rx->local->rx_path_lock); 4234 } 4235 4236 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 4237 { 4238 struct sk_buff_head reorder_release; 4239 ieee80211_rx_result res = RX_DROP; 4240 4241 __skb_queue_head_init(&reorder_release); 4242 4243 #define CALL_RXH(rxh) \ 4244 do { \ 4245 res = rxh(rx); \ 4246 if (res != RX_CONTINUE) \ 4247 goto rxh_next; \ 4248 } while (0) 4249 4250 CALL_RXH(ieee80211_rx_h_check_dup); 4251 CALL_RXH(ieee80211_rx_h_check); 4252 4253 ieee80211_rx_reorder_ampdu(rx, &reorder_release); 4254 4255 ieee80211_rx_handlers(rx, &reorder_release); 4256 return; 4257 4258 rxh_next: 4259 ieee80211_rx_handlers_result(rx, res); 4260 4261 #undef CALL_RXH 4262 } 4263 4264 static bool 4265 ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4266 { 4267 return !!(sta->valid_links & BIT(link_id)); 4268 } 4269 4270 static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, 4271 u8 link_id) 4272 { 4273 rx->link_id = link_id; 4274 rx->link = 
rcu_dereference(rx->sdata->link[link_id]); 4275 4276 if (!rx->sta) 4277 return rx->link; 4278 4279 if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) 4280 return false; 4281 4282 rx->link_sta = rcu_dereference(rx->sta->link[link_id]); 4283 4284 return rx->link && rx->link_sta; 4285 } 4286 4287 static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, 4288 struct sta_info *sta, int link_id) 4289 { 4290 rx->link_id = link_id; 4291 rx->sta = sta; 4292 4293 if (sta) { 4294 rx->local = sta->sdata->local; 4295 if (!rx->sdata) 4296 rx->sdata = sta->sdata; 4297 rx->link_sta = &sta->deflink; 4298 } else { 4299 rx->link_sta = NULL; 4300 } 4301 4302 if (link_id < 0) { 4303 if (ieee80211_vif_is_mld(&rx->sdata->vif) && 4304 sta && !sta->sta.valid_links) 4305 rx->link = 4306 rcu_dereference(rx->sdata->link[sta->deflink.link_id]); 4307 else 4308 rx->link = &rx->sdata->deflink; 4309 } else if (!ieee80211_rx_data_set_link(rx, link_id)) { 4310 return false; 4311 } 4312 4313 return true; 4314 } 4315 4316 /* 4317 * This function makes calls into the RX path, therefore 4318 * it has to be invoked under RCU read lock. 4319 */ 4320 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 4321 { 4322 struct sk_buff_head frames; 4323 struct ieee80211_rx_data rx = { 4324 /* This is OK -- must be QoS data frame */ 4325 .security_idx = tid, 4326 .seqno_idx = tid, 4327 }; 4328 struct tid_ampdu_rx *tid_agg_rx; 4329 int link_id = -1; 4330 4331 /* FIXME: statistics won't be right with this */ 4332 if (sta->sta.valid_links) 4333 link_id = ffs(sta->sta.valid_links) - 1; 4334 4335 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 4336 return; 4337 4338 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4339 if (!tid_agg_rx) 4340 return; 4341 4342 __skb_queue_head_init(&frames); 4343 4344 spin_lock(&tid_agg_rx->reorder_lock); 4345 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4346 spin_unlock(&tid_agg_rx->reorder_lock); 4347 4348 if (!skb_queue_empty(&frames)) { 4349 struct ieee80211_event event = { 4350 .type = BA_FRAME_TIMEOUT, 4351 .u.ba.tid = tid, 4352 .u.ba.sta = &sta->sta, 4353 }; 4354 drv_event_callback(rx.local, rx.sdata, &event); 4355 } 4356 4357 ieee80211_rx_handlers(&rx, &frames); 4358 } 4359 4360 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 4361 u16 ssn, u64 filtered, 4362 u16 received_mpdus) 4363 { 4364 struct ieee80211_local *local; 4365 struct sta_info *sta; 4366 struct tid_ampdu_rx *tid_agg_rx; 4367 struct sk_buff_head frames; 4368 struct ieee80211_rx_data rx = { 4369 /* This is OK -- must be QoS data frame */ 4370 .security_idx = tid, 4371 .seqno_idx = tid, 4372 }; 4373 int i, diff; 4374 4375 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 4376 return; 4377 4378 __skb_queue_head_init(&frames); 4379 4380 sta = container_of(pubsta, struct sta_info, sta); 4381 4382 local = sta->sdata->local; 4383 WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, 4384 "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", 4385 local->hw.max_rx_aggregation_subframes); 4386 4387 if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) 4388 return; 4389 4390 rcu_read_lock(); 4391 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4392 if (!tid_agg_rx) 4393 goto out; 4394 4395 spin_lock_bh(&tid_agg_rx->reorder_lock); 4396 4397 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 4398 int release; 4399 4400 /* release all frames in the reorder buffer */ 4401 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 
4402 IEEE80211_SN_MODULO; 4403 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 4404 release, &frames); 4405 /* update ssn to match received ssn */ 4406 tid_agg_rx->head_seq_num = ssn; 4407 } else { 4408 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 4409 &frames); 4410 } 4411 4412 /* handle the case that received ssn is behind the mac ssn. 4413 * it can be tid_agg_rx->buf_size behind and still be valid */ 4414 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 4415 if (diff >= tid_agg_rx->buf_size) { 4416 tid_agg_rx->reorder_buf_filtered = 0; 4417 goto release; 4418 } 4419 filtered = filtered >> diff; 4420 ssn += diff; 4421 4422 /* update bitmap */ 4423 for (i = 0; i < tid_agg_rx->buf_size; i++) { 4424 int index = (ssn + i) % tid_agg_rx->buf_size; 4425 4426 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 4427 if (filtered & BIT_ULL(i)) 4428 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 4429 } 4430 4431 /* now process also frames that the filter marking released */ 4432 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4433 4434 release: 4435 spin_unlock_bh(&tid_agg_rx->reorder_lock); 4436 4437 ieee80211_rx_handlers(&rx, &frames); 4438 4439 out: 4440 rcu_read_unlock(); 4441 } 4442 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 4443 4444 /* main receive path */ 4445 4446 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 4447 { 4448 return ether_addr_equal(raddr, addr) || 4449 is_broadcast_ether_addr(raddr); 4450 } 4451 4452 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 4453 { 4454 struct ieee80211_sub_if_data *sdata = rx->sdata; 4455 struct sk_buff *skb = rx->skb; 4456 struct ieee80211_hdr *hdr = (void *)skb->data; 4457 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4458 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 4459 bool multicast = is_multicast_ether_addr(hdr->addr1) || 4460 ieee80211_is_s1g_beacon(hdr->frame_control); 4461 4462 switch (sdata->vif.type) { 4463 case NL80211_IFTYPE_STATION: 4464 if (!bssid && !sdata->u.mgd.use_4addr) 4465 return false; 4466 if (ieee80211_is_first_frag(hdr->seq_ctrl) && 4467 ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 4468 return false; 4469 if (multicast) 4470 return true; 4471 return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id); 4472 case NL80211_IFTYPE_ADHOC: 4473 if (!bssid) 4474 return false; 4475 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4476 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || 4477 !is_valid_ether_addr(hdr->addr2)) 4478 return false; 4479 if (ieee80211_is_beacon(hdr->frame_control)) 4480 return true; 4481 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 4482 return false; 4483 if (!multicast && 4484 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4485 return false; 4486 if (!rx->sta) { 4487 int rate_idx; 4488 if (status->encoding != RX_ENC_LEGACY) 4489 rate_idx = 0; /* TODO: HT/VHT rates */ 4490 else 4491 rate_idx = status->rate_idx; 4492 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2, 4493 BIT(rate_idx)); 4494 } 4495 return true; 4496 case NL80211_IFTYPE_OCB: 4497 if (!bssid) 4498 return false; 4499 if (!ieee80211_is_data_present(hdr->frame_control)) 4500 return false; 4501 if (!is_broadcast_ether_addr(bssid)) 4502 return false; 4503 if (!multicast && 4504 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4505 return false; 4506 /* reject invalid/our STA address */ 4507 if (!is_valid_ether_addr(hdr->addr2) || 4508 ether_addr_equal(sdata->dev->dev_addr, 
hdr->addr2)) 4509 return false; 4510 if (!rx->sta) { 4511 int rate_idx; 4512 if (status->encoding != RX_ENC_LEGACY) 4513 rate_idx = 0; /* TODO: HT rates */ 4514 else 4515 rate_idx = status->rate_idx; 4516 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4517 BIT(rate_idx)); 4518 } 4519 return true; 4520 case NL80211_IFTYPE_MESH_POINT: 4521 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4522 return false; 4523 if (multicast) 4524 return true; 4525 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4526 case NL80211_IFTYPE_AP_VLAN: 4527 case NL80211_IFTYPE_AP: 4528 if (!bssid) 4529 return ieee80211_is_our_addr(sdata, hdr->addr1, 4530 &rx->link_id); 4531 4532 if (!is_broadcast_ether_addr(bssid) && 4533 !ieee80211_is_our_addr(sdata, bssid, NULL)) { 4534 /* 4535 * Accept public action frames even when the 4536 * BSSID doesn't match, this is used for P2P 4537 * and location updates. Note that mac80211 4538 * itself never looks at these frames. 4539 */ 4540 if (!multicast && 4541 !ieee80211_is_our_addr(sdata, hdr->addr1, 4542 &rx->link_id)) 4543 return false; 4544 if (ieee80211_is_public_action(hdr, skb->len)) 4545 return true; 4546 return ieee80211_is_beacon(hdr->frame_control); 4547 } 4548 4549 if (!ieee80211_has_tods(hdr->frame_control)) { 4550 /* ignore data frames to TDLS-peers */ 4551 if (ieee80211_is_data(hdr->frame_control)) 4552 return false; 4553 /* ignore action frames to TDLS-peers */ 4554 if (ieee80211_is_action(hdr->frame_control) && 4555 !is_broadcast_ether_addr(bssid) && 4556 !ether_addr_equal(bssid, hdr->addr1)) 4557 return false; 4558 } 4559 4560 /* 4561 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4562 * the BSSID - we've checked that already but may have accepted 4563 * the wildcard (ff:ff:ff:ff:ff:ff). 4564 * 4565 * It also says: 4566 * The BSSID of the Data frame is determined as follows: 4567 * a) If the STA is contained within an AP or is associated 4568 * with an AP, the BSSID is the address currently in use 4569 * by the STA contained in the AP. 4570 * 4571 * So we should not accept data frames with an address that's 4572 * multicast. 4573 * 4574 * Accepting it also opens a security problem because stations 4575 * could encrypt it with the GTK and inject traffic that way. 4576 */ 4577 if (ieee80211_is_data(hdr->frame_control) && multicast) 4578 return false; 4579 4580 return true; 4581 case NL80211_IFTYPE_P2P_DEVICE: 4582 return ieee80211_is_public_action(hdr, skb->len) || 4583 ieee80211_is_probe_req(hdr->frame_control) || 4584 ieee80211_is_probe_resp(hdr->frame_control) || 4585 ieee80211_is_beacon(hdr->frame_control) || 4586 (ieee80211_is_auth(hdr->frame_control) && 4587 ether_addr_equal(sdata->vif.addr, hdr->addr1)); 4588 case NL80211_IFTYPE_NAN: 4589 /* Accept only frames that are addressed to the NAN cluster 4590 * (based on the Cluster ID). From these frames, accept only 4591 * action frames or authentication frames that are addressed to 4592 * the local NAN interface. 
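 * (the Cluster ID is carried in A3, while A1 must match the local
 * interface address for authentication frames)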
4593 */ 4594 return memcmp(sdata->wdev.u.nan.cluster_id, 4595 hdr->addr3, ETH_ALEN) == 0 && 4596 (ieee80211_is_public_action(hdr, skb->len) || 4597 (ieee80211_is_auth(hdr->frame_control) && 4598 ether_addr_equal(sdata->vif.addr, hdr->addr1))); 4599 default: 4600 break; 4601 } 4602 4603 WARN_ON_ONCE(1); 4604 return false; 4605 } 4606 4607 void ieee80211_check_fast_rx(struct sta_info *sta) 4608 { 4609 struct ieee80211_sub_if_data *sdata = sta->sdata; 4610 struct ieee80211_local *local = sdata->local; 4611 struct ieee80211_key *key; 4612 struct ieee80211_fast_rx fastrx = { 4613 .dev = sdata->dev, 4614 .vif_type = sdata->vif.type, 4615 .control_port_protocol = sdata->control_port_protocol, 4616 }, *old, *new = NULL; 4617 u32 offload_flags; 4618 bool set_offload = false; 4619 bool assign = false; 4620 bool offload; 4621 4622 /* use sparse to check that we don't return without updating */ 4623 __acquire(check_fast_rx); 4624 4625 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4626 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4627 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4628 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4629 4630 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4631 4632 /* fast-rx doesn't do reordering */ 4633 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4634 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4635 goto clear; 4636 4637 switch (sdata->vif.type) { 4638 case NL80211_IFTYPE_STATION: 4639 if (sta->sta.tdls) { 4640 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4641 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4642 fastrx.expected_ds_bits = 0; 4643 } else { 4644 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4645 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4646 fastrx.expected_ds_bits = 4647 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4648 } 4649 4650 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4651 fastrx.expected_ds_bits |= 4652 cpu_to_le16(IEEE80211_FCTL_TODS); 4653 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4654 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4655 } 4656 4657 if (!sdata->u.mgd.powersave) 4658 break; 4659 4660 /* software powersave is a huge mess, avoid all of it */ 4661 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4662 goto clear; 4663 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4664 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4665 goto clear; 4666 break; 4667 case NL80211_IFTYPE_AP_VLAN: 4668 case NL80211_IFTYPE_AP: 4669 /* parallel-rx requires this, at least with calls to 4670 * ieee80211_sta_ps_transition() 4671 */ 4672 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4673 goto clear; 4674 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4675 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4676 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4677 4678 fastrx.internal_forward = 4679 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4680 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4681 !sdata->u.vlan.sta); 4682 4683 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4684 sdata->u.vlan.sta) { 4685 fastrx.expected_ds_bits |= 4686 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4687 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4688 fastrx.internal_forward = 0; 4689 } 4690 4691 break; 4692 case NL80211_IFTYPE_MESH_POINT: 4693 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS | 4694 IEEE80211_FCTL_TODS); 4695 fastrx.da_offs = offsetof(struct 
ieee80211_hdr, addr3); 4696 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4697 break; 4698 default: 4699 goto clear; 4700 } 4701 4702 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4703 goto clear; 4704 4705 rcu_read_lock(); 4706 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4707 if (!key) 4708 key = rcu_dereference(sdata->default_unicast_key); 4709 if (key) { 4710 switch (key->conf.cipher) { 4711 case WLAN_CIPHER_SUITE_TKIP: 4712 /* we don't want to deal with MMIC in fast-rx */ 4713 goto clear_rcu; 4714 case WLAN_CIPHER_SUITE_CCMP: 4715 case WLAN_CIPHER_SUITE_CCMP_256: 4716 case WLAN_CIPHER_SUITE_GCMP: 4717 case WLAN_CIPHER_SUITE_GCMP_256: 4718 break; 4719 default: 4720 /* We also don't want to deal with 4721 * WEP or cipher scheme. 4722 */ 4723 goto clear_rcu; 4724 } 4725 4726 fastrx.key = true; 4727 fastrx.icv_len = key->conf.icv_len; 4728 } 4729 4730 assign = true; 4731 clear_rcu: 4732 rcu_read_unlock(); 4733 clear: 4734 __release(check_fast_rx); 4735 4736 if (assign) 4737 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4738 4739 offload_flags = get_bss_sdata(sdata)->vif.offload_flags; 4740 offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED; 4741 4742 if (assign && offload) 4743 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4744 else 4745 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4746 4747 if (set_offload) 4748 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); 4749 4750 spin_lock_bh(&sta->lock); 4751 old = rcu_dereference_protected(sta->fast_rx, true); 4752 rcu_assign_pointer(sta->fast_rx, new); 4753 spin_unlock_bh(&sta->lock); 4754 4755 if (old) 4756 kfree_rcu(old, rcu_head); 4757 } 4758 4759 void ieee80211_clear_fast_rx(struct sta_info *sta) 4760 { 4761 struct ieee80211_fast_rx *old; 4762 4763 spin_lock_bh(&sta->lock); 4764 old = rcu_dereference_protected(sta->fast_rx, true); 4765 RCU_INIT_POINTER(sta->fast_rx, NULL); 4766 spin_unlock_bh(&sta->lock); 4767 4768 if (old) 4769 kfree_rcu(old, rcu_head); 4770 } 4771 4772 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4773 { 4774 struct ieee80211_local *local = sdata->local; 4775 struct sta_info *sta; 4776 4777 lockdep_assert_wiphy(local->hw.wiphy); 4778 4779 list_for_each_entry(sta, &local->sta_list, list) { 4780 if (sdata != sta->sdata && 4781 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4782 continue; 4783 ieee80211_check_fast_rx(sta); 4784 } 4785 } 4786 4787 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4788 { 4789 struct ieee80211_local *local = sdata->local; 4790 4791 lockdep_assert_wiphy(local->hw.wiphy); 4792 4793 __ieee80211_check_fast_rx_iface(sdata); 4794 } 4795 4796 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4797 struct ieee80211_fast_rx *fast_rx, 4798 int orig_len) 4799 { 4800 struct ieee80211_sta_rx_stats *stats; 4801 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4802 struct sta_info *sta = rx->sta; 4803 struct link_sta_info *link_sta; 4804 struct sk_buff *skb = rx->skb; 4805 void *sa = skb->data + ETH_ALEN; 4806 void *da = skb->data; 4807 4808 if (rx->link_id >= 0) { 4809 link_sta = rcu_dereference(sta->link[rx->link_id]); 4810 if (WARN_ON_ONCE(!link_sta)) { 4811 dev_kfree_skb(rx->skb); 4812 return; 4813 } 4814 } else { 4815 link_sta = &sta->deflink; 4816 } 4817 4818 stats = &link_sta->rx_stats; 4819 if (fast_rx->uses_rss) 4820 stats = this_cpu_ptr(link_sta->pcpu_rx_stats); 4821 4822 /* statistics part of ieee80211_rx_h_sta_process() */ 4823 if 
(!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4824 stats->last_signal = status->signal; 4825 if (!fast_rx->uses_rss) 4826 ewma_signal_add(&link_sta->rx_stats_avg.signal, 4827 -status->signal); 4828 } 4829 4830 if (status->chains) { 4831 int i; 4832 4833 stats->chains = status->chains; 4834 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4835 int signal = status->chain_signal[i]; 4836 4837 if (!(status->chains & BIT(i))) 4838 continue; 4839 4840 stats->chain_signal_last[i] = signal; 4841 if (!fast_rx->uses_rss) 4842 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], 4843 -signal); 4844 } 4845 } 4846 /* end of statistics */ 4847 4848 stats->last_rx = jiffies; 4849 stats->last_rate = sta_stats_encode_rate(status); 4850 4851 stats->fragments++; 4852 stats->packets++; 4853 4854 skb->dev = fast_rx->dev; 4855 4856 dev_sw_netstats_rx_add(fast_rx->dev, skb->len); 4857 4858 /* The seqno index has the same property as needed 4859 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4860 * for non-QoS-data frames. Here we know it's a data 4861 * frame, so count MSDUs. 4862 */ 4863 u64_stats_update_begin(&stats->syncp); 4864 stats->msdu[rx->seqno_idx]++; 4865 stats->bytes += orig_len; 4866 u64_stats_update_end(&stats->syncp); 4867 4868 if (fast_rx->internal_forward) { 4869 struct sk_buff *xmit_skb = NULL; 4870 if (is_multicast_ether_addr(da)) { 4871 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4872 } else if (!ether_addr_equal(da, sa) && 4873 sta_info_get(rx->sdata, da)) { 4874 xmit_skb = skb; 4875 skb = NULL; 4876 } 4877 4878 if (xmit_skb) { 4879 /* 4880 * Send to wireless media and increase priority by 256 4881 * to keep the received priority instead of 4882 * reclassifying the frame (see cfg80211_classify8021d). 4883 */ 4884 xmit_skb->priority += 256; 4885 xmit_skb->protocol = htons(ETH_P_802_3); 4886 skb_reset_network_header(xmit_skb); 4887 skb_reset_mac_header(xmit_skb); 4888 dev_queue_xmit(xmit_skb); 4889 } 4890 4891 if (!skb) 4892 return; 4893 } 4894 4895 /* deliver to local stack */ 4896 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4897 ieee80211_deliver_skb_to_local_stack(skb, rx); 4898 } 4899 4900 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4901 struct ieee80211_fast_rx *fast_rx) 4902 { 4903 struct sk_buff *skb = rx->skb; 4904 struct ieee80211_hdr *hdr = (void *)skb->data; 4905 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4906 static ieee80211_rx_result res; 4907 int orig_len = skb->len; 4908 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4909 int snap_offs = hdrlen; 4910 struct { 4911 u8 snap[sizeof(rfc1042_header)]; 4912 __be16 proto; 4913 } *payload __aligned(2); 4914 struct { 4915 u8 da[ETH_ALEN]; 4916 u8 sa[ETH_ALEN]; 4917 } addrs __aligned(2); 4918 struct ieee80211_sta_rx_stats *stats; 4919 4920 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4921 * to a common data structure; drivers can implement that per queue 4922 * but we don't have that information in mac80211 4923 */ 4924 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4925 return false; 4926 4927 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4928 4929 /* If using encryption, we also need to have: 4930 * - PN_VALIDATED: similar, but the implementation is tricky 4931 * - DECRYPTED: necessary for PN_VALIDATED 4932 */ 4933 if (fast_rx->key && 4934 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4935 return false; 4936 4937 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4938 return false; 4939 4940 if 
(unlikely(ieee80211_is_frag(hdr))) 4941 return false; 4942 4943 /* Since our interface address cannot be multicast, this 4944 * implicitly also rejects multicast frames without the 4945 * explicit check. 4946 * 4947 * We shouldn't get any *data* frames not addressed to us 4948 * (AP mode will accept multicast *management* frames), but 4949 * punting here will make it go through the full checks in 4950 * ieee80211_accept_frame(). 4951 */ 4952 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4953 return false; 4954 4955 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4956 IEEE80211_FCTL_TODS)) != 4957 fast_rx->expected_ds_bits) 4958 return false; 4959 4960 /* assign the key to drop unencrypted frames (later) 4961 * and strip the IV/MIC if necessary 4962 */ 4963 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4964 /* GCMP header length is the same */ 4965 snap_offs += IEEE80211_CCMP_HDR_LEN; 4966 } 4967 4968 if (!ieee80211_vif_is_mesh(&rx->sdata->vif) && 4969 !(status->rx_flags & IEEE80211_RX_AMSDU)) { 4970 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4971 return false; 4972 4973 payload = (void *)(skb->data + snap_offs); 4974 4975 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 4976 return false; 4977 4978 /* Don't handle these here since they require special code. 4979 * Accept AARP and IPX even though they should come with a 4980 * bridge-tunnel header - but if we get them this way then 4981 * there's little point in discarding them. 4982 */ 4983 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 4984 payload->proto == fast_rx->control_port_protocol)) 4985 return false; 4986 } 4987 4988 /* after this point, don't punt to the slowpath! */ 4989 4990 if (fast_rx->uses_rss) 4991 stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); 4992 else 4993 stats = &rx->link_sta->rx_stats; 4994 4995 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 4996 pskb_trim(skb, skb->len - fast_rx->icv_len)) 4997 goto drop; 4998 4999 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 5000 goto drop; 5001 5002 if (status->rx_flags & IEEE80211_RX_AMSDU) { 5003 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 5004 RX_QUEUED) 5005 goto drop; 5006 5007 return true; 5008 } 5009 5010 /* do the header conversion - first grab the addresses */ 5011 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 5012 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 5013 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) { 5014 skb_pull(skb, snap_offs - 2); 5015 put_unaligned_be16(skb->len - 2, skb->data); 5016 } else { 5017 skb_postpull_rcsum(skb, skb->data + snap_offs, 5018 sizeof(rfc1042_header) + 2); 5019 5020 /* remove the SNAP but leave the ethertype */ 5021 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 5022 } 5023 /* push the addresses in front */ 5024 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 5025 5026 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 5027 switch (res) { 5028 case RX_QUEUED: 5029 stats->last_rx = jiffies; 5030 stats->last_rate = sta_stats_encode_rate(status); 5031 return true; 5032 case RX_CONTINUE: 5033 break; 5034 default: 5035 goto drop; 5036 } 5037 5038 ieee80211_rx_8023(rx, fast_rx, orig_len); 5039 5040 return true; 5041 drop: 5042 dev_kfree_skb(skb); 5043 5044 stats->dropped++; 5045 return true; 5046 } 5047 5048 /* 5049 * This function returns whether or not the SKB 5050 * was destined for RX processing or not, which, 5051 * if consume is true, is equivalent to whether 5052 * or not the skb 
was consumed. 5053 */ 5054 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 5055 struct sk_buff *skb, bool consume) 5056 { 5057 struct ieee80211_local *local = rx->local; 5058 struct ieee80211_sub_if_data *sdata = rx->sdata; 5059 struct ieee80211_hdr *hdr = (void *)skb->data; 5060 struct link_sta_info *link_sta = rx->link_sta; 5061 struct ieee80211_link_data *link = rx->link; 5062 5063 rx->skb = skb; 5064 5065 /* See if we can do fast-rx; if we have to copy we already lost, 5066 * so punt in that case. We should never have to deliver a data 5067 * frame to multiple interfaces anyway. 5068 * 5069 * We skip the ieee80211_accept_frame() call and do the necessary 5070 * checking inside ieee80211_invoke_fast_rx(). 5071 */ 5072 if (consume && rx->sta) { 5073 struct ieee80211_fast_rx *fast_rx; 5074 5075 fast_rx = rcu_dereference(rx->sta->fast_rx); 5076 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 5077 return true; 5078 } 5079 5080 if (!ieee80211_accept_frame(rx)) 5081 return false; 5082 5083 if (!consume) { 5084 struct skb_shared_hwtstamps *shwt; 5085 5086 rx->skb = skb_copy(skb, GFP_ATOMIC); 5087 if (!rx->skb) { 5088 if (net_ratelimit()) 5089 wiphy_debug(local->hw.wiphy, 5090 "failed to copy skb for %s\n", 5091 sdata->name); 5092 return true; 5093 } 5094 5095 /* skb_copy() does not copy the hw timestamps, so copy it 5096 * explicitly 5097 */ 5098 shwt = skb_hwtstamps(rx->skb); 5099 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; 5100 5101 /* Update the hdr pointer to the new skb for translation below */ 5102 hdr = (struct ieee80211_hdr *)rx->skb->data; 5103 } 5104 5105 if (unlikely(rx->sta && rx->sta->sta.mlo) && 5106 is_unicast_ether_addr(hdr->addr1) && 5107 !ieee80211_is_probe_resp(hdr->frame_control) && 5108 !ieee80211_is_beacon(hdr->frame_control)) { 5109 /* translate to MLD addresses */ 5110 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 5111 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); 5112 if (ether_addr_equal(link_sta->addr, hdr->addr2)) 5113 ether_addr_copy(hdr->addr2, rx->sta->addr); 5114 /* translate A3 only if it's the BSSID */ 5115 if (!ieee80211_has_tods(hdr->frame_control) && 5116 !ieee80211_has_fromds(hdr->frame_control)) { 5117 if (ether_addr_equal(link_sta->addr, hdr->addr3)) 5118 ether_addr_copy(hdr->addr3, rx->sta->addr); 5119 else if (ether_addr_equal(link->conf->addr, hdr->addr3)) 5120 ether_addr_copy(hdr->addr3, rx->sdata->vif.addr); 5121 } 5122 /* not needed for A4 since it can only carry the SA */ 5123 } 5124 5125 ieee80211_invoke_rx_handlers(rx); 5126 return true; 5127 } 5128 5129 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, 5130 struct ieee80211_sta *pubsta, 5131 struct sk_buff *skb, 5132 struct list_head *list) 5133 { 5134 struct ieee80211_local *local = hw_to_local(hw); 5135 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5136 struct ieee80211_fast_rx *fast_rx; 5137 struct ieee80211_rx_data rx; 5138 struct sta_info *sta; 5139 int link_id = -1; 5140 5141 memset(&rx, 0, sizeof(rx)); 5142 rx.skb = skb; 5143 rx.local = local; 5144 rx.list = list; 5145 rx.link_id = -1; 5146 5147 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 5148 5149 /* drop frame if too short for header */ 5150 if (skb->len < sizeof(struct ethhdr)) 5151 goto drop; 5152 5153 if (!pubsta) 5154 goto drop; 5155 5156 if (status->link_valid) 5157 link_id = status->link_id; 5158 5159 /* 5160 * TODO: Should the frame be dropped if the right link_id is not 5161 * available? 
Or may be it is fine in the current form to proceed with 5162 * the frame processing because with frame being in 802.3 format, 5163 * link_id is used only for stats purpose and updating the stats on 5164 * the deflink is fine? 5165 */ 5166 sta = container_of(pubsta, struct sta_info, sta); 5167 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 5168 goto drop; 5169 5170 fast_rx = rcu_dereference(rx.sta->fast_rx); 5171 if (!fast_rx) 5172 goto drop; 5173 5174 ieee80211_rx_8023(&rx, fast_rx, skb->len); 5175 return; 5176 5177 drop: 5178 dev_kfree_skb(skb); 5179 } 5180 5181 static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx, 5182 struct sk_buff *skb, bool consume) 5183 { 5184 struct link_sta_info *link_sta; 5185 struct ieee80211_hdr *hdr = (void *)skb->data; 5186 struct sta_info *sta; 5187 int link_id = -1; 5188 5189 /* 5190 * Look up link station first, in case there's a 5191 * chance that they might have a link address that 5192 * is identical to the MLD address, that way we'll 5193 * have the link information if needed. 5194 */ 5195 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2); 5196 if (link_sta) { 5197 sta = link_sta->sta; 5198 link_id = link_sta->link_id; 5199 } else { 5200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5201 5202 sta = sta_info_get_bss(rx->sdata, hdr->addr2); 5203 if (status->link_valid) { 5204 link_id = status->link_id; 5205 } else if (ieee80211_vif_is_mld(&rx->sdata->vif) && 5206 status->freq) { 5207 struct ieee80211_link_data *link; 5208 struct ieee80211_chanctx_conf *conf; 5209 5210 for_each_link_data_rcu(rx->sdata, link) { 5211 conf = rcu_dereference(link->conf->chanctx_conf); 5212 if (!conf || !conf->def.chan) 5213 continue; 5214 5215 if (status->freq == conf->def.chan->center_freq) { 5216 link_id = link->link_id; 5217 break; 5218 } 5219 } 5220 } 5221 } 5222 5223 if (!ieee80211_rx_data_set_sta(rx, sta, link_id)) 5224 return false; 5225 5226 return ieee80211_prepare_and_rx_handle(rx, skb, consume); 5227 } 5228 5229 /* 5230 * This is the actual Rx frames handler. as it belongs to Rx path it must 5231 * be called with rcu_read_lock protection. 
5232 */ 5233 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, 5234 struct ieee80211_sta *pubsta, 5235 struct sk_buff *skb, 5236 struct list_head *list) 5237 { 5238 struct ieee80211_local *local = hw_to_local(hw); 5239 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5240 struct ieee80211_sub_if_data *sdata; 5241 struct ieee80211_hdr *hdr; 5242 __le16 fc; 5243 struct ieee80211_rx_data rx; 5244 struct ieee80211_sub_if_data *prev; 5245 struct rhlist_head *tmp; 5246 int err = 0; 5247 5248 fc = ((struct ieee80211_hdr *)skb->data)->frame_control; 5249 memset(&rx, 0, sizeof(rx)); 5250 rx.skb = skb; 5251 rx.local = local; 5252 rx.list = list; 5253 rx.link_id = -1; 5254 5255 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc)) 5256 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 5257 5258 if (ieee80211_is_mgmt(fc)) { 5259 /* drop frame if too short for header */ 5260 if (skb->len < ieee80211_hdrlen(fc)) 5261 err = -ENOBUFS; 5262 else 5263 err = skb_linearize(skb); 5264 } else { 5265 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc)); 5266 } 5267 5268 if (err) { 5269 dev_kfree_skb(skb); 5270 return; 5271 } 5272 5273 hdr = (struct ieee80211_hdr *)skb->data; 5274 ieee80211_parse_qos(&rx); 5275 ieee80211_verify_alignment(&rx); 5276 5277 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) || 5278 ieee80211_is_beacon(hdr->frame_control) || 5279 ieee80211_is_s1g_beacon(hdr->frame_control))) 5280 ieee80211_scan_rx(local, skb); 5281 5282 if (ieee80211_is_data(fc)) { 5283 struct sta_info *sta, *prev_sta; 5284 int link_id = -1; 5285 5286 if (status->link_valid) 5287 link_id = status->link_id; 5288 5289 if (pubsta) { 5290 sta = container_of(pubsta, struct sta_info, sta); 5291 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 5292 goto out; 5293 5294 /* 5295 * In MLO connection, fetch the link_id using addr2 5296 * when the driver does not pass link_id in status. 5297 * When the address translation is already performed by 5298 * driver/hw, the valid link_id must be passed in 5299 * status. 
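 * The lookup below uses link_sta_info_get_bss() with A2 to map the
 * transmitter back to the corresponding link.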
5300 */ 5301 5302 if (!status->link_valid && pubsta->mlo) { 5303 struct link_sta_info *link_sta; 5304 5305 link_sta = link_sta_info_get_bss(rx.sdata, 5306 hdr->addr2); 5307 if (!link_sta) 5308 goto out; 5309 5310 ieee80211_rx_data_set_link(&rx, link_sta->link_id); 5311 } 5312 5313 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5314 return; 5315 goto out; 5316 } 5317 5318 prev_sta = NULL; 5319 5320 for_each_sta_info(local, hdr->addr2, sta, tmp) { 5321 if (!prev_sta) { 5322 prev_sta = sta; 5323 continue; 5324 } 5325 5326 rx.sdata = prev_sta->sdata; 5327 if (!status->link_valid && prev_sta->sta.mlo) { 5328 struct link_sta_info *link_sta; 5329 5330 link_sta = link_sta_info_get_bss(rx.sdata, 5331 hdr->addr2); 5332 if (!link_sta) 5333 continue; 5334 5335 link_id = link_sta->link_id; 5336 } 5337 5338 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5339 goto out; 5340 5341 ieee80211_prepare_and_rx_handle(&rx, skb, false); 5342 5343 prev_sta = sta; 5344 } 5345 5346 if (prev_sta) { 5347 rx.sdata = prev_sta->sdata; 5348 if (!status->link_valid && prev_sta->sta.mlo) { 5349 struct link_sta_info *link_sta; 5350 5351 link_sta = link_sta_info_get_bss(rx.sdata, 5352 hdr->addr2); 5353 if (!link_sta) 5354 goto out; 5355 5356 link_id = link_sta->link_id; 5357 } 5358 5359 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5360 goto out; 5361 5362 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5363 return; 5364 goto out; 5365 } 5366 } 5367 5368 prev = NULL; 5369 5370 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 5371 if (!ieee80211_sdata_running(sdata)) 5372 continue; 5373 5374 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 5375 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 5376 continue; 5377 5378 /* 5379 * frame is destined for this interface, but if it's 5380 * not also for the previous one we handle that after 5381 * the loop to avoid copying the SKB once too much 5382 */ 5383 5384 if (!prev) { 5385 prev = sdata; 5386 continue; 5387 } 5388 5389 rx.sdata = prev; 5390 ieee80211_rx_for_interface(&rx, skb, false); 5391 5392 prev = sdata; 5393 } 5394 5395 if (prev) { 5396 rx.sdata = prev; 5397 5398 if (ieee80211_rx_for_interface(&rx, skb, true)) 5399 return; 5400 } 5401 5402 out: 5403 dev_kfree_skb(skb); 5404 } 5405 5406 /* 5407 * This is the receive path handler. It is called by a low level driver when an 5408 * 802.11 MPDU is received from the hardware. 5409 */ 5410 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5411 struct sk_buff *skb, struct list_head *list) 5412 { 5413 struct ieee80211_local *local = hw_to_local(hw); 5414 struct ieee80211_rate *rate = NULL; 5415 struct ieee80211_supported_band *sband; 5416 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5417 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5418 5419 WARN_ON_ONCE(softirq_count() == 0); 5420 5421 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 5422 goto drop; 5423 5424 sband = local->hw.wiphy->bands[status->band]; 5425 if (WARN_ON(!sband)) 5426 goto drop; 5427 5428 /* 5429 * If we're suspending, it is possible although not too likely 5430 * that we'd be receiving frames after having already partially 5431 * quiesced the stack. We can't process such frames then since 5432 * that might, for example, cause stations to be added or other 5433 * driver callbacks be invoked. 
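 * Such frames are simply dropped below while quiescing or suspended.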
5434 */
5435 if (unlikely(local->quiescing || local->suspended))
5436 goto drop;
5437
5438 /* We might be in the middle of a HW reconfig, prevent Rx for the same reason */
5439 if (unlikely(local->in_reconfig))
5440 goto drop;
5441
5442 /*
5443 * The same happens when we're not even started,
5444 * but that's worth a warning.
5445 */
5446 if (WARN_ON(!local->started))
5447 goto drop;
5448
5449 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC) &&
5450 !(status->flag & RX_FLAG_NO_PSDU &&
5451 status->zero_length_psdu_type ==
5452 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED))) {
5453 /*
5454 * Validate the rate, unless there was a PLCP error which may
5455 * have an invalid rate or the PSDU was not captured and may be
5456 * missing rate information.
5457 */
5458
5459 switch (status->encoding) {
5460 case RX_ENC_HT:
5461 /*
5462 * rate_idx is MCS index, which can be [0-76]
5463 * as documented on:
5464 *
5465 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
5466 *
5467 * Anything else would be some sort of driver or
5468 * hardware error. The driver should catch hardware
5469 * errors.
5470 */
5471 if (WARN(status->rate_idx > 76,
5472 "Rate marked as an HT rate but passed "
5473 "status->rate_idx is not "
5474 "an MCS index [0-76]: %d (0x%02x)\n",
5475 status->rate_idx,
5476 status->rate_idx))
5477 goto drop;
5478 break;
5479 case RX_ENC_VHT:
5480 if (WARN_ONCE(status->rate_idx > 11 ||
5481 !status->nss ||
5482 status->nss > 8,
5483 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
5484 status->rate_idx, status->nss))
5485 goto drop;
5486 break;
5487 case RX_ENC_HE:
5488 if (WARN_ONCE(status->rate_idx > 11 ||
5489 !status->nss ||
5490 status->nss > 8,
5491 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
5492 status->rate_idx, status->nss))
5493 goto drop;
5494 break;
5495 case RX_ENC_EHT:
5496 if (WARN_ONCE(status->rate_idx > 15 ||
5497 !status->nss ||
5498 status->nss > 8 ||
5499 status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2,
5500 "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n",
5501 status->rate_idx, status->nss, status->eht.gi))
5502 goto drop;
5503 break;
5504 default:
5505 WARN_ON_ONCE(1);
5506 fallthrough;
5507 case RX_ENC_LEGACY:
5508 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
5509 goto drop;
5510 rate = &sband->bitrates[status->rate_idx];
5511 }
5512 }
5513
5514 if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED))
5515 goto drop;
5516
5517 status->rx_flags = 0;
5518
5519 kcov_remote_start_common(skb_get_kcov_handle(skb));
5520
5521 /*
5522 * Frames with failed FCS/PLCP checksum are not returned,
5523 * all other frames are returned without radiotap header
5524 * if it was previously present.
5525 * Also, frames with less than 16 bytes are dropped.
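 * (see ieee80211_rx_monitor(), called below for frames that are not
 * already in 802.3 format)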
5526 */ 5527 if (!(status->flag & RX_FLAG_8023)) 5528 skb = ieee80211_rx_monitor(local, skb, rate); 5529 if (skb) { 5530 if ((status->flag & RX_FLAG_8023) || 5531 ieee80211_is_data_present(hdr->frame_control)) 5532 ieee80211_tpt_led_trig_rx(local, skb->len); 5533 5534 if (status->flag & RX_FLAG_8023) 5535 __ieee80211_rx_handle_8023(hw, pubsta, skb, list); 5536 else 5537 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 5538 } 5539 5540 kcov_remote_stop(); 5541 return; 5542 drop: 5543 kfree_skb(skb); 5544 } 5545 EXPORT_SYMBOL(ieee80211_rx_list); 5546 5547 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5548 struct sk_buff *skb, struct napi_struct *napi) 5549 { 5550 struct sk_buff *tmp; 5551 LIST_HEAD(list); 5552 5553 5554 /* 5555 * key references and virtual interfaces are protected using RCU 5556 * and this requires that we are in a read-side RCU section during 5557 * receive processing 5558 */ 5559 rcu_read_lock(); 5560 ieee80211_rx_list(hw, pubsta, skb, &list); 5561 rcu_read_unlock(); 5562 5563 if (!napi) { 5564 netif_receive_skb_list(&list); 5565 return; 5566 } 5567 5568 list_for_each_entry_safe(skb, tmp, &list, list) { 5569 skb_list_del_init(skb); 5570 napi_gro_receive(napi, skb); 5571 } 5572 } 5573 EXPORT_SYMBOL(ieee80211_rx_napi); 5574 5575 /* This is a version of the rx handler that can be called from hard irq 5576 * context. Post the skb on the queue and schedule the tasklet */ 5577 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 5578 { 5579 struct ieee80211_local *local = hw_to_local(hw); 5580 5581 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 5582 5583 skb->pkt_type = IEEE80211_RX_MSG; 5584 skb_queue_tail(&local->skb_queue, skb); 5585 tasklet_schedule(&local->tasklet); 5586 } 5587 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 5588
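/*
 * Illustrative sketch only (not taken from any particular driver): a low
 * level driver's RX completion path is expected to fill in the RX status
 * control block of the skb and then hand the frame to one of the entry
 * points above, roughly along these lines (all values are hypothetical):
 *
 *	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *	memset(status, 0, sizeof(*status));
 *	status->band = NL80211_BAND_2GHZ;
 *	status->freq = 2412;
 *	status->signal = -55;
 *
 *	ieee80211_rx_napi(hw, NULL, skb, napi);
 *
 * From hard interrupt context, ieee80211_rx_irqsafe() would be used
 * instead, as described above.
 */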