1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2002-2005, Instant802 Networks, Inc. 4 * Copyright 2005-2006, Devicescape Software, Inc. 5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> 7 * Copyright 2013-2014 Intel Mobile Communications GmbH 8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 9 * Copyright (C) 2018-2026 Intel Corporation 10 */ 11 12 #include <linux/jiffies.h> 13 #include <linux/slab.h> 14 #include <linux/kernel.h> 15 #include <linux/skbuff.h> 16 #include <linux/netdevice.h> 17 #include <linux/etherdevice.h> 18 #include <linux/rcupdate.h> 19 #include <linux/export.h> 20 #include <linux/kcov.h> 21 #include <linux/bitops.h> 22 #include <kunit/visibility.h> 23 #include <net/mac80211.h> 24 #include <net/ieee80211_radiotap.h> 25 #include <linux/unaligned.h> 26 27 #include "ieee80211_i.h" 28 #include "driver-ops.h" 29 #include "led.h" 30 #include "mesh.h" 31 #include "wep.h" 32 #include "wpa.h" 33 #include "tkip.h" 34 #include "wme.h" 35 #include "rate.h" 36 37 /* 38 * monitor mode reception 39 * 40 * This function cleans up the SKB, i.e. it removes all the stuff 41 * only useful for monitoring. 42 */ 43 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb, 44 unsigned int present_fcs_len, 45 unsigned int rtap_space) 46 { 47 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 48 struct ieee80211_hdr *hdr; 49 unsigned int hdrlen; 50 __le16 fc; 51 52 if (present_fcs_len) 53 __pskb_trim(skb, skb->len - present_fcs_len); 54 pskb_pull(skb, rtap_space); 55 56 /* After pulling radiotap header, clear all flags that indicate 57 * info in skb->data. 58 */ 59 status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END | 60 RX_FLAG_RADIOTAP_LSIG | 61 RX_FLAG_RADIOTAP_HE_MU | 62 RX_FLAG_RADIOTAP_HE | 63 RX_FLAG_RADIOTAP_VHT); 64 65 hdr = (void *)skb->data; 66 fc = hdr->frame_control; 67 68 /* 69 * Remove the HT-Control field (if present) on management 70 * frames after we've sent the frame to monitoring. We 71 * (currently) don't need it, and don't properly parse 72 * frames with it present, due to the assumption of a 73 * fixed management header length. 
74 */ 75 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc))) 76 return skb; 77 78 hdrlen = ieee80211_hdrlen(fc); 79 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER); 80 81 if (!pskb_may_pull(skb, hdrlen)) { 82 dev_kfree_skb(skb); 83 return NULL; 84 } 85 86 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data, 87 hdrlen - IEEE80211_HT_CTL_LEN); 88 pskb_pull(skb, IEEE80211_HT_CTL_LEN); 89 90 return skb; 91 } 92 93 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len, 94 unsigned int rtap_space) 95 { 96 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 97 struct ieee80211_hdr *hdr; 98 99 hdr = (void *)(skb->data + rtap_space); 100 101 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | 102 RX_FLAG_FAILED_PLCP_CRC | 103 RX_FLAG_ONLY_MONITOR | 104 RX_FLAG_NO_PSDU)) 105 return true; 106 107 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space)) 108 return true; 109 110 if (ieee80211_is_ctl(hdr->frame_control) && 111 !ieee80211_is_pspoll(hdr->frame_control) && 112 !ieee80211_is_back_req(hdr->frame_control)) 113 return true; 114 115 return false; 116 } 117 118 static int 119 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local, 120 struct ieee80211_rx_status *status, 121 struct sk_buff *skb) 122 { 123 int len; 124 125 /* always present fields */ 126 len = sizeof(struct ieee80211_radiotap_header) + 8; 127 128 /* allocate extra bitmaps */ 129 if (status->chains) 130 len += 4 * hweight8(status->chains); 131 132 if (ieee80211_have_rx_timestamp(status)) { 133 len = ALIGN(len, 8); 134 len += 8; 135 } 136 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM)) 137 len += 1; 138 139 /* antenna field, if we don't have per-chain info */ 140 if (!status->chains) 141 len += 1; 142 143 /* padding for RX_FLAGS if necessary */ 144 len = ALIGN(len, 2); 145 146 if (status->encoding == RX_ENC_HT) /* HT info */ 147 len += 3; 148 149 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 150 len = ALIGN(len, 4); 151 len += 8; 152 } 153 154 if (status->encoding == RX_ENC_VHT) { 155 /* Included even if RX_FLAG_RADIOTAP_VHT is not set */ 156 len = ALIGN(len, 2); 157 len += 12; 158 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12); 159 } 160 161 if (local->hw.radiotap_timestamp.units_pos >= 0) { 162 len = ALIGN(len, 8); 163 len += 12; 164 } 165 166 if (status->encoding == RX_ENC_HE && 167 status->flag & RX_FLAG_RADIOTAP_HE) { 168 len = ALIGN(len, 2); 169 len += 12; 170 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12); 171 } 172 173 if (status->encoding == RX_ENC_HE && 174 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 175 len = ALIGN(len, 2); 176 len += 12; 177 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12); 178 } 179 180 if (status->flag & RX_FLAG_NO_PSDU) 181 len += 1; 182 183 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 184 len = ALIGN(len, 2); 185 len += 4; 186 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4); 187 } 188 189 if (status->chains) { 190 /* antenna and antenna signal fields */ 191 len += 2 * hweight8(status->chains); 192 } 193 194 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) { 195 int tlv_offset = 0; 196 197 /* 198 * The position to look at depends on the existence (or non- 199 * existence) of other elements, so take that into account... 
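		 * For instance, if the driver passed both a VHT struct (12
		 * bytes) and an L-SIG struct (4 bytes) ahead of the TLVs, the
		 * TLV data starts 16 bytes into skb->data.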
200 */ 201 if (status->flag & RX_FLAG_RADIOTAP_VHT) 202 tlv_offset += 203 sizeof(struct ieee80211_radiotap_vht); 204 if (status->flag & RX_FLAG_RADIOTAP_HE) 205 tlv_offset += 206 sizeof(struct ieee80211_radiotap_he); 207 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 208 tlv_offset += 209 sizeof(struct ieee80211_radiotap_he_mu); 210 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 211 tlv_offset += 212 sizeof(struct ieee80211_radiotap_lsig); 213 214 /* ensure 4 byte alignment for TLV */ 215 len = ALIGN(len, 4); 216 217 /* TLVs until the mac header */ 218 len += skb_mac_header(skb) - &skb->data[tlv_offset]; 219 } 220 221 return len; 222 } 223 224 static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 225 int link_id, 226 struct sta_info *sta, 227 struct sk_buff *skb) 228 { 229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 230 231 if (link_id >= 0) { 232 status->link_valid = 1; 233 status->link_id = link_id; 234 } else { 235 status->link_valid = 0; 236 } 237 238 skb_queue_tail(&sdata->skb_queue, skb); 239 wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work); 240 if (sta) { 241 struct link_sta_info *link_sta_info; 242 243 if (link_id >= 0) { 244 link_sta_info = rcu_dereference(sta->link[link_id]); 245 if (!link_sta_info) 246 return; 247 } else { 248 link_sta_info = &sta->deflink; 249 } 250 251 link_sta_info->rx_stats.packets++; 252 } 253 } 254 255 static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata, 256 int link_id, 257 struct sta_info *sta, 258 struct sk_buff *skb) 259 { 260 skb->protocol = 0; 261 __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb); 262 } 263 264 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata, 265 struct sk_buff *skb, 266 int rtap_space) 267 { 268 struct { 269 struct ieee80211_hdr_3addr hdr; 270 u8 category; 271 u8 action_code; 272 } __packed __aligned(2) action; 273 274 if (!sdata) 275 return; 276 277 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1); 278 279 if (skb->len < rtap_space + sizeof(action) + 280 VHT_MUMIMO_GROUPS_DATA_LEN) 281 return; 282 283 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr)) 284 return; 285 286 skb_copy_bits(skb, rtap_space, &action, sizeof(action)); 287 288 if (!ieee80211_is_action(action.hdr.frame_control)) 289 return; 290 291 if (action.category != WLAN_CATEGORY_VHT) 292 return; 293 294 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT) 295 return; 296 297 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr)) 298 return; 299 300 skb = skb_copy(skb, GFP_ATOMIC); 301 if (!skb) 302 return; 303 304 ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb); 305 } 306 307 /* 308 * ieee80211_add_rx_radiotap_header - add radiotap header 309 * 310 * add a radiotap header containing all the fields which the hardware provided. 
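 *
 * The header starts with a single it_present bitmap; for each RX chain the
 * driver reported, one more bitmap word is chained via the EXT bit so that
 * per-chain antenna and antenna-signal fields can be appended at the end.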
 */
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
				 struct sk_buff *skb,
				 struct ieee80211_rate *rate,
				 int rtap_len, bool has_fcs)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_radiotap_header *rthdr;
	unsigned char *pos;
	__le32 *it_present;
	u32 it_present_val;
	u16 rx_flags = 0;
	u16 channel_flags = 0;
	u32 tlvs_len = 0;
	int mpdulen, chain;
	unsigned long chains = status->chains;
	struct ieee80211_radiotap_vht vht = {};
	struct ieee80211_radiotap_he he = {};
	struct ieee80211_radiotap_he_mu he_mu = {};
	struct ieee80211_radiotap_lsig lsig = {};

	if (status->flag & RX_FLAG_RADIOTAP_VHT) {
		vht = *(struct ieee80211_radiotap_vht *)skb->data;
		skb_pull(skb, sizeof(vht));
		WARN_ON_ONCE(status->encoding != RX_ENC_VHT);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE) {
		he = *(struct ieee80211_radiotap_he *)skb->data;
		skb_pull(skb, sizeof(he));
		WARN_ON_ONCE(status->encoding != RX_ENC_HE);
	}

	if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
		he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
		skb_pull(skb, sizeof(he_mu));
	}

	if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
		lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
		skb_pull(skb, sizeof(lsig));
	}

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
		/* skb->data points at the TLVs; all other info was pulled off above */
		tlvs_len = skb_mac_header(skb) - skb->data;
	}

	mpdulen = skb->len;
	if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
		mpdulen += FCS_LEN;

	rthdr = skb_push(skb, rtap_len - tlvs_len);
	memset(rthdr, 0, rtap_len - tlvs_len);
	it_present = &rthdr->it_present;

	/* radiotap header, set always present flags */
	rthdr->it_len = cpu_to_le16(rtap_len);
	it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
			 BIT(IEEE80211_RADIOTAP_CHANNEL) |
			 BIT(IEEE80211_RADIOTAP_RX_FLAGS);

	if (!status->chains)
		it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);

	for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
		it_present_val |=
			BIT(IEEE80211_RADIOTAP_EXT) |
			BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
		put_unaligned_le32(it_present_val, it_present);
		it_present++;
		it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
				 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
	}

	if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
		it_present_val |= BIT(IEEE80211_RADIOTAP_TLV);

	put_unaligned_le32(it_present_val, it_present);

	/* This references through an offset into it_optional[] rather
	 * than via it_present otherwise later uses of pos will cause
	 * the compiler to think we have walked past the end of the
	 * struct member.
396 */ 397 pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional]; 398 399 /* the order of the following fields is important */ 400 401 /* IEEE80211_RADIOTAP_TSFT */ 402 if (ieee80211_have_rx_timestamp(status)) { 403 /* padding */ 404 while ((pos - (u8 *)rthdr) & 7) 405 *pos++ = 0; 406 put_unaligned_le64( 407 ieee80211_calculate_rx_timestamp(local, status, 408 mpdulen, 0), 409 pos); 410 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT)); 411 pos += 8; 412 } 413 414 /* IEEE80211_RADIOTAP_FLAGS */ 415 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) 416 *pos |= IEEE80211_RADIOTAP_F_FCS; 417 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 418 *pos |= IEEE80211_RADIOTAP_F_BADFCS; 419 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE) 420 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 421 pos++; 422 423 /* IEEE80211_RADIOTAP_RATE */ 424 if (!rate || status->encoding != RX_ENC_LEGACY) { 425 /* 426 * Without rate information don't add it. If we have, 427 * MCS information is a separate field in radiotap, 428 * added below. The byte here is needed as padding 429 * for the channel though, so initialise it to 0. 430 */ 431 *pos = 0; 432 } else { 433 int shift = 0; 434 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE)); 435 if (status->bw == RATE_INFO_BW_10) 436 shift = 1; 437 else if (status->bw == RATE_INFO_BW_5) 438 shift = 2; 439 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift)); 440 } 441 pos++; 442 443 /* IEEE80211_RADIOTAP_CHANNEL */ 444 /* TODO: frequency offset in KHz */ 445 put_unaligned_le16(status->freq, pos); 446 pos += 2; 447 if (status->bw == RATE_INFO_BW_10) 448 channel_flags |= IEEE80211_CHAN_HALF; 449 else if (status->bw == RATE_INFO_BW_5) 450 channel_flags |= IEEE80211_CHAN_QUARTER; 451 452 if (status->band == NL80211_BAND_5GHZ || 453 status->band == NL80211_BAND_6GHZ) 454 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; 455 else if (status->encoding != RX_ENC_LEGACY) 456 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; 457 else if (rate && rate->flags & IEEE80211_RATE_ERP_G) 458 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ; 459 else if (rate) 460 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ; 461 else 462 channel_flags |= IEEE80211_CHAN_2GHZ; 463 put_unaligned_le16(channel_flags, pos); 464 pos += 2; 465 466 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */ 467 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) && 468 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 469 *pos = status->signal; 470 rthdr->it_present |= 471 cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL)); 472 pos++; 473 } 474 475 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */ 476 477 if (!status->chains) { 478 /* IEEE80211_RADIOTAP_ANTENNA */ 479 *pos = status->antenna; 480 pos++; 481 } 482 483 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */ 484 485 /* IEEE80211_RADIOTAP_RX_FLAGS */ 486 /* ensure 2 byte alignment for the 2 byte field as required */ 487 if ((pos - (u8 *)rthdr) & 1) 488 *pos++ = 0; 489 if (status->flag & RX_FLAG_FAILED_PLCP_CRC) 490 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP; 491 put_unaligned_le16(rx_flags, pos); 492 pos += 2; 493 494 if (status->encoding == RX_ENC_HT) { 495 unsigned int stbc; 496 497 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS)); 498 *pos = local->hw.radiotap_mcs_details; 499 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 500 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT; 501 if (status->enc_flags & RX_ENC_FLAG_LDPC) 502 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC; 503 pos++; 504 
*pos = 0; 505 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI) 506 *pos |= IEEE80211_RADIOTAP_MCS_SGI; 507 if (status->bw == RATE_INFO_BW_40) 508 *pos |= IEEE80211_RADIOTAP_MCS_BW_40; 509 if (status->enc_flags & RX_ENC_FLAG_HT_GF) 510 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF; 511 if (status->enc_flags & RX_ENC_FLAG_LDPC) 512 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC; 513 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT; 514 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT; 515 pos++; 516 *pos++ = status->rate_idx; 517 } 518 519 if (status->flag & RX_FLAG_AMPDU_DETAILS) { 520 u16 flags = 0; 521 522 /* ensure 4 byte alignment */ 523 while ((pos - (u8 *)rthdr) & 3) 524 pos++; 525 rthdr->it_present |= 526 cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS)); 527 put_unaligned_le32(status->ampdu_reference, pos); 528 pos += 4; 529 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN) 530 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN; 531 if (status->flag & RX_FLAG_AMPDU_IS_LAST) 532 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST; 533 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR) 534 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR; 535 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN) 536 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN; 537 if (status->flag & RX_FLAG_AMPDU_EOF_BIT) 538 flags |= IEEE80211_RADIOTAP_AMPDU_EOF; 539 put_unaligned_le16(flags, pos); 540 pos += 2; 541 *pos++ = 0; 542 *pos++ = 0; 543 } 544 545 if (status->encoding == RX_ENC_VHT) { 546 u16 fill = local->hw.radiotap_vht_details; 547 548 /* Leave driver filled fields alone */ 549 fill &= ~le16_to_cpu(vht.known); 550 vht.known |= cpu_to_le16(fill); 551 552 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI && 553 status->enc_flags & RX_ENC_FLAG_SHORT_GI) 554 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI; 555 /* in VHT, STBC is binary */ 556 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC && 557 status->enc_flags & RX_ENC_FLAG_STBC_MASK) 558 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC; 559 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED && 560 status->enc_flags & RX_ENC_FLAG_BF) 561 *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED; 562 563 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) { 564 switch (status->bw) { 565 case RATE_INFO_BW_40: 566 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40; 567 break; 568 case RATE_INFO_BW_80: 569 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80; 570 break; 571 case RATE_INFO_BW_160: 572 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160; 573 break; 574 default: 575 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20; 576 break; 577 } 578 } 579 580 /* 581 * If the driver filled in mcs_nss[0], then do not touch it. 582 * 583 * Otherwise, put some information about MCS/NSS into the 584 * user 0 field. Note that this is not technically correct for 585 * an MU frame as we might have decoded a different user. 
586 */ 587 if (!vht.mcs_nss[0]) { 588 vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss; 589 590 /* coding field */ 591 if (status->enc_flags & RX_ENC_FLAG_LDPC) 592 vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0; 593 } 594 595 /* ensure 2 byte alignment */ 596 while ((pos - (u8 *)rthdr) & 1) 597 pos++; 598 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT)); 599 memcpy(pos, &vht, sizeof(vht)); 600 pos += sizeof(vht); 601 } 602 603 if (local->hw.radiotap_timestamp.units_pos >= 0) { 604 u16 accuracy = 0; 605 u8 flags; 606 u64 ts; 607 608 rthdr->it_present |= 609 cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP)); 610 611 /* ensure 8 byte alignment */ 612 while ((pos - (u8 *)rthdr) & 7) 613 pos++; 614 615 if (status->flag & RX_FLAG_MACTIME_IS_RTAP_TS64) { 616 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT; 617 ts = status->mactime; 618 } else { 619 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT; 620 ts = status->device_timestamp; 621 } 622 623 put_unaligned_le64(ts, pos); 624 pos += sizeof(u64); 625 626 if (local->hw.radiotap_timestamp.accuracy >= 0) { 627 accuracy = local->hw.radiotap_timestamp.accuracy; 628 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY; 629 } 630 put_unaligned_le16(accuracy, pos); 631 pos += sizeof(u16); 632 633 *pos++ = local->hw.radiotap_timestamp.units_pos; 634 *pos++ = flags; 635 } 636 637 if (status->encoding == RX_ENC_HE && 638 status->flag & RX_FLAG_RADIOTAP_HE) { 639 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f) 640 641 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) { 642 he.data6 |= HE_PREP(DATA6_NSTS, 643 FIELD_GET(RX_ENC_FLAG_STBC_MASK, 644 status->enc_flags)); 645 he.data3 |= HE_PREP(DATA3_STBC, 1); 646 } else { 647 he.data6 |= HE_PREP(DATA6_NSTS, status->nss); 648 } 649 650 #define CHECK_GI(s) \ 651 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \ 652 (int)NL80211_RATE_INFO_HE_GI_##s) 653 654 CHECK_GI(0_8); 655 CHECK_GI(1_6); 656 CHECK_GI(3_2); 657 658 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx); 659 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm); 660 he.data3 |= HE_PREP(DATA3_CODING, 661 !!(status->enc_flags & RX_ENC_FLAG_LDPC)); 662 663 he.data5 |= HE_PREP(DATA5_GI, status->he_gi); 664 665 switch (status->bw) { 666 case RATE_INFO_BW_20: 667 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 668 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ); 669 break; 670 case RATE_INFO_BW_40: 671 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 672 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ); 673 break; 674 case RATE_INFO_BW_80: 675 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 676 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ); 677 break; 678 case RATE_INFO_BW_160: 679 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 680 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ); 681 break; 682 case RATE_INFO_BW_HE_RU: 683 #define CHECK_RU_ALLOC(s) \ 684 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \ 685 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4) 686 687 CHECK_RU_ALLOC(26); 688 CHECK_RU_ALLOC(52); 689 CHECK_RU_ALLOC(106); 690 CHECK_RU_ALLOC(242); 691 CHECK_RU_ALLOC(484); 692 CHECK_RU_ALLOC(996); 693 CHECK_RU_ALLOC(2x996); 694 695 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC, 696 status->he_ru + 4); 697 break; 698 default: 699 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw); 700 } 701 702 /* ensure 2 byte alignment */ 703 while ((pos - (u8 *)rthdr) & 1) 704 pos++; 705 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE)); 706 memcpy(pos, &he, sizeof(he)); 707 pos += sizeof(he); 708 } 709 
710 if (status->encoding == RX_ENC_HE && 711 status->flag & RX_FLAG_RADIOTAP_HE_MU) { 712 /* ensure 2 byte alignment */ 713 while ((pos - (u8 *)rthdr) & 1) 714 pos++; 715 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU)); 716 memcpy(pos, &he_mu, sizeof(he_mu)); 717 pos += sizeof(he_mu); 718 } 719 720 if (status->flag & RX_FLAG_NO_PSDU) { 721 rthdr->it_present |= 722 cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU)); 723 *pos++ = status->zero_length_psdu_type; 724 } 725 726 if (status->flag & RX_FLAG_RADIOTAP_LSIG) { 727 /* ensure 2 byte alignment */ 728 while ((pos - (u8 *)rthdr) & 1) 729 pos++; 730 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG)); 731 memcpy(pos, &lsig, sizeof(lsig)); 732 pos += sizeof(lsig); 733 } 734 735 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) { 736 *pos++ = status->chain_signal[chain]; 737 *pos++ = chain; 738 } 739 } 740 741 static struct sk_buff * 742 ieee80211_make_monitor_skb(struct ieee80211_local *local, 743 struct sk_buff **origskb, 744 struct ieee80211_rate *rate, 745 int rtap_space, bool use_origskb) 746 { 747 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb); 748 int rt_hdrlen, needed_headroom; 749 struct sk_buff *skb; 750 751 /* room for the radiotap header based on driver features */ 752 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb); 753 needed_headroom = rt_hdrlen - rtap_space; 754 755 if (use_origskb) { 756 /* only need to expand headroom if necessary */ 757 skb = *origskb; 758 *origskb = NULL; 759 760 /* 761 * This shouldn't trigger often because most devices have an 762 * RX header they pull before we get here, and that should 763 * be big enough for our radiotap information. We should 764 * probably export the length to drivers so that we can have 765 * them allocate enough headroom to start with. 766 */ 767 if (skb_headroom(skb) < needed_headroom && 768 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) { 769 dev_kfree_skb(skb); 770 return NULL; 771 } 772 } else { 773 /* 774 * Need to make a copy and possibly remove radiotap header 775 * and FCS from the original. 
776 */ 777 skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD, 778 0, GFP_ATOMIC); 779 780 if (!skb) 781 return NULL; 782 } 783 784 /* prepend radiotap information */ 785 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true); 786 787 skb_reset_mac_header(skb); 788 skb->ip_summed = CHECKSUM_UNNECESSARY; 789 skb->pkt_type = PACKET_OTHERHOST; 790 skb->protocol = htons(ETH_P_802_2); 791 792 return skb; 793 } 794 795 static bool 796 ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata, 797 struct ieee80211_local *local, 798 struct ieee80211_rx_status *status) 799 { 800 struct wiphy *wiphy = local->hw.wiphy; 801 int i, freq, bw; 802 803 if (!wiphy->n_radio) 804 return true; 805 806 switch (status->bw) { 807 case RATE_INFO_BW_20: 808 bw = 20000; 809 break; 810 case RATE_INFO_BW_40: 811 bw = 40000; 812 break; 813 case RATE_INFO_BW_80: 814 bw = 80000; 815 break; 816 case RATE_INFO_BW_160: 817 bw = 160000; 818 break; 819 case RATE_INFO_BW_320: 820 bw = 320000; 821 break; 822 default: 823 return false; 824 } 825 826 freq = MHZ_TO_KHZ(status->freq); 827 828 for (i = 0; i < wiphy->n_radio; i++) { 829 if (!(sdata->wdev.radio_mask & BIT(i))) 830 continue; 831 832 if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw)) 833 continue; 834 835 return true; 836 } 837 return false; 838 } 839 840 /* 841 * This function copies a received frame to all monitor interfaces and 842 * returns a cleaned-up SKB that no longer includes the FCS nor the 843 * radiotap header the driver might have added. 844 */ 845 static struct sk_buff * 846 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, 847 struct ieee80211_rate *rate) 848 { 849 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb); 850 struct ieee80211_sub_if_data *sdata, *prev_sdata = NULL; 851 struct sk_buff *skb, *monskb = NULL; 852 int present_fcs_len = 0; 853 unsigned int rtap_space = 0; 854 struct ieee80211_sub_if_data *monitor_sdata = 855 rcu_dereference(local->monitor_sdata); 856 bool only_monitor = false; 857 unsigned int min_head_len; 858 859 if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END && 860 !skb_mac_header_was_set(origskb))) { 861 /* with this skb no way to know where frame payload starts */ 862 dev_kfree_skb(origskb); 863 return NULL; 864 } 865 866 if (status->flag & RX_FLAG_RADIOTAP_VHT) 867 rtap_space += sizeof(struct ieee80211_radiotap_vht); 868 869 if (status->flag & RX_FLAG_RADIOTAP_HE) 870 rtap_space += sizeof(struct ieee80211_radiotap_he); 871 872 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) 873 rtap_space += sizeof(struct ieee80211_radiotap_he_mu); 874 875 if (status->flag & RX_FLAG_RADIOTAP_LSIG) 876 rtap_space += sizeof(struct ieee80211_radiotap_lsig); 877 878 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) 879 rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space]; 880 881 min_head_len = rtap_space; 882 883 /* 884 * First, we may need to make a copy of the skb because 885 * (1) we need to modify it for radiotap (if not present), and 886 * (2) the other RX handlers will modify the skb we got. 887 * 888 * We don't need to, of course, if we aren't going to return 889 * the SKB because it has a bad FCS/PLCP checksum. 
890 */ 891 892 if (!(status->flag & RX_FLAG_NO_PSDU)) { 893 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) { 894 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) { 895 /* driver bug */ 896 WARN_ON(1); 897 dev_kfree_skb(origskb); 898 return NULL; 899 } 900 present_fcs_len = FCS_LEN; 901 } 902 903 /* also consider the hdr->frame_control */ 904 min_head_len += 2; 905 } 906 907 /* ensure that the expected data elements are in skb head */ 908 if (!pskb_may_pull(origskb, min_head_len)) { 909 dev_kfree_skb(origskb); 910 return NULL; 911 } 912 913 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space); 914 915 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) { 916 if (only_monitor) { 917 dev_kfree_skb(origskb); 918 return NULL; 919 } 920 921 return ieee80211_clean_skb(origskb, present_fcs_len, 922 rtap_space); 923 } 924 925 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space); 926 927 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) { 928 struct cfg80211_chan_def *chandef; 929 930 chandef = &sdata->vif.bss_conf.chanreq.oper; 931 if (chandef->chan && 932 chandef->chan->center_freq != status->freq) 933 continue; 934 935 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) && 936 !ieee80211_validate_monitor_radio(sdata, local, status)) 937 continue; 938 939 if (!prev_sdata) { 940 prev_sdata = sdata; 941 continue; 942 } 943 944 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR)) 945 ieee80211_handle_mu_mimo_mon(sdata, origskb, rtap_space); 946 947 if (!monskb) 948 monskb = ieee80211_make_monitor_skb(local, &origskb, 949 rate, rtap_space, 950 false); 951 if (!monskb) 952 continue; 953 954 skb = skb_clone(monskb, GFP_ATOMIC); 955 if (!skb) 956 continue; 957 958 skb->dev = prev_sdata->dev; 959 dev_sw_netstats_rx_add(skb->dev, skb->len); 960 netif_receive_skb(skb); 961 prev_sdata = sdata; 962 } 963 964 if (prev_sdata) { 965 if (monskb) 966 skb = monskb; 967 else 968 skb = ieee80211_make_monitor_skb(local, &origskb, 969 rate, rtap_space, 970 only_monitor); 971 if (skb) { 972 skb->dev = prev_sdata->dev; 973 dev_sw_netstats_rx_add(skb->dev, skb->len); 974 netif_receive_skb(skb); 975 } 976 } 977 978 if (!origskb) 979 return NULL; 980 981 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space); 982 } 983 984 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx) 985 { 986 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 987 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 988 int tid, seqno_idx, security_idx; 989 990 /* does the frame have a qos control field? */ 991 if (ieee80211_is_data_qos(hdr->frame_control)) { 992 u8 *qc = ieee80211_get_qos_ctl(hdr); 993 /* frame has qos control */ 994 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 995 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT) 996 status->rx_flags |= IEEE80211_RX_AMSDU; 997 998 seqno_idx = tid; 999 security_idx = tid; 1000 } else { 1001 /* 1002 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 1003 * 1004 * Sequence numbers for management frames, QoS data 1005 * frames with a broadcast/multicast address in the 1006 * Address 1 field, and all non-QoS data frames sent 1007 * by QoS STAs are assigned using an additional single 1008 * modulo-4096 counter, [...] 1009 * 1010 * We also use that counter for non-QoS STAs. 
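		 * That shared counter is tracked at index IEEE80211_NUM_TIDS,
		 * i.e. one slot past the per-TID entries, as set just below.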
1011 */ 1012 seqno_idx = IEEE80211_NUM_TIDS; 1013 security_idx = 0; 1014 if (ieee80211_is_mgmt(hdr->frame_control)) 1015 security_idx = IEEE80211_NUM_TIDS; 1016 tid = 0; 1017 } 1018 1019 rx->seqno_idx = seqno_idx; 1020 rx->security_idx = security_idx; 1021 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 1022 * For now, set skb->priority to 0 for other cases. */ 1023 rx->skb->priority = (tid > 7) ? 0 : tid; 1024 } 1025 1026 /** 1027 * DOC: Packet alignment 1028 * 1029 * Drivers always need to pass packets that are aligned to two-byte boundaries 1030 * to the stack. 1031 * 1032 * Additionally, they should, if possible, align the payload data in a way that 1033 * guarantees that the contained IP header is aligned to a four-byte 1034 * boundary. In the case of regular frames, this simply means aligning the 1035 * payload to a four-byte boundary (because either the IP header is directly 1036 * contained, or IV/RFC1042 headers that have a length divisible by four are 1037 * in front of it). If the payload data is not properly aligned and the 1038 * architecture doesn't support efficient unaligned operations, mac80211 1039 * will align the data. 1040 * 1041 * With A-MSDU frames, however, the payload data address must yield two modulo 1042 * four because there are 14-byte 802.3 headers within the A-MSDU frames that 1043 * push the IP header further back to a multiple of four again. Thankfully, the 1044 * specs were sane enough this time around to require padding each A-MSDU 1045 * subframe to a length that is a multiple of four. 1046 * 1047 * Padding like Atheros hardware adds which is between the 802.11 header and 1048 * the payload is not supported; the driver is required to move the 802.11 1049 * header to be directly in front of the payload in that case. 
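 *
 * As an example, with an 8-byte CCMP IV and an 8-byte RFC 1042 header in
 * front of the IP header (8 + 8 = 16 bytes, a multiple of four), aligning
 * the start of the payload to a four-byte boundary keeps the IP header
 * four-byte aligned as well.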
 */
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
	WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
#endif
}


/* rx handlers */

static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!is_multicast_ether_addr(hdr->addr1))
		return 0;

	return ieee80211_is_robust_mgmt_frame(skb);
}


/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
{
	struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
	struct ieee80211_mmie *mmie;
	struct ieee80211_mmie_16 *mmie16;

	if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
		return -1;

	if (!ieee80211_is_robust_mgmt_frame(skb) &&
	    !ieee80211_is_beacon(hdr->frame_control))
		return -1; /* not a robust management frame */

	mmie = (struct ieee80211_mmie *)
		(skb->data + skb->len - sizeof(*mmie));
	if (mmie->element_id == WLAN_EID_MMIE &&
	    mmie->length == sizeof(*mmie) - 2)
		return le16_to_cpu(mmie->key_id);

	mmie16 = (struct ieee80211_mmie_16 *)
		(skb->data + skb->len - sizeof(*mmie16));
	if (skb->len >= 24 + sizeof(*mmie16) &&
	    mmie16->element_id == WLAN_EID_MMIE &&
	    mmie16->length == sizeof(*mmie16) - 2)
		return le16_to_cpu(mmie16->key_id);

	return -1;
}

static int ieee80211_get_keyid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	int hdrlen = ieee80211_hdrlen(fc);
	u8 keyid;

	/* WEP, TKIP, CCMP and GCMP */
	if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
		return -EINVAL;

	skb_copy_bits(skb, hdrlen + 3, &keyid, 1);

	keyid >>= 6;

	return keyid;
}

static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
	char *dev_addr = rx->sdata->vif.addr;

	if (ieee80211_is_data(hdr->frame_control)) {
		if (is_multicast_ether_addr(hdr->addr1)) {
			if (ieee80211_has_tods(hdr->frame_control) ||
			    !ieee80211_has_fromds(hdr->frame_control))
				return RX_DROP_U_MESH_DS_BITS;
			if (ether_addr_equal(hdr->addr3, dev_addr))
				return RX_DROP_U_MESH_A3_MISMATCH;
		} else {
			if (!ieee80211_has_a4(hdr->frame_control))
				return RX_DROP_U_MESH_NO_A4;
			if (ether_addr_equal(hdr->addr4, dev_addr))
				return RX_DROP_U_MESH_A4_MISMATCH;
		}
	}

	/* If there is not an established peer link and this is not a peer link
	 * establishment frame, beacon or probe, drop the frame.
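	 * (Self-protected action frames and Authentication frames are let
	 * through below so that mesh peering and SAE authentication can take
	 * place before the plink reaches NL80211_PLINK_ESTAB.)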
1153 */ 1154 1155 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) { 1156 struct ieee80211_mgmt *mgmt; 1157 1158 if (!ieee80211_is_mgmt(hdr->frame_control)) 1159 return RX_DROP_U_MESH_UNEXP_DATA; 1160 1161 if (ieee80211_is_action(hdr->frame_control)) { 1162 u8 category; 1163 1164 /* make sure category field is present */ 1165 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 1166 return RX_DROP_U_RUNT_ACTION; 1167 1168 mgmt = (struct ieee80211_mgmt *)hdr; 1169 category = mgmt->u.action.category; 1170 if (category != WLAN_CATEGORY_MESH_ACTION && 1171 category != WLAN_CATEGORY_SELF_PROTECTED) 1172 return RX_DROP_U_MESH_WRONG_ACTION; 1173 return RX_CONTINUE; 1174 } 1175 1176 if (ieee80211_is_probe_req(hdr->frame_control) || 1177 ieee80211_is_probe_resp(hdr->frame_control) || 1178 ieee80211_is_beacon(hdr->frame_control) || 1179 ieee80211_is_auth(hdr->frame_control)) 1180 return RX_CONTINUE; 1181 1182 return RX_DROP_U_MESH_UNEXP_MGMT; 1183 } 1184 1185 return RX_CONTINUE; 1186 } 1187 1188 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, 1189 int index) 1190 { 1191 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index]; 1192 struct sk_buff *tail = skb_peek_tail(frames); 1193 struct ieee80211_rx_status *status; 1194 1195 if (tid_agg_rx->reorder_buf_filtered && 1196 tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) 1197 return true; 1198 1199 if (!tail) 1200 return false; 1201 1202 status = IEEE80211_SKB_RXCB(tail); 1203 if (status->flag & RX_FLAG_AMSDU_MORE) 1204 return false; 1205 1206 return true; 1207 } 1208 1209 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, 1210 struct tid_ampdu_rx *tid_agg_rx, 1211 int index, 1212 struct sk_buff_head *frames) 1213 { 1214 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index]; 1215 struct sk_buff *skb; 1216 struct ieee80211_rx_status *status; 1217 1218 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1219 1220 if (skb_queue_empty(skb_list)) 1221 goto no_frame; 1222 1223 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1224 __skb_queue_purge(skb_list); 1225 goto no_frame; 1226 } 1227 1228 /* release frames from the reorder ring buffer */ 1229 tid_agg_rx->stored_mpdu_num--; 1230 while ((skb = __skb_dequeue(skb_list))) { 1231 status = IEEE80211_SKB_RXCB(skb); 1232 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE; 1233 __skb_queue_tail(frames, skb); 1234 } 1235 1236 no_frame: 1237 if (tid_agg_rx->reorder_buf_filtered) 1238 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 1239 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); 1240 } 1241 1242 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata, 1243 struct tid_ampdu_rx *tid_agg_rx, 1244 u16 head_seq_num, 1245 struct sk_buff_head *frames) 1246 { 1247 int index; 1248 1249 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1250 1251 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) { 1252 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1253 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1254 frames); 1255 } 1256 } 1257 1258 /* 1259 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If 1260 * the skb was added to the buffer longer than this time ago, the earlier 1261 * frames that have not yet been received are assumed to be lost and the skb 1262 * can be released for processing. This may also release other skb's from the 1263 * reorder buffer if there are no additional gaps between the frames. 
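 * The value below works out to HZ / 10 jiffies, i.e. about 100 ms.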
1264 * 1265 * Callers must hold tid_agg_rx->reorder_lock. 1266 */ 1267 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10) 1268 1269 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata, 1270 struct tid_ampdu_rx *tid_agg_rx, 1271 struct sk_buff_head *frames) 1272 { 1273 int index, i, j; 1274 1275 lockdep_assert_held(&tid_agg_rx->reorder_lock); 1276 1277 /* release the buffer until next missing frame */ 1278 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1279 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) && 1280 tid_agg_rx->stored_mpdu_num) { 1281 /* 1282 * No buffers ready to be released, but check whether any 1283 * frames in the reorder buffer have timed out. 1284 */ 1285 int skipped = 1; 1286 for (j = (index + 1) % tid_agg_rx->buf_size; j != index; 1287 j = (j + 1) % tid_agg_rx->buf_size) { 1288 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) { 1289 skipped++; 1290 continue; 1291 } 1292 if (skipped && 1293 !time_after(jiffies, tid_agg_rx->reorder_time[j] + 1294 HT_RX_REORDER_BUF_TIMEOUT)) 1295 goto set_release_timer; 1296 1297 /* don't leave incomplete A-MSDUs around */ 1298 for (i = (index + 1) % tid_agg_rx->buf_size; i != j; 1299 i = (i + 1) % tid_agg_rx->buf_size) 1300 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]); 1301 1302 ht_dbg_ratelimited(sdata, 1303 "release an RX reorder frame due to timeout on earlier frames\n"); 1304 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j, 1305 frames); 1306 1307 /* 1308 * Increment the head seq# also for the skipped slots. 1309 */ 1310 tid_agg_rx->head_seq_num = 1311 (tid_agg_rx->head_seq_num + 1312 skipped) & IEEE80211_SN_MASK; 1313 skipped = 0; 1314 } 1315 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) { 1316 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index, 1317 frames); 1318 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1319 } 1320 1321 if (tid_agg_rx->stored_mpdu_num) { 1322 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size; 1323 1324 for (; j != (index - 1) % tid_agg_rx->buf_size; 1325 j = (j + 1) % tid_agg_rx->buf_size) { 1326 if (ieee80211_rx_reorder_ready(tid_agg_rx, j)) 1327 break; 1328 } 1329 1330 set_release_timer: 1331 1332 if (!tid_agg_rx->removed) 1333 mod_timer(&tid_agg_rx->reorder_timer, 1334 tid_agg_rx->reorder_time[j] + 1 + 1335 HT_RX_REORDER_BUF_TIMEOUT); 1336 } else { 1337 timer_delete(&tid_agg_rx->reorder_timer); 1338 } 1339 } 1340 1341 /* 1342 * As this function belongs to the RX path it must be under 1343 * rcu_read_lock protection. It returns false if the frame 1344 * can be processed immediately, true if it was consumed. 1345 */ 1346 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata, 1347 struct tid_ampdu_rx *tid_agg_rx, 1348 struct sk_buff *skb, 1349 struct sk_buff_head *frames) 1350 { 1351 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1352 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 1353 u16 mpdu_seq_num = ieee80211_get_sn(hdr); 1354 u16 head_seq_num, buf_size; 1355 int index; 1356 bool ret = true; 1357 1358 spin_lock(&tid_agg_rx->reorder_lock); 1359 1360 /* 1361 * Offloaded BA sessions have no known starting sequence number so pick 1362 * one from first Rxed frame for this tid after BA was started. 
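	 * (auto_seq is cleared below once that first frame has seeded both
	 * ssn and head_seq_num.)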
	 */
	if (unlikely(tid_agg_rx->auto_seq)) {
		tid_agg_rx->auto_seq = false;
		tid_agg_rx->ssn = mpdu_seq_num;
		tid_agg_rx->head_seq_num = mpdu_seq_num;
	}

	buf_size = tid_agg_rx->buf_size;
	head_seq_num = tid_agg_rx->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (unlikely(!tid_agg_rx->started)) {
		if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
			ret = false;
			goto out;
		}
		tid_agg_rx->started = true;
	}

	/* frame with out of date sequence number */
	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window
	 * size, release some previous frames to make room for this one.
	 */
	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = ieee80211_sn_inc(
				ieee80211_sn_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
						 head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = mpdu_seq_num % tid_agg_rx->buf_size;

	/* check if we already stored this frame */
	if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
		dev_kfree_skb(skb);
		goto out;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
	    tid_agg_rx->stored_mpdu_num == 0) {
		if (!(status->flag & RX_FLAG_AMSDU_MORE))
			tid_agg_rx->head_seq_num =
				ieee80211_sn_inc(tid_agg_rx->head_seq_num);
		ret = false;
		goto out;
	}

	/* put the frame in the reordering buffer */
	__skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
	if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
		tid_agg_rx->reorder_time[index] = jiffies;
		tid_agg_rx->stored_mpdu_num++;
		ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
	}

 out:
	spin_unlock(&tid_agg_rx->reorder_lock);
	return ret;
}

/*
 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. Frames that can be
 * processed right away are moved to the frames queue; out-of-order frames
 * stay buffered until the reorder logic releases them.
1444 */ 1445 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, 1446 struct sk_buff_head *frames) 1447 { 1448 struct sk_buff *skb = rx->skb; 1449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1450 struct sta_info *sta = rx->sta; 1451 struct tid_ampdu_rx *tid_agg_rx; 1452 u16 sc; 1453 u8 tid, ack_policy; 1454 1455 if (!ieee80211_is_data_qos(hdr->frame_control) || 1456 is_multicast_ether_addr(hdr->addr1)) 1457 goto dont_reorder; 1458 1459 /* 1460 * filter the QoS data rx stream according to 1461 * STA/TID and check if this STA/TID is on aggregation 1462 */ 1463 1464 if (!sta) 1465 goto dont_reorder; 1466 1467 ack_policy = *ieee80211_get_qos_ctl(hdr) & 1468 IEEE80211_QOS_CTL_ACK_POLICY_MASK; 1469 tid = ieee80211_get_tid(hdr); 1470 1471 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 1472 if (!tid_agg_rx) { 1473 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK && 1474 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 1475 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 1476 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 1477 WLAN_BACK_RECIPIENT, 1478 WLAN_REASON_QSTA_REQUIRE_SETUP); 1479 goto dont_reorder; 1480 } 1481 1482 /* qos null data frames are excluded */ 1483 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 1484 goto dont_reorder; 1485 1486 /* not part of a BA session */ 1487 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK) 1488 goto dont_reorder; 1489 1490 /* new, potentially un-ordered, ampdu frame - process it */ 1491 1492 /* reset session timer */ 1493 if (tid_agg_rx->timeout) 1494 tid_agg_rx->last_rx = jiffies; 1495 1496 /* if this mpdu is fragmented - terminate rx aggregation session */ 1497 sc = le16_to_cpu(hdr->seq_ctrl); 1498 if (sc & IEEE80211_SCTL_FRAG) { 1499 ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb); 1500 return; 1501 } 1502 1503 /* 1504 * No locking needed -- we will only ever process one 1505 * RX packet at a time, and thus own tid_agg_rx. All 1506 * other code manipulating it needs to (and does) make 1507 * sure that we cannot get to it any more before doing 1508 * anything with it. 
1509 */ 1510 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb, 1511 frames)) 1512 return; 1513 1514 dont_reorder: 1515 __skb_queue_tail(frames, skb); 1516 } 1517 1518 static ieee80211_rx_result debug_noinline 1519 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx) 1520 { 1521 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1522 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1523 1524 if (status->flag & RX_FLAG_DUP_VALIDATED) 1525 return RX_CONTINUE; 1526 1527 /* 1528 * Drop duplicate 802.11 retransmissions 1529 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") 1530 */ 1531 1532 if (rx->skb->len < 24) 1533 return RX_CONTINUE; 1534 1535 if (ieee80211_is_ctl(hdr->frame_control) || 1536 ieee80211_is_any_nullfunc(hdr->frame_control)) 1537 return RX_CONTINUE; 1538 1539 if (!rx->sta) 1540 return RX_CONTINUE; 1541 1542 if (unlikely(is_multicast_ether_addr(hdr->addr1))) { 1543 struct ieee80211_sub_if_data *sdata = rx->sdata; 1544 u16 sn = ieee80211_get_sn(hdr); 1545 1546 if (!ieee80211_is_data_present(hdr->frame_control)) 1547 return RX_CONTINUE; 1548 1549 if (!ieee80211_vif_is_mld(&sdata->vif) || 1550 sdata->vif.type != NL80211_IFTYPE_STATION) 1551 return RX_CONTINUE; 1552 1553 if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO && 1554 ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last)) 1555 return RX_DROP_U_DUP; 1556 1557 sdata->u.mgd.mcast_seq_last = sn; 1558 return RX_CONTINUE; 1559 } 1560 1561 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 1562 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) { 1563 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount); 1564 rx->link_sta->rx_stats.num_duplicates++; 1565 return RX_DROP_U_DUP; 1566 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) { 1567 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl; 1568 } 1569 1570 return RX_CONTINUE; 1571 } 1572 1573 static ieee80211_rx_result debug_noinline 1574 ieee80211_rx_h_check(struct ieee80211_rx_data *rx) 1575 { 1576 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1577 1578 /* Drop disallowed frame classes based on STA auth/assoc state; 1579 * IEEE 802.11, Chap 5.5. 1580 * 1581 * mac80211 filters only based on association state, i.e. it drops 1582 * Class 3 frames from not associated stations. hostapd sends 1583 * deauth/disassoc frames when needed. In addition, hostapd is 1584 * responsible for filtering on both auth and assoc states. 
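	 * The one exception, handled below, is control-port traffic (e.g.
	 * EAPOL) from the AP on a station interface, which is accepted even
	 * before the ASSOC flag is set to avoid a race with the first frame
	 * received after association.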
	 */

	if (ieee80211_vif_is_mesh(&rx->sdata->vif))
		return ieee80211_rx_mesh_check(rx);

	if (unlikely((ieee80211_is_data(hdr->frame_control) ||
		      ieee80211_is_pspoll(hdr->frame_control)) &&
		     rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
		     rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
		     (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
		/*
		 * accept port control frames from the AP even when it's not
		 * yet marked ASSOC to prevent a race where we don't set the
		 * assoc bit quickly enough before it sends the first frame
		 */
		if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
		    ieee80211_is_data_present(hdr->frame_control)) {
			unsigned int hdrlen;
			__be16 ethertype;

			hdrlen = ieee80211_hdrlen(hdr->frame_control);

			if (rx->skb->len < hdrlen + 8)
				return RX_DROP_U_RUNT_DATA;

			skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
			if (ethertype == rx->sdata->control_port_protocol)
				return RX_CONTINUE;
		}

		if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
		    cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2,
					       rx->link_id, GFP_ATOMIC))
			return RX_DROP_U_SPURIOUS_NOTIF;

		return RX_DROP_U_SPURIOUS;
	}

	return RX_CONTINUE;
}


static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
	struct ieee80211_local *local;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;

	local = rx->local;
	skb = rx->skb;
	hdr = (struct ieee80211_hdr *) skb->data;

	if (!local->pspolling)
		return RX_CONTINUE;

	if (!ieee80211_has_fromds(hdr->frame_control))
		/* this is not from AP */
		return RX_CONTINUE;

	if (!ieee80211_is_data(hdr->frame_control))
		return RX_CONTINUE;

	if (!ieee80211_has_moredata(hdr->frame_control)) {
		/* AP has no more frames buffered for us */
		local->pspolling = false;
		return RX_CONTINUE;
	}

	/* more data bit is set, let's request a new frame from the AP */
	ieee80211_send_pspoll(local, rx->sdata);

	return RX_CONTINUE;
}

static void sta_ps_start(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct ps_data *ps;
	int tid;

	if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
	    sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
		ps = &sdata->bss->ps;
	else
		return;

	atomic_inc(&ps->num_sta_ps);
	set_sta_flag(sta, WLAN_STA_PS_STA);
	if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
		drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
	ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
	       sta->sta.addr, sta->sta.aid);

	ieee80211_clear_fast_xmit(sta);

	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
		struct ieee80211_txq *txq = sta->sta.txq[tid];
		struct txq_info *txqi = to_txq_info(txq);

		spin_lock(&local->active_txq_lock[txq->ac]);
		if (!list_empty(&txqi->schedule_order))
			list_del_init(&txqi->schedule_order);
		spin_unlock(&local->active_txq_lock[txq->ac]);

		if (txq_has_queue(txq))
			set_bit(tid, &sta->txq_buffered_tids);
		else
			clear_bit(tid, &sta->txq_buffered_tids);
	}
}

static void sta_ps_end(struct sta_info *sta)
{
	ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
sta->sta.addr, sta->sta.aid); 1702 1703 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { 1704 /* 1705 * Clear the flag only if the other one is still set 1706 * so that the TX path won't start TX'ing new frames 1707 * directly ... In the case that the driver flag isn't 1708 * set ieee80211_sta_ps_deliver_wakeup() will clear it. 1709 */ 1710 clear_sta_flag(sta, WLAN_STA_PS_STA); 1711 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", 1712 sta->sta.addr, sta->sta.aid); 1713 return; 1714 } 1715 1716 set_sta_flag(sta, WLAN_STA_PS_DELIVER); 1717 clear_sta_flag(sta, WLAN_STA_PS_STA); 1718 ieee80211_sta_ps_deliver_wakeup(sta); 1719 } 1720 1721 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start) 1722 { 1723 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1724 bool in_ps; 1725 1726 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS)); 1727 1728 /* Don't let the same PS state be set twice */ 1729 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA); 1730 if ((start && in_ps) || (!start && !in_ps)) 1731 return -EINVAL; 1732 1733 if (start) 1734 sta_ps_start(sta); 1735 else 1736 sta_ps_end(sta); 1737 1738 return 0; 1739 } 1740 EXPORT_SYMBOL(ieee80211_sta_ps_transition); 1741 1742 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta) 1743 { 1744 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1745 1746 if (test_sta_flag(sta, WLAN_STA_SP)) 1747 return; 1748 1749 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1750 ieee80211_sta_ps_deliver_poll_response(sta); 1751 else 1752 set_sta_flag(sta, WLAN_STA_PSPOLL); 1753 } 1754 EXPORT_SYMBOL(ieee80211_sta_pspoll); 1755 1756 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid) 1757 { 1758 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 1759 int ac = ieee80211_ac_from_tid(tid); 1760 1761 /* 1762 * If this AC is not trigger-enabled do nothing unless the 1763 * driver is calling us after it already checked. 1764 * 1765 * NB: This could/should check a separate bitmap of trigger- 1766 * enabled queues, but for now we only implement uAPSD w/o 1767 * TSPEC changes to the ACs, so they're always the same. 1768 */ 1769 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) && 1770 tid != IEEE80211_NUM_TIDS) 1771 return; 1772 1773 /* if we are in a service period, do nothing */ 1774 if (test_sta_flag(sta, WLAN_STA_SP)) 1775 return; 1776 1777 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER)) 1778 ieee80211_sta_ps_deliver_uapsd(sta); 1779 else 1780 set_sta_flag(sta, WLAN_STA_UAPSD); 1781 } 1782 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger); 1783 1784 static ieee80211_rx_result debug_noinline 1785 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx) 1786 { 1787 struct ieee80211_sub_if_data *sdata = rx->sdata; 1788 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 1789 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 1790 1791 if (!rx->sta) 1792 return RX_CONTINUE; 1793 1794 if (sdata->vif.type != NL80211_IFTYPE_AP && 1795 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 1796 return RX_CONTINUE; 1797 1798 /* 1799 * The device handles station powersave, so don't do anything about 1800 * uAPSD and PS-Poll frames (the latter shouldn't even come up from 1801 * it to mac80211 since they're handled.) 1802 */ 1803 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS)) 1804 return RX_CONTINUE; 1805 1806 /* 1807 * Don't do anything if the station isn't already asleep. 
In
	 * the uAPSD case, the station will probably be marked asleep,
	 * in the PS-Poll case the station must be confused ...
	 */
	if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
		return RX_CONTINUE;

	if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
		ieee80211_sta_pspoll(&rx->sta->sta);

		/* Free PS Poll skb here instead of returning RX_DROP that would
		 * count as a dropped frame. */
		dev_kfree_skb(rx->skb);

		return RX_QUEUED;
	} else if (!ieee80211_has_morefrags(hdr->frame_control) &&
		   !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
		   ieee80211_has_pm(hdr->frame_control) &&
		   (ieee80211_is_data_qos(hdr->frame_control) ||
		    ieee80211_is_qos_nullfunc(hdr->frame_control))) {
		u8 tid = ieee80211_get_tid(hdr);

		ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
	}

	return RX_CONTINUE;
}

static ieee80211_rx_result debug_noinline
ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
{
	struct sta_info *sta = rx->sta;
	struct link_sta_info *link_sta = rx->link_sta;
	struct sk_buff *skb = rx->skb;
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int i;

	if (!sta || !link_sta)
		return RX_CONTINUE;

	/*
	 * Update last_rx only for IBSS packets which are for the current
	 * BSSID and for stations already AUTHORIZED to avoid keeping the
	 * current IBSS network alive in cases where other STAs start
	 * using different BSSID. This will also give the station another
	 * chance to restart the authentication/authorization in case
	 * something went wrong the first time.
	 */
	if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
		u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
						NL80211_IFTYPE_ADHOC);
		if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
		    test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
			link_sta->rx_stats.last_rx = jiffies;
			if (ieee80211_is_data_present(hdr->frame_control) &&
			    !is_multicast_ether_addr(hdr->addr1))
				link_sta->rx_stats.last_rate =
					sta_stats_encode_rate(status);
		}
	} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
		link_sta->rx_stats.last_rx = jiffies;
	} else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
		   !is_multicast_ether_addr(hdr->addr1)) {
		/*
		 * Mesh beacons will update last_rx if they are found to
		 * match the current local configuration when processed.
1874 */ 1875 link_sta->rx_stats.last_rx = jiffies; 1876 if (ieee80211_is_data_present(hdr->frame_control)) 1877 link_sta->rx_stats.last_rate = sta_stats_encode_rate(status); 1878 } 1879 1880 link_sta->rx_stats.fragments++; 1881 1882 u64_stats_update_begin(&link_sta->rx_stats.syncp); 1883 u64_stats_add(&link_sta->rx_stats.bytes, rx->skb->len); 1884 u64_stats_update_end(&link_sta->rx_stats.syncp); 1885 1886 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 1887 link_sta->rx_stats.last_signal = status->signal; 1888 ewma_signal_add(&link_sta->rx_stats_avg.signal, 1889 -status->signal); 1890 } 1891 1892 if (status->chains) { 1893 link_sta->rx_stats.chains = status->chains; 1894 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 1895 int signal = status->chain_signal[i]; 1896 1897 if (!(status->chains & BIT(i))) 1898 continue; 1899 1900 link_sta->rx_stats.chain_signal_last[i] = signal; 1901 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], 1902 -signal); 1903 } 1904 } 1905 1906 if (ieee80211_is_s1g_beacon(hdr->frame_control)) 1907 return RX_CONTINUE; 1908 1909 /* 1910 * Change STA power saving mode only at the end of a frame 1911 * exchange sequence, and only for a data or management 1912 * frame as specified in IEEE 802.11-2016 11.2.3.2 1913 */ 1914 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) && 1915 !ieee80211_has_morefrags(hdr->frame_control) && 1916 !is_multicast_ether_addr(hdr->addr1) && 1917 (ieee80211_is_mgmt(hdr->frame_control) || 1918 ieee80211_is_data(hdr->frame_control)) && 1919 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) && 1920 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1921 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) { 1922 if (test_sta_flag(sta, WLAN_STA_PS_STA)) { 1923 if (!ieee80211_has_pm(hdr->frame_control)) 1924 sta_ps_end(sta); 1925 } else { 1926 if (ieee80211_has_pm(hdr->frame_control)) 1927 sta_ps_start(sta); 1928 } 1929 } 1930 1931 /* mesh power save support */ 1932 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) 1933 ieee80211_mps_rx_h_sta_process(sta, hdr); 1934 1935 /* 1936 * Drop (qos-)data::nullfunc frames silently, since they 1937 * are used only to control station power saving mode. 1938 */ 1939 if (ieee80211_is_any_nullfunc(hdr->frame_control)) { 1940 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc); 1941 1942 /* 1943 * If we receive a 4-addr nullfunc frame from a STA 1944 * that was not moved to a 4-addr STA vlan yet, send 1945 * the event to userspace and, for older hostapd, drop 1946 * the frame to the monitor interface. 1947 */ 1948 if (ieee80211_has_a4(hdr->frame_control) && 1949 (rx->sdata->vif.type == NL80211_IFTYPE_AP || 1950 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 1951 !rx->sdata->u.vlan.sta))) { 1952 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT)) 1953 cfg80211_rx_unexpected_4addr_frame( 1954 rx->sdata->dev, sta->sta.addr, 1955 rx->link_id, GFP_ATOMIC); 1956 return RX_DROP_U_UNEXPECTED_4ADDR_FRAME; 1957 } 1958 /* 1959 * Update counter and free packet here to avoid 1960 * counting this as a dropped packet.
1961 */ 1962 link_sta->rx_stats.packets++; 1963 dev_kfree_skb(rx->skb); 1964 return RX_QUEUED; 1965 } 1966 1967 return RX_CONTINUE; 1968 } /* ieee80211_rx_h_sta_process */ 1969 1970 static struct ieee80211_key * 1971 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx) 1972 { 1973 struct ieee80211_key *key = NULL; 1974 int idx2; 1975 1976 /* Make sure key gets set if either BIGTK key index is set so that 1977 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected 1978 * Beacon frames and Beacon frames that claim to use another BIGTK key 1979 * index (i.e., a key that we do not have). 1980 */ 1981 1982 if (idx < 0) { 1983 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS; 1984 idx2 = idx + 1; 1985 } else { 1986 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 1987 idx2 = idx + 1; 1988 else 1989 idx2 = idx - 1; 1990 } 1991 1992 if (rx->link_sta) 1993 key = rcu_dereference(rx->link_sta->gtk[idx]); 1994 if (!key) 1995 key = rcu_dereference(rx->link->gtk[idx]); 1996 if (!key && rx->link_sta) 1997 key = rcu_dereference(rx->link_sta->gtk[idx2]); 1998 if (!key) 1999 key = rcu_dereference(rx->link->gtk[idx2]); 2000 2001 return key; 2002 } 2003 2004 static ieee80211_rx_result debug_noinline 2005 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) 2006 { 2007 struct sk_buff *skb = rx->skb; 2008 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2010 int keyidx; 2011 ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL; 2012 struct ieee80211_key *sta_ptk = NULL; 2013 struct ieee80211_key *ptk_idx = NULL; 2014 int mmie_keyidx = -1; 2015 __le16 fc; 2016 2017 if (ieee80211_is_ext(hdr->frame_control)) 2018 return RX_CONTINUE; 2019 2020 /* 2021 * Key selection 101 2022 * 2023 * There are five types of keys: 2024 * - GTK (group keys) 2025 * - IGTK (group keys for management frames) 2026 * - BIGTK (group keys for Beacon frames) 2027 * - PTK (pairwise keys) 2028 * - STK (station-to-station pairwise keys) 2029 * 2030 * When selecting a key, we have to distinguish between multicast 2031 * (including broadcast) and unicast frames, the latter can only 2032 * use PTKs and STKs while the former always use GTKs, IGTKs, and 2033 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used, 2034 * then unicast frames can also use key indices like GTKs. Hence, if we 2035 * don't have a PTK/STK we check the key index for a WEP key. 2036 * 2037 * Note that in a regular BSS, multicast frames are sent by the 2038 * AP only, associated stations unicast the frame to the AP first 2039 * which then multicasts it on their behalf. 2040 * 2041 * There is also a slight problem in IBSS mode: GTKs are negotiated 2042 * with each station, that is something we don't currently handle. 2043 * The spec seems to expect that one negotiates the same key with 2044 * every station but there's no such requirement; VLANs could be 2045 * possible. 
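 *
 * As a rough illustration of the selection order implemented below
 * (a sketch, not a literal decision table):
 *
 *	unicast and a PTK/STK is known     -> use the pairwise key
 *	MMIE present on a Beacon frame     -> use the BIGTK (beacon protection)
 *	MMIE present otherwise             -> use the IGTK (BIP)
 *	frame not protected at all         -> only remember a candidate
 *	                                      GTK/IGTK/BIGTK so the frame can be
 *	                                      dropped later if protection was
 *	                                      expected
 *	anything else                      -> look up a GTK/WEP key by the key
 *	                                      index signalled in the frame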
2046 */ 2047 2048 /* start without a key */ 2049 rx->key = NULL; 2050 fc = hdr->frame_control; 2051 2052 if (rx->sta) { 2053 int keyid = rx->sta->ptk_idx; 2054 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]); 2055 2056 if (ieee80211_has_protected(fc) && 2057 !(status->flag & RX_FLAG_IV_STRIPPED)) { 2058 keyid = ieee80211_get_keyid(rx->skb); 2059 2060 if (unlikely(keyid < 0)) 2061 return RX_DROP_U_NO_KEY_ID; 2062 2063 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]); 2064 } 2065 } 2066 2067 if (!ieee80211_has_protected(fc)) 2068 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 2069 2070 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) { 2071 rx->key = ptk_idx ? ptk_idx : sta_ptk; 2072 if ((status->flag & RX_FLAG_DECRYPTED) && 2073 (status->flag & RX_FLAG_IV_STRIPPED)) 2074 return RX_CONTINUE; 2075 /* Skip decryption if the frame is not protected. */ 2076 if (!ieee80211_has_protected(fc)) 2077 return RX_CONTINUE; 2078 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) { 2079 /* Broadcast/multicast robust management frame / BIP */ 2080 if ((status->flag & RX_FLAG_DECRYPTED) && 2081 (status->flag & RX_FLAG_IV_STRIPPED)) 2082 return RX_CONTINUE; 2083 2084 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS || 2085 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS + 2086 NUM_DEFAULT_BEACON_KEYS) { 2087 if (rx->sdata->dev) 2088 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2089 skb->data, 2090 skb->len); 2091 return RX_DROP_U_BAD_BCN_KEYIDX; 2092 } 2093 2094 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx); 2095 if (!rx->key) 2096 return RX_CONTINUE; /* Beacon protection not in use */ 2097 } else if (mmie_keyidx >= 0) { 2098 /* Broadcast/multicast robust management frame / BIP */ 2099 if ((status->flag & RX_FLAG_DECRYPTED) && 2100 (status->flag & RX_FLAG_IV_STRIPPED)) 2101 return RX_CONTINUE; 2102 2103 if (mmie_keyidx < NUM_DEFAULT_KEYS || 2104 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 2105 return RX_DROP_U_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */ 2106 if (rx->link_sta) { 2107 if (ieee80211_is_group_privacy_action(skb) && 2108 test_sta_flag(rx->sta, WLAN_STA_MFP)) 2109 return RX_DROP_U_UNPROTECTED; 2110 2111 rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]); 2112 } 2113 if (!rx->key) 2114 rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]); 2115 } else if (!ieee80211_has_protected(fc)) { 2116 /* 2117 * The frame was not protected, so skip decryption. However, we 2118 * need to set rx->key if there is a key that could have been 2119 * used so that the frame may be dropped if encryption would 2120 * have been expected. 2121 */ 2122 struct ieee80211_key *key = NULL; 2123 int i; 2124 2125 if (ieee80211_is_beacon(fc)) { 2126 key = ieee80211_rx_get_bigtk(rx, -1); 2127 } else if (ieee80211_is_mgmt(fc) && 2128 is_multicast_ether_addr(hdr->addr1)) { 2129 key = rcu_dereference(rx->link->default_mgmt_key); 2130 } else { 2131 if (rx->link_sta) { 2132 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2133 key = rcu_dereference(rx->link_sta->gtk[i]); 2134 if (key) 2135 break; 2136 } 2137 } 2138 if (!key) { 2139 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 2140 key = rcu_dereference(rx->link->gtk[i]); 2141 if (key) 2142 break; 2143 } 2144 } 2145 } 2146 if (key) 2147 rx->key = key; 2148 return RX_CONTINUE; 2149 } else { 2150 /* 2151 * The device doesn't give us the IV so we won't be 2152 * able to look up the key. That's ok though, we 2153 * don't need to decrypt the frame, we just won't 2154 * be able to keep statistics accurate. 
2155 * Except for key threshold notifications, should 2156 * we somehow allow the driver to tell us which key 2157 * the hardware used if this flag is set? 2158 */ 2159 if ((status->flag & RX_FLAG_DECRYPTED) && 2160 (status->flag & RX_FLAG_IV_STRIPPED)) 2161 return RX_CONTINUE; 2162 2163 keyidx = ieee80211_get_keyid(rx->skb); 2164 2165 if (unlikely(keyidx < 0)) 2166 return RX_DROP_U_NO_KEY_ID; 2167 2168 /* check per-station GTK first, if multicast packet */ 2169 if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta) 2170 rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]); 2171 2172 /* if not found, try default key */ 2173 if (!rx->key) { 2174 if (is_multicast_ether_addr(hdr->addr1)) 2175 rx->key = rcu_dereference(rx->link->gtk[keyidx]); 2176 if (!rx->key) 2177 rx->key = rcu_dereference(rx->sdata->keys[keyidx]); 2178 2179 /* 2180 * RSNA-protected unicast frames should always be 2181 * sent with pairwise or station-to-station keys, 2182 * but for WEP we allow using a key index as well. 2183 */ 2184 if (rx->key && 2185 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 && 2186 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 && 2187 !is_multicast_ether_addr(hdr->addr1)) 2188 rx->key = NULL; 2189 } 2190 } 2191 2192 if (rx->key) { 2193 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED)) 2194 return RX_DROP_U_KEY_TAINTED; 2195 2196 /* TODO: add threshold stuff again */ 2197 } else { 2198 return RX_DROP_U_UNPROTECTED; 2199 } 2200 2201 switch (rx->key->conf.cipher) { 2202 case WLAN_CIPHER_SUITE_WEP40: 2203 case WLAN_CIPHER_SUITE_WEP104: 2204 result = ieee80211_crypto_wep_decrypt(rx); 2205 break; 2206 case WLAN_CIPHER_SUITE_TKIP: 2207 result = ieee80211_crypto_tkip_decrypt(rx); 2208 break; 2209 case WLAN_CIPHER_SUITE_CCMP: 2210 result = ieee80211_crypto_ccmp_decrypt( 2211 rx, IEEE80211_CCMP_MIC_LEN); 2212 break; 2213 case WLAN_CIPHER_SUITE_CCMP_256: 2214 result = ieee80211_crypto_ccmp_decrypt( 2215 rx, IEEE80211_CCMP_256_MIC_LEN); 2216 break; 2217 case WLAN_CIPHER_SUITE_AES_CMAC: 2218 result = ieee80211_crypto_aes_cmac_decrypt( 2219 rx, IEEE80211_CMAC_128_MIC_LEN); 2220 break; 2221 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 2222 result = ieee80211_crypto_aes_cmac_decrypt( 2223 rx, IEEE80211_CMAC_256_MIC_LEN); 2224 break; 2225 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 2226 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 2227 result = ieee80211_crypto_aes_gmac_decrypt(rx); 2228 break; 2229 case WLAN_CIPHER_SUITE_GCMP: 2230 case WLAN_CIPHER_SUITE_GCMP_256: 2231 result = ieee80211_crypto_gcmp_decrypt(rx); 2232 break; 2233 default: 2234 result = RX_DROP_U_BAD_CIPHER; 2235 } 2236 2237 /* the hdr variable is invalid after the decrypt handlers */ 2238 2239 /* either the frame has been decrypted or will be dropped */ 2240 status->flag |= RX_FLAG_DECRYPTED; 2241 2242 if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) && 2243 rx->sdata->dev)) 2244 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2245 skb->data, skb->len); 2246 2247 return result; 2248 } 2249 2250 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) 2251 { 2252 int i; 2253 2254 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2255 skb_queue_head_init(&cache->entries[i].skb_list); 2256 } 2257 2258 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) 2259 { 2260 int i; 2261 2262 for (i = 0; i < ARRAY_SIZE(cache->entries); i++) 2263 __skb_queue_purge(&cache->entries[i].skb_list); 2264 } 2265 2266 static inline struct ieee80211_fragment_entry * 2267 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, 2268 
unsigned int frag, unsigned int seq, int rx_queue, 2269 struct sk_buff **skb) 2270 { 2271 struct ieee80211_fragment_entry *entry; 2272 2273 entry = &cache->entries[cache->next++]; 2274 if (cache->next >= IEEE80211_FRAGMENT_MAX) 2275 cache->next = 0; 2276 2277 __skb_queue_purge(&entry->skb_list); 2278 2279 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ 2280 *skb = NULL; 2281 entry->first_frag_time = jiffies; 2282 entry->seq = seq; 2283 entry->rx_queue = rx_queue; 2284 entry->last_frag = frag; 2285 entry->check_sequential_pn = false; 2286 entry->extra_len = 0; 2287 2288 return entry; 2289 } 2290 2291 static inline struct ieee80211_fragment_entry * 2292 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, 2293 unsigned int frag, unsigned int seq, 2294 int rx_queue, struct ieee80211_hdr *hdr) 2295 { 2296 struct ieee80211_fragment_entry *entry; 2297 int i, idx; 2298 2299 idx = cache->next; 2300 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { 2301 struct ieee80211_hdr *f_hdr; 2302 struct sk_buff *f_skb; 2303 2304 idx--; 2305 if (idx < 0) 2306 idx = IEEE80211_FRAGMENT_MAX - 1; 2307 2308 entry = &cache->entries[idx]; 2309 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || 2310 entry->rx_queue != rx_queue || 2311 entry->last_frag + 1 != frag) 2312 continue; 2313 2314 f_skb = __skb_peek(&entry->skb_list); 2315 f_hdr = (struct ieee80211_hdr *) f_skb->data; 2316 2317 /* 2318 * Check ftype and addresses are equal, else check next fragment 2319 */ 2320 if (((hdr->frame_control ^ f_hdr->frame_control) & 2321 cpu_to_le16(IEEE80211_FCTL_FTYPE)) || 2322 !ether_addr_equal(hdr->addr1, f_hdr->addr1) || 2323 !ether_addr_equal(hdr->addr2, f_hdr->addr2)) 2324 continue; 2325 2326 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) { 2327 __skb_queue_purge(&entry->skb_list); 2328 continue; 2329 } 2330 return entry; 2331 } 2332 2333 return NULL; 2334 } 2335 2336 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) 2337 { 2338 return rx->key && 2339 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || 2340 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || 2341 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || 2342 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && 2343 ieee80211_has_protected(fc); 2344 } 2345 2346 static ieee80211_rx_result debug_noinline 2347 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) 2348 { 2349 struct ieee80211_fragment_cache *cache = &rx->sdata->frags; 2350 struct ieee80211_hdr *hdr; 2351 u16 sc; 2352 __le16 fc; 2353 unsigned int frag, seq; 2354 struct ieee80211_fragment_entry *entry; 2355 struct sk_buff *skb; 2356 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2357 2358 hdr = (struct ieee80211_hdr *)rx->skb->data; 2359 fc = hdr->frame_control; 2360 2361 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc)) 2362 return RX_CONTINUE; 2363 2364 sc = le16_to_cpu(hdr->seq_ctrl); 2365 frag = sc & IEEE80211_SCTL_FRAG; 2366 2367 if (rx->sta) 2368 cache = &rx->sta->frags; 2369 2370 if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) 2371 goto out; 2372 2373 if (is_multicast_ether_addr(hdr->addr1)) 2374 return RX_DROP_U_MCAST_FRAGMENT; 2375 2376 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 2377 2378 if (skb_linearize(rx->skb)) 2379 return RX_DROP_U_OOM; 2380 2381 /* 2382 * skb_linearize() might change the skb->data and 2383 * previously cached variables (in this case, hdr) need to 2384 * be refreshed with the new data. 
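 * (This applies to any pointer computed from skb->data before the
 * skb_linearize() call above; e.g. a hypothetical cached
 * "qos = ieee80211_get_qos_ctl(hdr)" pointer would equally be stale
 * at this point and would have to be recomputed from the new
 * skb->data.)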
2385 */ 2386 hdr = (struct ieee80211_hdr *)rx->skb->data; 2387 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2388 2389 if (frag == 0) { 2390 /* This is the first fragment of a new frame. */ 2391 entry = ieee80211_reassemble_add(cache, frag, seq, 2392 rx->seqno_idx, &(rx->skb)); 2393 if (requires_sequential_pn(rx, fc)) { 2394 int queue = rx->security_idx; 2395 2396 /* Store CCMP/GCMP PN so that we can verify that the 2397 * next fragment has a sequential PN value. 2398 */ 2399 entry->check_sequential_pn = true; 2400 entry->is_protected = true; 2401 entry->key_color = rx->key->color; 2402 memcpy(entry->last_pn, 2403 rx->key->u.ccmp.rx_pn[queue], 2404 IEEE80211_CCMP_PN_LEN); 2405 BUILD_BUG_ON(offsetof(struct ieee80211_key, 2406 u.ccmp.rx_pn) != 2407 offsetof(struct ieee80211_key, 2408 u.gcmp.rx_pn)); 2409 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) != 2410 sizeof(rx->key->u.gcmp.rx_pn[queue])); 2411 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != 2412 IEEE80211_GCMP_PN_LEN); 2413 } else if (rx->key && 2414 (ieee80211_has_protected(fc) || 2415 (status->flag & RX_FLAG_DECRYPTED))) { 2416 entry->is_protected = true; 2417 entry->key_color = rx->key->color; 2418 } 2419 return RX_QUEUED; 2420 } 2421 2422 /* This is a fragment for a frame that should already be pending in 2423 * fragment cache. Add this fragment to the end of the pending entry. 2424 */ 2425 entry = ieee80211_reassemble_find(cache, frag, seq, 2426 rx->seqno_idx, hdr); 2427 if (!entry) { 2428 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2429 return RX_DROP_U_DEFRAG_MISMATCH; 2430 } 2431 2432 /* "The receiver shall discard MSDUs and MMPDUs whose constituent 2433 * MPDU PN values are not incrementing in steps of 1." 2434 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP) 2435 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP) 2436 */ 2437 if (entry->check_sequential_pn) { 2438 int i; 2439 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; 2440 2441 if (!requires_sequential_pn(rx, fc)) 2442 return RX_DROP_U_NONSEQ_PN; 2443 2444 /* Prevent mixed key and fragment cache attacks */ 2445 if (entry->key_color != rx->key->color) 2446 return RX_DROP_U_BAD_KEY_COLOR; 2447 2448 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); 2449 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { 2450 pn[i]++; 2451 if (pn[i]) 2452 break; 2453 } 2454 2455 rpn = rx->ccm_gcm.pn; 2456 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) 2457 return RX_DROP_U_REPLAY; 2458 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); 2459 } else if (entry->is_protected && 2460 (!rx->key || 2461 (!ieee80211_has_protected(fc) && 2462 !(status->flag & RX_FLAG_DECRYPTED)) || 2463 rx->key->color != entry->key_color)) { 2464 /* Drop this as a mixed key or fragment cache attack, even 2465 * if for TKIP Michael MIC should protect us, and WEP is a 2466 * lost cause anyway. 
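 *
 * As an aside on the sequential-PN rule quoted further up: the
 * expected PN is obtained by incrementing the stored 6-octet PN as a
 * big-endian counter, so with hypothetical values a last_pn of
 * 00:00:00:00:01:ff must be followed by 00:00:00:00:02:00; anything
 * else is treated as a replay.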
2467 */ 2468 return RX_DROP_U_EXPECT_DEFRAG_PROT; 2469 } else if (entry->is_protected && rx->key && 2470 entry->key_color != rx->key->color && 2471 (status->flag & RX_FLAG_DECRYPTED)) { 2472 return RX_DROP_U_BAD_KEY_COLOR; 2473 } 2474 2475 skb_pull(rx->skb, ieee80211_hdrlen(fc)); 2476 __skb_queue_tail(&entry->skb_list, rx->skb); 2477 entry->last_frag = frag; 2478 entry->extra_len += rx->skb->len; 2479 if (ieee80211_has_morefrags(fc)) { 2480 rx->skb = NULL; 2481 return RX_QUEUED; 2482 } 2483 2484 rx->skb = __skb_dequeue(&entry->skb_list); 2485 if (skb_tailroom(rx->skb) < entry->extra_len) { 2486 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag); 2487 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len, 2488 GFP_ATOMIC))) { 2489 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 2490 __skb_queue_purge(&entry->skb_list); 2491 return RX_DROP_U_OOM; 2492 } 2493 } 2494 while ((skb = __skb_dequeue(&entry->skb_list))) { 2495 skb_put_data(rx->skb, skb->data, skb->len); 2496 dev_kfree_skb(skb); 2497 } 2498 2499 out: 2500 ieee80211_led_rx(rx->local); 2501 if (rx->sta) 2502 rx->link_sta->rx_stats.packets++; 2503 return RX_CONTINUE; 2504 } 2505 2506 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx) 2507 { 2508 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED))) 2509 return -EACCES; 2510 2511 return 0; 2512 } 2513 2514 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc) 2515 { 2516 struct sk_buff *skb = rx->skb; 2517 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2518 2519 /* 2520 * Pass through unencrypted frames if the hardware has 2521 * decrypted them already. 2522 */ 2523 if (status->flag & RX_FLAG_DECRYPTED) 2524 return 0; 2525 2526 /* Drop unencrypted frames if key is set. */ 2527 if (unlikely(!ieee80211_has_protected(fc) && 2528 !ieee80211_is_any_nullfunc(fc) && 2529 ieee80211_is_data(fc) && rx->key)) 2530 return -EACCES; 2531 2532 return 0; 2533 } 2534 2535 VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result 2536 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) 2537 { 2538 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 2539 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 2540 __le16 fc = mgmt->frame_control; 2541 2542 /* 2543 * Pass through unencrypted frames if the hardware has 2544 * decrypted them already. 2545 */ 2546 if (status->flag & RX_FLAG_DECRYPTED) 2547 return RX_CONTINUE; 2548 2549 /* drop unicast protected dual (that wasn't protected) */ 2550 if (ieee80211_is_action(fc) && 2551 mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) 2552 return RX_DROP_U_UNPROT_DUAL; 2553 2554 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) { 2555 if (unlikely(!ieee80211_has_protected(fc) && 2556 ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) { 2557 if (ieee80211_is_deauth(fc) || 2558 ieee80211_is_disassoc(fc)) { 2559 /* 2560 * Permit unprotected deauth/disassoc frames 2561 * during 4-way-HS (key is installed after HS). 
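 * In other words: a missing rx->key is taken to mean the 4-way
 * handshake has not completed yet, so the unprotected deauth/disassoc
 * is accepted; once a key is present the frame is instead reported to
 * userspace via cfg80211_rx_unprot_mlme_mgmt() and dropped below.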
2562 */ 2563 if (!rx->key) 2564 return RX_CONTINUE; 2565 2566 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2567 rx->skb->data, 2568 rx->skb->len); 2569 } 2570 return RX_DROP_U_UNPROT_UCAST_MGMT; 2571 } 2572 /* BIP does not use Protected field, so need to check MMIE */ 2573 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) && 2574 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2575 if (ieee80211_is_deauth(fc) || 2576 ieee80211_is_disassoc(fc)) 2577 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2578 rx->skb->data, 2579 rx->skb->len); 2580 return RX_DROP_U_UNPROT_MCAST_MGMT; 2581 } 2582 if (unlikely(ieee80211_is_beacon(fc) && rx->key && 2583 ieee80211_get_mmie_keyidx(rx->skb) < 0)) { 2584 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev, 2585 rx->skb->data, 2586 rx->skb->len); 2587 return RX_DROP_U_UNPROT_BEACON; 2588 } 2589 /* 2590 * When using MFP, Action frames are not allowed prior to 2591 * having configured keys. 2592 */ 2593 if (unlikely(ieee80211_is_action(fc) && !rx->key && 2594 ieee80211_is_robust_mgmt_frame(rx->skb))) 2595 return RX_DROP_U_UNPROT_ACTION; 2596 2597 /* drop unicast public action frames when using MFP */ 2598 if (is_unicast_ether_addr(mgmt->da) && 2599 ieee80211_is_protected_dual_of_public_action(rx->skb)) 2600 return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION; 2601 } 2602 2603 /* 2604 * Drop robust action frames before assoc regardless of MFP state, 2605 * after assoc we also have decided on MFP or not. 2606 */ 2607 if (ieee80211_is_action(fc) && 2608 ieee80211_is_robust_mgmt_frame(rx->skb) && 2609 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC))) 2610 return RX_DROP_U_UNPROT_ROBUST_ACTION; 2611 2612 /* 2613 * Drop unprotected (Re)Association Request/Response frames received from 2614 * an EPP Peer. 2615 */ 2616 if (!ieee80211_has_protected(fc) && 2617 ieee80211_require_encrypted_assoc(fc, rx->sta)) 2618 return RX_DROP_U_UNPROT_UCAST_MGMT; 2619 2620 return RX_CONTINUE; 2621 } 2622 EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt); 2623 2624 static ieee80211_rx_result 2625 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control) 2626 { 2627 struct ieee80211_sub_if_data *sdata = rx->sdata; 2628 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 2629 bool check_port_control = false; 2630 struct ethhdr *ehdr; 2631 int ret; 2632 2633 *port_control = false; 2634 if (ieee80211_has_a4(hdr->frame_control) && 2635 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 2636 return RX_DROP_U_UNEXPECTED_VLAN_4ADDR; 2637 2638 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2639 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) { 2640 if (!sdata->u.mgd.use_4addr) 2641 return RX_DROP_U_UNEXPECTED_STA_4ADDR; 2642 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr)) 2643 check_port_control = true; 2644 } 2645 2646 if (is_multicast_ether_addr(hdr->addr1) && 2647 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta) 2648 return RX_DROP_U_UNEXPECTED_VLAN_MCAST; 2649 2650 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 2651 if (ret < 0) 2652 return RX_DROP_U_INVALID_8023; 2653 2654 ehdr = (struct ethhdr *) rx->skb->data; 2655 if (ehdr->h_proto == rx->sdata->control_port_protocol) 2656 *port_control = true; 2657 else if (check_port_control) 2658 return RX_DROP_U_NOT_PORT_CONTROL; 2659 2660 return RX_CONTINUE; 2661 } 2662 2663 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata, 2664 const u8 *addr, int *out_link_id) 2665 { 2666 unsigned int link_id; 2667 2668 /*
non-MLO, or MLD address replaced by hardware */ 2669 if (ether_addr_equal(sdata->vif.addr, addr)) 2670 return true; 2671 2672 if (!ieee80211_vif_is_mld(&sdata->vif)) 2673 return false; 2674 2675 for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) { 2676 struct ieee80211_bss_conf *conf; 2677 2678 conf = rcu_dereference(sdata->vif.link_conf[link_id]); 2679 2680 if (!conf) 2681 continue; 2682 if (ether_addr_equal(conf->addr, addr)) { 2683 if (out_link_id) 2684 *out_link_id = link_id; 2685 return true; 2686 } 2687 } 2688 2689 return false; 2690 } 2691 2692 /* 2693 * requires that rx->skb is a frame with ethernet header 2694 */ 2695 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc) 2696 { 2697 static const u8 pae_group_addr[ETH_ALEN] __aligned(2) 2698 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 }; 2699 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2700 2701 /* 2702 * Allow EAPOL frames to us/the PAE group address regardless of 2703 * whether the frame was encrypted or not, and always disallow 2704 * all other destination addresses for them. 2705 */ 2706 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) 2707 return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) || 2708 ether_addr_equal(ehdr->h_dest, pae_group_addr); 2709 2710 if (ieee80211_802_1x_port_control(rx) || 2711 ieee80211_drop_unencrypted(rx, fc)) 2712 return false; 2713 2714 return true; 2715 } 2716 2717 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb, 2718 struct ieee80211_rx_data *rx) 2719 { 2720 struct ieee80211_sub_if_data *sdata = rx->sdata; 2721 struct net_device *dev = sdata->dev; 2722 2723 if (unlikely((skb->protocol == sdata->control_port_protocol || 2724 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) && 2725 !sdata->control_port_no_preauth)) && 2726 sdata->control_port_over_nl80211)) { 2727 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2728 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2729 2730 cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id); 2731 dev_kfree_skb(skb); 2732 } else { 2733 struct ethhdr *ehdr = (void *)skb_mac_header(skb); 2734 2735 memset(skb->cb, 0, sizeof(skb->cb)); 2736 2737 /* 2738 * 802.1X over 802.11 requires that the authenticator address 2739 * be used for EAPOL frames. However, 802.1X allows the use of 2740 * the PAE group address instead. If the interface is part of 2741 * a bridge and we pass the frame with the PAE group address, 2742 * then the bridge will forward it to the network (even if the 2743 * client was not associated yet), which isn't supposed to 2744 * happen. 2745 * To avoid that, rewrite the destination address to our own 2746 * address, so that the authenticator (e.g. hostapd) will see 2747 * the frame, but bridge won't forward it anywhere else. Note 2748 * that due to earlier filtering, the only other address can 2749 * be the PAE group address, unless the hardware allowed them 2750 * through in 802.3 offloaded mode. 
2751 */ 2752 if (unlikely(skb->protocol == sdata->control_port_protocol && 2753 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) 2754 ether_addr_copy(ehdr->h_dest, sdata->vif.addr); 2755 2756 /* deliver to local stack */ 2757 if (rx->list) 2758 list_add_tail(&skb->list, rx->list); 2759 else 2760 netif_receive_skb(skb); 2761 } 2762 } 2763 2764 /* 2765 * requires that rx->skb is a frame with ethernet header 2766 */ 2767 static void 2768 ieee80211_deliver_skb(struct ieee80211_rx_data *rx) 2769 { 2770 struct ieee80211_sub_if_data *sdata = rx->sdata; 2771 struct net_device *dev = sdata->dev; 2772 struct sk_buff *skb, *xmit_skb; 2773 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; 2774 struct sta_info *dsta; 2775 2776 skb = rx->skb; 2777 xmit_skb = NULL; 2778 2779 dev_sw_netstats_rx_add(dev, skb->len); 2780 2781 if (rx->sta) { 2782 /* The seqno index has the same property as needed 2783 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 2784 * for non-QoS-data frames. Here we know it's a data 2785 * frame, so count MSDUs. 2786 */ 2787 u64_stats_update_begin(&rx->link_sta->rx_stats.syncp); 2788 u64_stats_inc(&rx->link_sta->rx_stats.msdu[rx->seqno_idx]); 2789 u64_stats_update_end(&rx->link_sta->rx_stats.syncp); 2790 } 2791 2792 if ((sdata->vif.type == NL80211_IFTYPE_AP || 2793 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && 2794 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 2795 ehdr->h_proto != rx->sdata->control_port_protocol && 2796 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { 2797 if (is_multicast_ether_addr(ehdr->h_dest) && 2798 ieee80211_vif_get_num_mcast_if(sdata) != 0) { 2799 /* 2800 * send multicast frames both to higher layers in 2801 * local net stack and back to the wireless medium 2802 */ 2803 xmit_skb = skb_copy(skb, GFP_ATOMIC); 2804 if (!xmit_skb) 2805 net_info_ratelimited("%s: failed to clone multicast frame\n", 2806 dev->name); 2807 } else if (!is_multicast_ether_addr(ehdr->h_dest) && 2808 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) { 2809 dsta = sta_info_get(sdata, ehdr->h_dest); 2810 if (dsta) { 2811 /* 2812 * The destination station is associated to 2813 * this AP (in this VLAN), so send the frame 2814 * directly to it and do not pass it to local 2815 * net stack. 2816 */ 2817 xmit_skb = skb; 2818 skb = NULL; 2819 } 2820 } 2821 } 2822 2823 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 2824 if (skb) { 2825 /* 'align' will only take the values 0 or 2 here since all 2826 * frames are required to be aligned to 2-byte boundaries 2827 * when being passed to mac80211; the code here works just 2828 * as well if that isn't true, but mac80211 assumes it can 2829 * access fields as 2-byte aligned (e.g. for ether_addr_equal) 2830 */ 2831 int align; 2832 2833 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3; 2834 if (align) { 2835 if (WARN_ON(skb_headroom(skb) < 3)) { 2836 dev_kfree_skb(skb); 2837 skb = NULL; 2838 } else { 2839 u8 *data = skb->data; 2840 size_t len = skb_headlen(skb); 2841 skb->data -= align; 2842 memmove(skb->data, data, len); 2843 skb_set_tail_pointer(skb, len); 2844 } 2845 } 2846 } 2847 #endif 2848 2849 if (skb) { 2850 skb->protocol = eth_type_trans(skb, dev); 2851 ieee80211_deliver_skb_to_local_stack(skb, rx); 2852 } 2853 2854 if (xmit_skb) { 2855 /* 2856 * Send to wireless media and increase priority by 256 to 2857 * keep the received priority instead of reclassifying 2858 * the frame (see cfg80211_classify8021d). 
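 * For example, assuming cfg80211_classify8021d() keeps mapping
 * priorities in the 256..263 range back to (priority - 256): a frame
 * received with TID/priority 5 is re-queued with priority 261 and the
 * classification on transmit yields 5 again, instead of re-deriving
 * the user priority from the DSCP field.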
2859 */ 2860 xmit_skb->priority += 256; 2861 xmit_skb->protocol = htons(ETH_P_802_3); 2862 skb_reset_network_header(xmit_skb); 2863 skb_reset_mac_header(xmit_skb); 2864 dev_queue_xmit(xmit_skb); 2865 } 2866 } 2867 2868 #ifdef CONFIG_MAC80211_MESH 2869 static bool 2870 ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata, 2871 struct sk_buff *skb, int hdrlen) 2872 { 2873 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2874 struct ieee80211_mesh_fast_tx_key key = { 2875 .type = MESH_FAST_TX_TYPE_FORWARDED 2876 }; 2877 struct ieee80211_mesh_fast_tx *entry; 2878 struct ieee80211s_hdr *mesh_hdr; 2879 struct tid_ampdu_tx *tid_tx; 2880 struct sta_info *sta; 2881 struct ethhdr eth; 2882 u8 tid; 2883 2884 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth)); 2885 if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2886 ether_addr_copy(key.addr, mesh_hdr->eaddr1); 2887 else if (!(mesh_hdr->flags & MESH_FLAGS_AE)) 2888 ether_addr_copy(key.addr, skb->data); 2889 else 2890 return false; 2891 2892 entry = mesh_fast_tx_get(sdata, &key); 2893 if (!entry) 2894 return false; 2895 2896 sta = rcu_dereference(entry->mpath->next_hop); 2897 if (!sta) 2898 return false; 2899 2900 if (skb_linearize(skb)) 2901 return false; 2902 2903 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 2904 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); 2905 if (tid_tx) { 2906 if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) 2907 return false; 2908 2909 if (tid_tx->timeout) 2910 tid_tx->last_tx = jiffies; 2911 } 2912 2913 ieee80211_aggr_check(sdata, sta, skb); 2914 2915 if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen, 2916 &skb->protocol)) 2917 hdrlen += ETH_ALEN; 2918 else 2919 skb->protocol = htons(skb->len - hdrlen); 2920 skb_set_network_header(skb, hdrlen + 2); 2921 2922 skb->dev = sdata->dev; 2923 memcpy(ð, skb->data, ETH_HLEN - 2); 2924 skb_pull(skb, 2); 2925 __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx, 2926 eth.h_dest, eth.h_source); 2927 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 2928 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 2929 2930 return true; 2931 } 2932 #endif 2933 2934 static ieee80211_rx_result 2935 ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta, 2936 struct sk_buff *skb) 2937 { 2938 #ifdef CONFIG_MAC80211_MESH 2939 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 2940 struct ieee80211_local *local = sdata->local; 2941 uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA; 2942 struct ieee80211_hdr hdr = { 2943 .frame_control = cpu_to_le16(fc) 2944 }; 2945 struct ieee80211_hdr *fwd_hdr; 2946 struct ieee80211s_hdr *mesh_hdr; 2947 struct ieee80211_tx_info *info; 2948 struct sk_buff *fwd_skb; 2949 struct ethhdr *eth; 2950 bool multicast; 2951 int tailroom = 0; 2952 int hdrlen, mesh_hdrlen; 2953 u8 *qos; 2954 2955 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2956 return RX_CONTINUE; 2957 2958 if (!pskb_may_pull(skb, sizeof(*eth) + 6)) 2959 return RX_DROP_U_RUNT_MESH_DATA; 2960 2961 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth)); 2962 mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr); 2963 2964 if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen)) 2965 return RX_DROP_U_RUNT_MESH_DATA; 2966 2967 eth = (struct ethhdr *)skb->data; 2968 multicast = is_multicast_ether_addr(eth->h_dest); 2969 2970 mesh_hdr = (struct ieee80211s_hdr *)(eth + 1); 2971 if (!mesh_hdr->ttl) 2972 return RX_DROP_U_MESH_NO_TTL; 2973 2974 /* frame is in RMC, don't forward */ 2975 if 
(is_multicast_ether_addr(eth->h_dest) && 2976 mesh_rmc_check(sdata, eth->h_source, mesh_hdr)) 2977 return RX_DROP_U_MESH_RMC; 2978 2979 /* forward packet */ 2980 if (sdata->crypto_tx_tailroom_needed_cnt) 2981 tailroom = IEEE80211_ENCRYPT_TAILROOM; 2982 2983 if (mesh_hdr->flags & MESH_FLAGS_AE) { 2984 struct mesh_path *mppath; 2985 char *proxied_addr; 2986 bool update = false; 2987 2988 if (multicast) 2989 proxied_addr = mesh_hdr->eaddr1; 2990 else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) 2991 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2992 proxied_addr = mesh_hdr->eaddr2; 2993 else 2994 return RX_DROP_U_MESH_BAD_AE; 2995 2996 rcu_read_lock(); 2997 mppath = mpp_path_lookup(sdata, proxied_addr); 2998 if (!mppath) { 2999 mpp_path_add(sdata, proxied_addr, eth->h_source); 3000 } else { 3001 spin_lock_bh(&mppath->state_lock); 3002 if (!ether_addr_equal(mppath->mpp, eth->h_source)) { 3003 memcpy(mppath->mpp, eth->h_source, ETH_ALEN); 3004 update = true; 3005 } 3006 mppath->exp_time = jiffies; 3007 spin_unlock_bh(&mppath->state_lock); 3008 } 3009 3010 /* flush fast xmit cache if the address path changed */ 3011 if (update) 3012 mesh_fast_tx_flush_addr(sdata, proxied_addr); 3013 3014 rcu_read_unlock(); 3015 } 3016 3017 /* Frame has reached destination. Don't forward */ 3018 if (ether_addr_equal(sdata->vif.addr, eth->h_dest)) 3019 goto rx_accept; 3020 3021 if (!--mesh_hdr->ttl) { 3022 if (multicast) 3023 goto rx_accept; 3024 3025 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl); 3026 return RX_DROP_U_MESH_TTL_EXPIRED; 3027 } 3028 3029 if (!ifmsh->mshcfg.dot11MeshForwarding) { 3030 if (is_multicast_ether_addr(eth->h_dest)) 3031 goto rx_accept; 3032 3033 return RX_DROP_U_MESH_NOT_FORWARDING; 3034 } 3035 3036 skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]); 3037 3038 if (!multicast && 3039 ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen)) 3040 return RX_QUEUED; 3041 3042 ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control, 3043 eth->h_dest, eth->h_source); 3044 hdrlen = ieee80211_hdrlen(hdr.frame_control); 3045 if (multicast) { 3046 int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth); 3047 3048 fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head + 3049 IEEE80211_ENCRYPT_HEADROOM, 3050 tailroom, GFP_ATOMIC); 3051 if (!fwd_skb) 3052 goto rx_accept; 3053 } else { 3054 fwd_skb = skb; 3055 skb = NULL; 3056 3057 if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr))) 3058 return RX_DROP_U_OOM; 3059 3060 if (skb_linearize(fwd_skb)) 3061 return RX_DROP_U_OOM; 3062 } 3063 3064 fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr)); 3065 memcpy(fwd_hdr, &hdr, hdrlen - 2); 3066 qos = ieee80211_get_qos_ctl(fwd_hdr); 3067 qos[0] = qos[1] = 0; 3068 3069 skb_reset_mac_header(fwd_skb); 3070 hdrlen += mesh_hdrlen; 3071 if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen, 3072 &fwd_skb->protocol)) 3073 hdrlen += ETH_ALEN; 3074 else 3075 fwd_skb->protocol = htons(fwd_skb->len - hdrlen); 3076 skb_set_network_header(fwd_skb, hdrlen + 2); 3077 3078 info = IEEE80211_SKB_CB(fwd_skb); 3079 memset(info, 0, sizeof(*info)); 3080 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING; 3081 info->control.vif = &sdata->vif; 3082 info->control.jiffies = jiffies; 3083 fwd_skb->dev = sdata->dev; 3084 if (multicast) { 3085 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast); 3086 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN); 3087 /* update power mode indication when forwarding */ 3088 ieee80211_mps_set_frame_flags(sdata, NULL, 
fwd_hdr); 3089 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) { 3090 /* mesh power mode flags updated in mesh_nexthop_lookup */ 3091 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast); 3092 } else { 3093 /* unable to resolve next hop */ 3094 if (sta) 3095 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl, 3096 hdr.addr3, 0, 3097 WLAN_REASON_MESH_PATH_NOFORWARD, 3098 sta->sta.addr); 3099 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route); 3100 kfree_skb(fwd_skb); 3101 goto rx_accept; 3102 } 3103 3104 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames); 3105 ieee80211_set_qos_hdr(sdata, fwd_skb); 3106 ieee80211_add_pending_skb(local, fwd_skb); 3107 3108 rx_accept: 3109 if (!skb) 3110 return RX_QUEUED; 3111 3112 ieee80211_strip_8023_mesh_hdr(skb); 3113 #endif 3114 3115 return RX_CONTINUE; 3116 } 3117 3118 static ieee80211_rx_result debug_noinline 3119 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset) 3120 { 3121 struct net_device *dev = rx->sdata->dev; 3122 struct sk_buff *skb = rx->skb; 3123 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3124 __le16 fc = hdr->frame_control; 3125 struct sk_buff_head frame_list; 3126 struct ethhdr ethhdr; 3127 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source; 3128 3129 if (unlikely(ieee80211_has_a4(hdr->frame_control))) { 3130 check_da = NULL; 3131 check_sa = NULL; 3132 } else switch (rx->sdata->vif.type) { 3133 case NL80211_IFTYPE_AP: 3134 case NL80211_IFTYPE_AP_VLAN: 3135 check_da = NULL; 3136 break; 3137 case NL80211_IFTYPE_STATION: 3138 if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER)) 3139 check_sa = NULL; 3140 break; 3141 case NL80211_IFTYPE_MESH_POINT: 3142 check_sa = NULL; 3143 check_da = NULL; 3144 break; 3145 default: 3146 break; 3147 } 3148 3149 skb->dev = dev; 3150 __skb_queue_head_init(&frame_list); 3151 3152 if (ieee80211_data_to_8023_exthdr(skb, ðhdr, 3153 rx->sdata->vif.addr, 3154 rx->sdata->vif.type, 3155 data_offset, true)) 3156 return RX_DROP_U_BAD_AMSDU; 3157 3158 if (rx->sta->amsdu_mesh_control < 0) { 3159 s8 valid = -1; 3160 int i; 3161 3162 for (i = 0; i <= 2; i++) { 3163 if (!ieee80211_is_valid_amsdu(skb, i)) 3164 continue; 3165 3166 if (valid >= 0) { 3167 /* ambiguous */ 3168 valid = -1; 3169 break; 3170 } 3171 3172 valid = i; 3173 } 3174 3175 rx->sta->amsdu_mesh_control = valid; 3176 } 3177 3178 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 3179 rx->sdata->vif.type, 3180 rx->local->hw.extra_tx_headroom, 3181 check_da, check_sa, 3182 rx->sta->amsdu_mesh_control); 3183 3184 while (!skb_queue_empty(&frame_list)) { 3185 rx->skb = __skb_dequeue(&frame_list); 3186 3187 switch (ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb)) { 3188 case RX_QUEUED: 3189 break; 3190 case RX_CONTINUE: 3191 if (ieee80211_frame_allowed(rx, fc)) { 3192 ieee80211_deliver_skb(rx); 3193 break; 3194 } 3195 fallthrough; 3196 default: 3197 dev_kfree_skb(rx->skb); 3198 } 3199 } 3200 3201 return RX_QUEUED; 3202 } 3203 3204 static ieee80211_rx_result debug_noinline 3205 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx) 3206 { 3207 struct sk_buff *skb = rx->skb; 3208 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 3209 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3210 __le16 fc = hdr->frame_control; 3211 3212 if (!(status->rx_flags & IEEE80211_RX_AMSDU)) 3213 return RX_CONTINUE; 3214 3215 if (unlikely(!ieee80211_is_data(fc))) 3216 return RX_CONTINUE; 3217 3218 if (unlikely(!ieee80211_is_data_present(fc))) 3219 return RX_DROP_U_AMSDU_WITHOUT_DATA; 3220 3221 if 
(unlikely(ieee80211_has_a4(hdr->frame_control))) { 3222 switch (rx->sdata->vif.type) { 3223 case NL80211_IFTYPE_AP_VLAN: 3224 if (!rx->sdata->u.vlan.sta) 3225 return RX_DROP_U_BAD_4ADDR; 3226 break; 3227 case NL80211_IFTYPE_STATION: 3228 if (!rx->sdata->u.mgd.use_4addr) 3229 return RX_DROP_U_BAD_4ADDR; 3230 break; 3231 case NL80211_IFTYPE_MESH_POINT: 3232 break; 3233 default: 3234 return RX_DROP_U_BAD_4ADDR; 3235 } 3236 } 3237 3238 if (is_multicast_ether_addr(hdr->addr1) || !rx->sta) 3239 return RX_DROP_U_BAD_AMSDU; 3240 3241 if (rx->key) { 3242 /* 3243 * We should not receive A-MSDUs on pre-HT connections, 3244 * and HT connections cannot use old ciphers. Thus drop 3245 * them, as in those cases we couldn't even have SPP 3246 * A-MSDUs or such. 3247 */ 3248 switch (rx->key->conf.cipher) { 3249 case WLAN_CIPHER_SUITE_WEP40: 3250 case WLAN_CIPHER_SUITE_WEP104: 3251 case WLAN_CIPHER_SUITE_TKIP: 3252 return RX_DROP_U_BAD_AMSDU_CIPHER; 3253 default: 3254 break; 3255 } 3256 } 3257 3258 return __ieee80211_rx_h_amsdu(rx, 0); 3259 } 3260 3261 static ieee80211_rx_result debug_noinline 3262 ieee80211_rx_h_data(struct ieee80211_rx_data *rx) 3263 { 3264 struct ieee80211_sub_if_data *sdata = rx->sdata; 3265 struct ieee80211_local *local = rx->local; 3266 struct net_device *dev = sdata->dev; 3267 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 3268 __le16 fc = hdr->frame_control; 3269 ieee80211_rx_result res; 3270 bool port_control; 3271 3272 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 3273 return RX_CONTINUE; 3274 3275 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 3276 return RX_DROP_U_NULL_DATA; 3277 3278 /* Send unexpected-4addr-frame event to hostapd */ 3279 if (ieee80211_has_a4(hdr->frame_control) && 3280 sdata->vif.type == NL80211_IFTYPE_AP) { 3281 if (rx->sta && 3282 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT)) 3283 cfg80211_rx_unexpected_4addr_frame( 3284 rx->sdata->dev, rx->sta->sta.addr, rx->link_id, 3285 GFP_ATOMIC); 3286 return RX_DROP_U_UNEXPECTED_4ADDR; 3287 } 3288 3289 res = __ieee80211_data_to_8023(rx, &port_control); 3290 if (unlikely(res != RX_CONTINUE)) 3291 return res; 3292 3293 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 3294 if (res != RX_CONTINUE) 3295 return res; 3296 3297 if (!ieee80211_frame_allowed(rx, fc)) 3298 return RX_DROP_U_PORT_CONTROL; 3299 3300 /* directly handle TDLS channel switch requests/responses */ 3301 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto == 3302 cpu_to_be16(ETH_P_TDLS))) { 3303 struct ieee80211_tdls_data *tf = (void *)rx->skb->data; 3304 3305 if (pskb_may_pull(rx->skb, 3306 offsetof(struct ieee80211_tdls_data, u)) && 3307 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE && 3308 tf->category == WLAN_CATEGORY_TDLS && 3309 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST || 3310 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) { 3311 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS); 3312 __ieee80211_queue_skb_to_iface(sdata, rx->link_id, 3313 rx->sta, rx->skb); 3314 return RX_QUEUED; 3315 } 3316 } 3317 3318 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 3319 unlikely(port_control) && sdata->bss) { 3320 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, 3321 u.ap); 3322 dev = sdata->dev; 3323 rx->sdata = sdata; 3324 } 3325 3326 rx->skb->dev = dev; 3327 3328 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) && 3329 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 && 3330 !is_multicast_ether_addr( 3331 ((struct ethhdr *)rx->skb->data)->h_dest) && 
3332 (!local->scanning && 3333 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) 3334 mod_timer(&local->dynamic_ps_timer, jiffies + 3335 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout)); 3336 3337 ieee80211_deliver_skb(rx); 3338 3339 return RX_QUEUED; 3340 } 3341 3342 static ieee80211_rx_result debug_noinline 3343 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) 3344 { 3345 struct sk_buff *skb = rx->skb; 3346 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 3347 struct tid_ampdu_rx *tid_agg_rx; 3348 u16 start_seq_num; 3349 u16 tid; 3350 3351 if (likely(!ieee80211_is_ctl(bar->frame_control))) 3352 return RX_CONTINUE; 3353 3354 if (ieee80211_is_back_req(bar->frame_control)) { 3355 struct { 3356 __le16 control, start_seq_num; 3357 } __packed bar_data; 3358 struct ieee80211_event event = { 3359 .type = BAR_RX_EVENT, 3360 }; 3361 3362 if (!rx->sta) 3363 return RX_DROP_U_UNKNOWN_STA; 3364 3365 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control), 3366 &bar_data, sizeof(bar_data))) 3367 return RX_DROP_U_RUNT_BAR; 3368 3369 tid = le16_to_cpu(bar_data.control) >> 12; 3370 3371 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) && 3372 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg)) 3373 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid, 3374 WLAN_BACK_RECIPIENT, 3375 WLAN_REASON_QSTA_REQUIRE_SETUP); 3376 3377 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); 3378 if (!tid_agg_rx) 3379 return RX_DROP_U_BAR_OUTSIDE_SESSION; 3380 3381 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 3382 event.u.ba.tid = tid; 3383 event.u.ba.ssn = start_seq_num; 3384 event.u.ba.sta = &rx->sta->sta; 3385 3386 /* reset session timer */ 3387 if (tid_agg_rx->timeout) 3388 mod_timer(&tid_agg_rx->session_timer, 3389 TU_TO_EXP_TIME(tid_agg_rx->timeout)); 3390 3391 spin_lock(&tid_agg_rx->reorder_lock); 3392 /* release stored frames up to start of BAR */ 3393 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx, 3394 start_seq_num, frames); 3395 spin_unlock(&tid_agg_rx->reorder_lock); 3396 3397 drv_event_callback(rx->local, rx->sdata, &event); 3398 3399 kfree_skb(skb); 3400 return RX_QUEUED; 3401 } 3402 3403 return RX_DROP_U_CTRL_FRAME; 3404 } 3405 3406 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 3407 struct ieee80211_mgmt *mgmt, 3408 size_t len) 3409 { 3410 struct ieee80211_local *local = sdata->local; 3411 struct sk_buff *skb; 3412 struct ieee80211_mgmt *resp; 3413 3414 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) { 3415 /* Not to own unicast address */ 3416 return; 3417 } 3418 3419 if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) || 3420 !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) { 3421 /* Not from the current AP or not associated yet. 
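 * The response built below mirrors the request: a 24-octet management
 * header addressed back to the AP, then the SA Query category, the
 * response action code and the 2-octet transaction ID copied from the
 * request; that is what the 1 + sizeof(resp->u.action.u.sa_query)
 * sizing accounts for.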
*/ 3422 return; 3423 } 3424 3425 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) { 3426 /* Too short SA Query request frame */ 3427 return; 3428 } 3429 3430 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom); 3431 if (skb == NULL) 3432 return; 3433 3434 skb_reserve(skb, local->hw.extra_tx_headroom); 3435 resp = skb_put_zero(skb, 24); 3436 memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN); 3437 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN); 3438 memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); 3439 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | 3440 IEEE80211_STYPE_ACTION); 3441 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query)); 3442 resp->u.action.category = WLAN_CATEGORY_SA_QUERY; 3443 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE; 3444 memcpy(resp->u.action.u.sa_query.trans_id, 3445 mgmt->u.action.u.sa_query.trans_id, 3446 WLAN_SA_QUERY_TR_ID_LEN); 3447 3448 ieee80211_tx_skb(sdata, skb); 3449 } 3450 3451 static void 3452 ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) 3453 { 3454 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 3455 struct ieee80211_bss_conf *bss_conf; 3456 const struct element *ie; 3457 size_t baselen; 3458 3459 if (!wiphy_ext_feature_isset(rx->local->hw.wiphy, 3460 NL80211_EXT_FEATURE_BSS_COLOR)) 3461 return; 3462 3463 if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) 3464 return; 3465 3466 bss_conf = rx->link->conf; 3467 if (bss_conf->csa_active || bss_conf->color_change_active || 3468 !bss_conf->he_bss_color.enabled) 3469 return; 3470 3471 baselen = mgmt->u.beacon.variable - rx->skb->data; 3472 if (baselen > rx->skb->len) 3473 return; 3474 3475 ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, 3476 mgmt->u.beacon.variable, 3477 rx->skb->len - baselen); 3478 if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && 3479 ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { 3480 const struct ieee80211_he_operation *he_oper; 3481 u8 color; 3482 3483 he_oper = (void *)(ie->data + 1); 3484 if (le32_get_bits(he_oper->he_oper_params, 3485 IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED)) 3486 return; 3487 3488 color = le32_get_bits(he_oper->he_oper_params, 3489 IEEE80211_HE_OPERATION_BSS_COLOR_MASK); 3490 if (color == bss_conf->he_bss_color.color) 3491 ieee80211_obss_color_collision_notify(&rx->sdata->vif, 3492 BIT_ULL(color), 3493 bss_conf->link_id); 3494 } 3495 } 3496 3497 static ieee80211_rx_result debug_noinline 3498 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx) 3499 { 3500 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3501 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3502 3503 if (ieee80211_is_s1g_beacon(mgmt->frame_control)) 3504 return RX_CONTINUE; 3505 3506 /* 3507 * From here on, look only at management frames. 3508 * Data and control frames are already handled, 3509 * and unknown (reserved) frames are useless. 
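 * (As used in the length checks further below, IEEE80211_MIN_ACTION_SIZE
 * is expected to cover the 24-octet header plus the one-octet category
 * field; the additional "+ 1" / sizeof() terms then account for the
 * action code and any fixed fields after it.)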
3510 */ 3511 if (rx->skb->len < 24) 3512 return RX_DROP_U_RUNT_MGMT; 3513 3514 if (!ieee80211_is_mgmt(mgmt->frame_control)) 3515 return RX_DROP_U_EXPECTED_MGMT; 3516 3517 /* drop too small action frames */ 3518 if (ieee80211_is_action(mgmt->frame_control) && 3519 rx->skb->len < IEEE80211_MIN_ACTION_SIZE) 3520 return RX_DROP_U_RUNT_ACTION; 3521 3522 /* Drop non-broadcast Beacon frames */ 3523 if (ieee80211_is_beacon(mgmt->frame_control) && 3524 !is_broadcast_ether_addr(mgmt->da)) 3525 return RX_DROP_U_NONBCAST_BEACON; 3526 3527 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && 3528 ieee80211_is_beacon(mgmt->frame_control) && 3529 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) { 3530 int sig = 0; 3531 3532 /* sw bss color collision detection */ 3533 ieee80211_rx_check_bss_color_collision(rx); 3534 3535 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3536 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3537 sig = status->signal; 3538 3539 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy, 3540 rx->skb->data, rx->skb->len, 3541 ieee80211_rx_status_to_khz(status), 3542 sig); 3543 rx->flags |= IEEE80211_RX_BEACON_REPORTED; 3544 } 3545 3546 return ieee80211_drop_unencrypted_mgmt(rx); 3547 } 3548 3549 static bool 3550 ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx) 3551 { 3552 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data; 3553 struct ieee80211_sub_if_data *sdata = rx->sdata; 3554 3555 /* TWT actions are only supported in AP for the moment */ 3556 if (sdata->vif.type != NL80211_IFTYPE_AP) 3557 return false; 3558 3559 if (!rx->local->ops->add_twt_setup) 3560 return false; 3561 3562 if (!sdata->vif.bss_conf.twt_responder) 3563 return false; 3564 3565 if (!rx->sta) 3566 return false; 3567 3568 switch (mgmt->u.action.u.s1g.action_code) { 3569 case WLAN_S1G_TWT_SETUP: { 3570 struct ieee80211_twt_setup *twt; 3571 3572 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 3573 1 + /* action code */ 3574 sizeof(struct ieee80211_twt_setup) + 3575 2 /* TWT req_type agrt */) 3576 break; 3577 3578 twt = (void *)mgmt->u.action.u.s1g.variable; 3579 if (twt->element_id != WLAN_EID_S1G_TWT) 3580 break; 3581 3582 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 3583 4 + /* action code + token + tlv */ 3584 twt->length) 3585 break; 3586 3587 return true; /* queue the frame */ 3588 } 3589 case WLAN_S1G_TWT_TEARDOWN: 3590 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2) 3591 break; 3592 3593 return true; /* queue the frame */ 3594 default: 3595 break; 3596 } 3597 3598 return false; 3599 } 3600 3601 static ieee80211_rx_result debug_noinline 3602 ieee80211_rx_h_action(struct ieee80211_rx_data *rx) 3603 { 3604 struct ieee80211_local *local = rx->local; 3605 struct ieee80211_sub_if_data *sdata = rx->sdata; 3606 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 3607 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3608 int len = rx->skb->len; 3609 3610 if (!ieee80211_is_action(mgmt->frame_control)) 3611 return RX_CONTINUE; 3612 3613 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC && 3614 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED && 3615 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT) 3616 return RX_DROP_U_ACTION_UNKNOWN_SRC; 3617 3618 switch (mgmt->u.action.category) { 3619 case WLAN_CATEGORY_HT: 3620 /* reject HT action frames from stations not supporting HT 3621 * or not HE Capable 3622 */ 3623 if (!rx->link_sta->pub->ht_cap.ht_supported && 3624 !rx->link_sta->pub->he_cap.has_he) 3625 goto invalid; 3626 3627 if 
(sdata->vif.type != NL80211_IFTYPE_STATION && 3628 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3629 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3630 sdata->vif.type != NL80211_IFTYPE_AP && 3631 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3632 break; 3633 3634 /* verify action & smps_control/chanwidth are present */ 3635 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3636 goto invalid; 3637 3638 switch (mgmt->u.action.u.ht_smps.action) { 3639 case WLAN_HT_ACTION_SMPS: { 3640 struct ieee80211_supported_band *sband; 3641 enum ieee80211_smps_mode smps_mode; 3642 struct sta_opmode_info sta_opmode = {}; 3643 3644 if (sdata->vif.type != NL80211_IFTYPE_AP && 3645 sdata->vif.type != NL80211_IFTYPE_AP_VLAN) 3646 goto handled; 3647 3648 /* convert to HT capability */ 3649 switch (mgmt->u.action.u.ht_smps.smps_control) { 3650 case WLAN_HT_SMPS_CONTROL_DISABLED: 3651 smps_mode = IEEE80211_SMPS_OFF; 3652 break; 3653 case WLAN_HT_SMPS_CONTROL_STATIC: 3654 smps_mode = IEEE80211_SMPS_STATIC; 3655 break; 3656 case WLAN_HT_SMPS_CONTROL_DYNAMIC: 3657 smps_mode = IEEE80211_SMPS_DYNAMIC; 3658 break; 3659 default: 3660 goto invalid; 3661 } 3662 3663 /* if no change do nothing */ 3664 if (rx->link_sta->pub->smps_mode == smps_mode) 3665 goto handled; 3666 rx->link_sta->pub->smps_mode = smps_mode; 3667 sta_opmode.smps_mode = 3668 ieee80211_smps_mode_to_smps_mode(smps_mode); 3669 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED; 3670 3671 sband = rx->local->hw.wiphy->bands[status->band]; 3672 3673 rate_control_rate_update(local, sband, rx->link_sta, 3674 IEEE80211_RC_SMPS_CHANGED); 3675 cfg80211_sta_opmode_change_notify(sdata->dev, 3676 rx->sta->addr, 3677 &sta_opmode, 3678 GFP_ATOMIC); 3679 goto handled; 3680 } 3681 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: { 3682 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth; 3683 3684 if (chanwidth != IEEE80211_HT_CHANWIDTH_20MHZ && 3685 chanwidth != IEEE80211_HT_CHANWIDTH_ANY) 3686 goto invalid; 3687 3688 /* If it doesn't support 40 MHz it can't change ... 
*/ 3689 if (!(rx->link_sta->pub->ht_cap.cap & 3690 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 3691 goto handled; 3692 3693 goto queue; 3694 } 3695 default: 3696 goto invalid; 3697 } 3698 3699 break; 3700 case WLAN_CATEGORY_PUBLIC: 3701 case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: 3702 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3703 goto invalid; 3704 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3705 break; 3706 if (!rx->sta) 3707 break; 3708 if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid)) 3709 break; 3710 if (mgmt->u.action.u.ext_chan_switch.action_code != 3711 WLAN_PUB_ACTION_EXT_CHANSW_ANN) 3712 break; 3713 if (len < offsetof(struct ieee80211_mgmt, 3714 u.action.u.ext_chan_switch.variable)) 3715 goto invalid; 3716 goto queue; 3717 case WLAN_CATEGORY_VHT: 3718 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3719 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3720 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3721 sdata->vif.type != NL80211_IFTYPE_AP && 3722 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3723 break; 3724 3725 /* verify action code is present */ 3726 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3727 goto invalid; 3728 3729 switch (mgmt->u.action.u.vht_opmode_notif.action_code) { 3730 case WLAN_VHT_ACTION_OPMODE_NOTIF: { 3731 /* verify opmode is present */ 3732 if (len < IEEE80211_MIN_ACTION_SIZE + 2) 3733 goto invalid; 3734 goto queue; 3735 } 3736 case WLAN_VHT_ACTION_GROUPID_MGMT: { 3737 if (len < IEEE80211_MIN_ACTION_SIZE + 25) 3738 goto invalid; 3739 goto queue; 3740 } 3741 default: 3742 break; 3743 } 3744 break; 3745 case WLAN_CATEGORY_BACK: 3746 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3747 sdata->vif.type != NL80211_IFTYPE_MESH_POINT && 3748 sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 3749 sdata->vif.type != NL80211_IFTYPE_AP && 3750 sdata->vif.type != NL80211_IFTYPE_ADHOC) 3751 break; 3752 3753 /* verify action_code is present */ 3754 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3755 break; 3756 3757 switch (mgmt->u.action.u.addba_req.action_code) { 3758 case WLAN_ACTION_ADDBA_REQ: 3759 if (len < (IEEE80211_MIN_ACTION_SIZE + 3760 sizeof(mgmt->u.action.u.addba_req))) 3761 goto invalid; 3762 break; 3763 case WLAN_ACTION_ADDBA_RESP: 3764 if (len < (IEEE80211_MIN_ACTION_SIZE + 3765 sizeof(mgmt->u.action.u.addba_resp))) 3766 goto invalid; 3767 break; 3768 case WLAN_ACTION_DELBA: 3769 if (len < (IEEE80211_MIN_ACTION_SIZE + 3770 sizeof(mgmt->u.action.u.delba))) 3771 goto invalid; 3772 break; 3773 default: 3774 goto invalid; 3775 } 3776 3777 goto queue; 3778 case WLAN_CATEGORY_SPECTRUM_MGMT: 3779 /* verify action_code is present */ 3780 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 3781 break; 3782 3783 switch (mgmt->u.action.u.measurement.action_code) { 3784 case WLAN_ACTION_SPCT_MSR_REQ: 3785 if (status->band != NL80211_BAND_5GHZ) 3786 break; 3787 3788 if (len < (IEEE80211_MIN_ACTION_SIZE + 3789 sizeof(mgmt->u.action.u.measurement))) 3790 break; 3791 3792 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3793 break; 3794 3795 ieee80211_process_measurement_req(sdata, mgmt, len); 3796 goto handled; 3797 case WLAN_ACTION_SPCT_CHL_SWITCH: { 3798 u8 *bssid; 3799 if (len < (IEEE80211_MIN_ACTION_SIZE + 3800 sizeof(mgmt->u.action.u.chan_switch))) 3801 break; 3802 3803 if (sdata->vif.type != NL80211_IFTYPE_STATION && 3804 sdata->vif.type != NL80211_IFTYPE_ADHOC && 3805 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 3806 break; 3807 3808 if (sdata->vif.type == NL80211_IFTYPE_STATION) 3809 bssid = sdata->deflink.u.mgd.bssid; 3810 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 3811 
bssid = sdata->u.ibss.bssid; 3812 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 3813 bssid = mgmt->sa; 3814 else 3815 break; 3816 3817 if (!ether_addr_equal(mgmt->bssid, bssid)) 3818 break; 3819 3820 goto queue; 3821 } 3822 } 3823 break; 3824 case WLAN_CATEGORY_SELF_PROTECTED: 3825 if (len < (IEEE80211_MIN_ACTION_SIZE + 3826 sizeof(mgmt->u.action.u.self_prot.action_code))) 3827 break; 3828 3829 switch (mgmt->u.action.u.self_prot.action_code) { 3830 case WLAN_SP_MESH_PEERING_OPEN: 3831 case WLAN_SP_MESH_PEERING_CLOSE: 3832 case WLAN_SP_MESH_PEERING_CONFIRM: 3833 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3834 goto invalid; 3835 if (sdata->u.mesh.user_mpm) 3836 /* userspace handles this frame */ 3837 break; 3838 goto queue; 3839 case WLAN_SP_MGK_INFORM: 3840 case WLAN_SP_MGK_ACK: 3841 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3842 goto invalid; 3843 break; 3844 } 3845 break; 3846 case WLAN_CATEGORY_MESH_ACTION: 3847 if (len < (IEEE80211_MIN_ACTION_SIZE + 3848 sizeof(mgmt->u.action.u.mesh_action.action_code))) 3849 break; 3850 3851 if (!ieee80211_vif_is_mesh(&sdata->vif)) 3852 break; 3853 if (mesh_action_is_path_sel(mgmt) && 3854 !mesh_path_sel_is_hwmp(sdata)) 3855 break; 3856 goto queue; 3857 case WLAN_CATEGORY_S1G: 3858 if (len < offsetofend(typeof(*mgmt), 3859 u.action.u.s1g.action_code)) 3860 break; 3861 3862 switch (mgmt->u.action.u.s1g.action_code) { 3863 case WLAN_S1G_TWT_SETUP: 3864 case WLAN_S1G_TWT_TEARDOWN: 3865 if (ieee80211_process_rx_twt_action(rx)) 3866 goto queue; 3867 break; 3868 default: 3869 break; 3870 } 3871 break; 3872 case WLAN_CATEGORY_PROTECTED_EHT: 3873 if (len < offsetofend(typeof(*mgmt), 3874 u.action.u.ttlm_req.action_code)) 3875 break; 3876 3877 switch (mgmt->u.action.u.ttlm_req.action_code) { 3878 case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ: 3879 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3880 break; 3881 3882 if (len < offsetofend(typeof(*mgmt), 3883 u.action.u.ttlm_req)) 3884 goto invalid; 3885 goto queue; 3886 case WLAN_PROTECTED_EHT_ACTION_TTLM_RES: 3887 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3888 break; 3889 3890 if (len < offsetofend(typeof(*mgmt), 3891 u.action.u.ttlm_res)) 3892 goto invalid; 3893 goto queue; 3894 case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN: 3895 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3896 break; 3897 3898 if (len < offsetofend(typeof(*mgmt), 3899 u.action.u.ttlm_tear_down)) 3900 goto invalid; 3901 goto queue; 3902 case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP: 3903 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3904 break; 3905 3906 /* The reconfiguration response action frame must 3907 * least one 'Status Duple' entry (3 octets) 3908 */ 3909 if (len < 3910 offsetofend(typeof(*mgmt), 3911 u.action.u.ml_reconf_resp) + 3) 3912 goto invalid; 3913 goto queue; 3914 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP: 3915 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3916 break; 3917 3918 if (len < offsetofend(typeof(*mgmt), 3919 u.action.u.epcs) + 3920 IEEE80211_EPCS_ENA_RESP_BODY_LEN) 3921 goto invalid; 3922 goto queue; 3923 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN: 3924 if (sdata->vif.type != NL80211_IFTYPE_STATION) 3925 break; 3926 3927 if (len < offsetofend(typeof(*mgmt), 3928 u.action.u.epcs)) 3929 goto invalid; 3930 goto queue; 3931 case WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF: 3932 if (sdata->vif.type != NL80211_IFTYPE_AP) 3933 break; 3934 3935 if (len < offsetofend(typeof(*mgmt), 3936 u.action.u.eml_omn)) 3937 goto invalid; 3938 goto queue; 3939 default: 3940 break; 3941 } 3942 break; 3943 } 
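	/*
	 * Note on the labels used in the switch above (defined below):
	 * "handled" means the frame was fully consumed here, so only the
	 * RX statistics are updated and the skb is freed; "queue" defers
	 * the frame to the per-interface work via
	 * ieee80211_queue_skb_to_iface() so it is processed outside the
	 * RX path; "invalid" only marks the frame as a malformed action
	 * frame and lets the subsequent handlers decide what to do with
	 * it. Falling out of the switch returns RX_CONTINUE and leaves
	 * the frame to the remaining action/management handlers.
	 */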
3944 3945 return RX_CONTINUE; 3946 3947 invalid: 3948 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM; 3949 /* will return in the next handlers */ 3950 return RX_CONTINUE; 3951 3952 handled: 3953 if (rx->sta) 3954 rx->link_sta->rx_stats.packets++; 3955 dev_kfree_skb(rx->skb); 3956 return RX_QUEUED; 3957 3958 queue: 3959 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 3960 return RX_QUEUED; 3961 } 3962 3963 static ieee80211_rx_result debug_noinline 3964 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx) 3965 { 3966 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 3967 struct cfg80211_rx_info info = { 3968 .freq = ieee80211_rx_status_to_khz(status), 3969 .buf = rx->skb->data, 3970 .len = rx->skb->len, 3971 .link_id = rx->link_id, 3972 .have_link_id = rx->link_id >= 0, 3973 }; 3974 3975 /* skip known-bad action frames and return them in the next handler */ 3976 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) 3977 return RX_CONTINUE; 3978 3979 /* 3980 * Getting here means the kernel doesn't know how to handle 3981 * it, but maybe userspace does ... include returned frames 3982 * so userspace can register for those to know whether ones 3983 * it transmitted were processed or returned. 3984 */ 3985 3986 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) && 3987 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) 3988 info.sig_dbm = status->signal; 3989 3990 if (ieee80211_is_timing_measurement(rx->skb) || 3991 ieee80211_is_ftm(rx->skb)) { 3992 info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp); 3993 info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp); 3994 } 3995 3996 if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) { 3997 if (rx->sta) 3998 rx->link_sta->rx_stats.packets++; 3999 dev_kfree_skb(rx->skb); 4000 return RX_QUEUED; 4001 } 4002 4003 return RX_CONTINUE; 4004 } 4005 4006 static ieee80211_rx_result debug_noinline 4007 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx) 4008 { 4009 struct ieee80211_sub_if_data *sdata = rx->sdata; 4010 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 4011 int len = rx->skb->len; 4012 4013 if (!ieee80211_is_action(mgmt->frame_control)) 4014 return RX_CONTINUE; 4015 4016 switch (mgmt->u.action.category) { 4017 case WLAN_CATEGORY_SA_QUERY: 4018 if (len < (IEEE80211_MIN_ACTION_SIZE + 4019 sizeof(mgmt->u.action.u.sa_query))) 4020 break; 4021 4022 switch (mgmt->u.action.u.sa_query.action) { 4023 case WLAN_ACTION_SA_QUERY_REQUEST: 4024 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4025 break; 4026 ieee80211_process_sa_query_req(sdata, mgmt, len); 4027 goto handled; 4028 } 4029 break; 4030 } 4031 4032 return RX_CONTINUE; 4033 4034 handled: 4035 if (rx->sta) 4036 rx->link_sta->rx_stats.packets++; 4037 dev_kfree_skb(rx->skb); 4038 return RX_QUEUED; 4039 } 4040 4041 static ieee80211_rx_result debug_noinline 4042 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx) 4043 { 4044 struct ieee80211_local *local = rx->local; 4045 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data; 4046 struct sk_buff *nskb; 4047 struct ieee80211_sub_if_data *sdata = rx->sdata; 4048 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4049 4050 if (!ieee80211_is_action(mgmt->frame_control)) 4051 return RX_CONTINUE; 4052 4053 /* 4054 * For AP mode, hostapd is responsible for handling any action 4055 * frames that we didn't handle, including returning unknown 4056 * ones. 
For all other modes we will return them to the sender, 4057 * setting the 0x80 bit in the action category, as required by 4058 * 802.11-2012 9.24.4. 4059 * Newer versions of hostapd use the management frame registration 4060 * mechanisms and old cooked monitor interface is no longer supported. 4061 */ 4062 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) && 4063 (sdata->vif.type == NL80211_IFTYPE_AP || 4064 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) 4065 return RX_DROP_U_MALFORMED_ACTION; 4066 4067 if (is_multicast_ether_addr(mgmt->da)) 4068 return RX_DROP_U_UNKNOWN_MCAST_ACTION; 4069 4070 /* do not return rejected action frames */ 4071 if (mgmt->u.action.category & 0x80) 4072 return RX_DROP_U_REJECTED_ACTION_RESPONSE; 4073 4074 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 4075 GFP_ATOMIC); 4076 if (nskb) { 4077 struct ieee80211_mgmt *nmgmt = (void *)nskb->data; 4078 4079 nmgmt->u.action.category |= 0x80; 4080 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN); 4081 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 4082 4083 memset(nskb->cb, 0, sizeof(nskb->cb)); 4084 4085 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { 4086 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); 4087 4088 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | 4089 IEEE80211_TX_INTFL_OFFCHAN_TX_OK | 4090 IEEE80211_TX_CTL_NO_CCK_RATE; 4091 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL)) 4092 info->hw_queue = 4093 local->hw.offchannel_tx_hw_queue; 4094 } 4095 4096 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1, 4097 status->band); 4098 } 4099 4100 return RX_DROP_U_UNKNOWN_ACTION_REJECTED; 4101 } 4102 4103 static ieee80211_rx_result debug_noinline 4104 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx) 4105 { 4106 struct ieee80211_sub_if_data *sdata = rx->sdata; 4107 struct ieee80211_hdr *hdr = (void *)rx->skb->data; 4108 4109 if (!ieee80211_is_ext(hdr->frame_control)) 4110 return RX_CONTINUE; 4111 4112 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4113 return RX_DROP_U_UNEXPECTED_EXT_FRAME; 4114 4115 /* for now only beacons are ext, so queue them */ 4116 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4117 4118 return RX_QUEUED; 4119 } 4120 4121 static ieee80211_rx_result debug_noinline 4122 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) 4123 { 4124 struct ieee80211_sub_if_data *sdata = rx->sdata; 4125 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; 4126 __le16 stype; 4127 4128 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); 4129 4130 if (!ieee80211_vif_is_mesh(&sdata->vif) && 4131 sdata->vif.type != NL80211_IFTYPE_ADHOC && 4132 sdata->vif.type != NL80211_IFTYPE_OCB && 4133 sdata->vif.type != NL80211_IFTYPE_STATION) 4134 return RX_DROP_U_UNHANDLED_MGMT; 4135 4136 switch (stype) { 4137 case cpu_to_le16(IEEE80211_STYPE_AUTH): 4138 case cpu_to_le16(IEEE80211_STYPE_BEACON): 4139 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): 4140 /* process for all: mesh, mlme, ibss */ 4141 break; 4142 case cpu_to_le16(IEEE80211_STYPE_DEAUTH): 4143 if (is_multicast_ether_addr(mgmt->da) && 4144 !is_broadcast_ether_addr(mgmt->da)) 4145 return RX_DROP_U_MCAST_DEAUTH; 4146 4147 /* process only for station/IBSS */ 4148 if (sdata->vif.type != NL80211_IFTYPE_STATION && 4149 sdata->vif.type != NL80211_IFTYPE_ADHOC) 4150 return RX_DROP_U_UNHANDLED_DEAUTH; 4151 break; 4152 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP): 4153 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP): 4154 case cpu_to_le16(IEEE80211_STYPE_DISASSOC): 4155 if (is_multicast_ether_addr(mgmt->da) && 4156 
!is_broadcast_ether_addr(mgmt->da)) 4157 return RX_DROP_U_MCAST_DISASSOC; 4158 4159 /* process only for station */ 4160 if (sdata->vif.type != NL80211_IFTYPE_STATION) 4161 return RX_DROP_U_UNHANDLED_DISASSOC; 4162 break; 4163 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): 4164 /* process only for ibss and mesh */ 4165 if (sdata->vif.type != NL80211_IFTYPE_ADHOC && 4166 sdata->vif.type != NL80211_IFTYPE_MESH_POINT) 4167 return RX_DROP_U_UNHANDLED_PREQ; 4168 break; 4169 default: 4170 return RX_DROP_U_UNHANDLED_MGMT_STYPE; 4171 } 4172 4173 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb); 4174 4175 return RX_QUEUED; 4176 } 4177 4178 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx, 4179 ieee80211_rx_result res) 4180 { 4181 if (res == RX_QUEUED) { 4182 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued); 4183 return; 4184 } 4185 4186 if (res != RX_CONTINUE) { 4187 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop); 4188 if (rx->sta) 4189 rx->link_sta->rx_stats.dropped++; 4190 } 4191 4192 kfree_skb_reason(rx->skb, (__force u32)res); 4193 } 4194 4195 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx, 4196 struct sk_buff_head *frames) 4197 { 4198 ieee80211_rx_result res; 4199 struct sk_buff *skb; 4200 4201 #define CALL_RXH(rxh) \ 4202 do { \ 4203 res = rxh(rx); \ 4204 if (res != RX_CONTINUE) \ 4205 goto rxh_next; \ 4206 } while (0) 4207 4208 /* Lock here to avoid hitting all of the data used in the RX 4209 * path (e.g. key data, station data, ...) concurrently when 4210 * a frame is released from the reorder buffer due to timeout 4211 * from the timer, potentially concurrently with RX from the 4212 * driver. 4213 */ 4214 spin_lock_bh(&rx->local->rx_path_lock); 4215 4216 while ((skb = __skb_dequeue(frames))) { 4217 /* 4218 * all the other fields are valid across frames 4219 * that belong to an aMPDU since they are on the 4220 * same TID from the same station 4221 */ 4222 rx->skb = skb; 4223 4224 if (WARN_ON_ONCE(!rx->link)) { 4225 res = RX_DROP_U_NO_LINK; 4226 goto rxh_next; 4227 } 4228 4229 CALL_RXH(ieee80211_rx_h_check_more_data); 4230 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll); 4231 CALL_RXH(ieee80211_rx_h_sta_process); 4232 CALL_RXH(ieee80211_rx_h_decrypt); 4233 CALL_RXH(ieee80211_rx_h_defragment); 4234 CALL_RXH(ieee80211_rx_h_michael_mic_verify); 4235 /* must be after MMIC verify so header is counted in MPDU mic */ 4236 CALL_RXH(ieee80211_rx_h_amsdu); 4237 CALL_RXH(ieee80211_rx_h_data); 4238 4239 /* special treatment -- needs the queue */ 4240 res = ieee80211_rx_h_ctrl(rx, frames); 4241 if (res != RX_CONTINUE) 4242 goto rxh_next; 4243 4244 CALL_RXH(ieee80211_rx_h_mgmt_check); 4245 CALL_RXH(ieee80211_rx_h_action); 4246 CALL_RXH(ieee80211_rx_h_userspace_mgmt); 4247 CALL_RXH(ieee80211_rx_h_action_post_userspace); 4248 CALL_RXH(ieee80211_rx_h_action_return); 4249 CALL_RXH(ieee80211_rx_h_ext); 4250 CALL_RXH(ieee80211_rx_h_mgmt); 4251 4252 rxh_next: 4253 ieee80211_rx_handlers_result(rx, res); 4254 4255 #undef CALL_RXH 4256 } 4257 4258 spin_unlock_bh(&rx->local->rx_path_lock); 4259 } 4260 4261 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx) 4262 { 4263 struct sk_buff_head reorder_release; 4264 ieee80211_rx_result res; 4265 4266 __skb_queue_head_init(&reorder_release); 4267 4268 #define CALL_RXH(rxh) \ 4269 do { \ 4270 res = rxh(rx); \ 4271 if (res != RX_CONTINUE) \ 4272 goto rxh_next; \ 4273 } while (0) 4274 4275 CALL_RXH(ieee80211_rx_h_check_dup); 4276 CALL_RXH(ieee80211_rx_h_check); 4277 4278 ieee80211_rx_reorder_ampdu(rx, 
&reorder_release); 4279 4280 ieee80211_rx_handlers(rx, &reorder_release); 4281 return; 4282 4283 rxh_next: 4284 ieee80211_rx_handlers_result(rx, res); 4285 4286 #undef CALL_RXH 4287 } 4288 4289 static bool 4290 ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id) 4291 { 4292 return !!(sta->valid_links & BIT(link_id)); 4293 } 4294 4295 static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx, 4296 u8 link_id) 4297 { 4298 rx->link_id = link_id; 4299 rx->link = rcu_dereference(rx->sdata->link[link_id]); 4300 4301 if (!rx->sta) 4302 return rx->link; 4303 4304 if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id)) 4305 return false; 4306 4307 rx->link_sta = rcu_dereference(rx->sta->link[link_id]); 4308 4309 return rx->link && rx->link_sta; 4310 } 4311 4312 static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx, 4313 struct sta_info *sta, int link_id) 4314 { 4315 rx->link_id = link_id; 4316 rx->sta = sta; 4317 4318 if (sta) { 4319 rx->local = sta->sdata->local; 4320 if (!rx->sdata) 4321 rx->sdata = sta->sdata; 4322 rx->link_sta = &sta->deflink; 4323 } else { 4324 rx->link_sta = NULL; 4325 } 4326 4327 if (link_id < 0) { 4328 if (ieee80211_vif_is_mld(&rx->sdata->vif) && 4329 sta && !sta->sta.valid_links) 4330 rx->link = 4331 rcu_dereference(rx->sdata->link[sta->deflink.link_id]); 4332 else 4333 rx->link = &rx->sdata->deflink; 4334 } else if (!ieee80211_rx_data_set_link(rx, link_id)) { 4335 return false; 4336 } 4337 4338 return true; 4339 } 4340 4341 /* 4342 * This function makes calls into the RX path, therefore 4343 * it has to be invoked under RCU read lock. 4344 */ 4345 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) 4346 { 4347 struct sk_buff_head frames; 4348 struct ieee80211_rx_data rx = { 4349 /* This is OK -- must be QoS data frame */ 4350 .security_idx = tid, 4351 .seqno_idx = tid, 4352 }; 4353 struct tid_ampdu_rx *tid_agg_rx; 4354 int link_id = -1; 4355 4356 /* FIXME: statistics won't be right with this */ 4357 if (sta->sta.valid_links) 4358 link_id = ffs(sta->sta.valid_links) - 1; 4359 4360 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id)) 4361 return; 4362 4363 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4364 if (!tid_agg_rx) 4365 return; 4366 4367 __skb_queue_head_init(&frames); 4368 4369 spin_lock(&tid_agg_rx->reorder_lock); 4370 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4371 spin_unlock(&tid_agg_rx->reorder_lock); 4372 4373 if (!skb_queue_empty(&frames)) { 4374 struct ieee80211_event event = { 4375 .type = BA_FRAME_TIMEOUT, 4376 .u.ba.tid = tid, 4377 .u.ba.sta = &sta->sta, 4378 }; 4379 drv_event_callback(rx.local, rx.sdata, &event); 4380 } 4381 4382 ieee80211_rx_handlers(&rx, &frames); 4383 } 4384 4385 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, 4386 u16 ssn, u64 filtered, 4387 u16 received_mpdus) 4388 { 4389 struct ieee80211_local *local; 4390 struct sta_info *sta; 4391 struct tid_ampdu_rx *tid_agg_rx; 4392 struct sk_buff_head frames; 4393 struct ieee80211_rx_data rx = { 4394 /* This is OK -- must be QoS data frame */ 4395 .security_idx = tid, 4396 .seqno_idx = tid, 4397 }; 4398 int i, diff; 4399 4400 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS)) 4401 return; 4402 4403 __skb_queue_head_init(&frames); 4404 4405 sta = container_of(pubsta, struct sta_info, sta); 4406 4407 local = sta->sdata->local; 4408 WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, 4409 "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", 4410 
local->hw.max_rx_aggregation_subframes); 4411 4412 if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) 4413 return; 4414 4415 rcu_read_lock(); 4416 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); 4417 if (!tid_agg_rx) 4418 goto out; 4419 4420 spin_lock_bh(&tid_agg_rx->reorder_lock); 4421 4422 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) { 4423 int release; 4424 4425 /* release all frames in the reorder buffer */ 4426 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) % 4427 IEEE80211_SN_MODULO; 4428 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, 4429 release, &frames); 4430 /* update ssn to match received ssn */ 4431 tid_agg_rx->head_seq_num = ssn; 4432 } else { 4433 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn, 4434 &frames); 4435 } 4436 4437 /* handle the case that received ssn is behind the mac ssn. 4438 * it can be tid_agg_rx->buf_size behind and still be valid */ 4439 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK; 4440 if (diff >= tid_agg_rx->buf_size) { 4441 tid_agg_rx->reorder_buf_filtered = 0; 4442 goto release; 4443 } 4444 filtered = filtered >> diff; 4445 ssn += diff; 4446 4447 /* update bitmap */ 4448 for (i = 0; i < tid_agg_rx->buf_size; i++) { 4449 int index = (ssn + i) % tid_agg_rx->buf_size; 4450 4451 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); 4452 if (filtered & BIT_ULL(i)) 4453 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index); 4454 } 4455 4456 /* now process also frames that the filter marking released */ 4457 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames); 4458 4459 release: 4460 spin_unlock_bh(&tid_agg_rx->reorder_lock); 4461 4462 ieee80211_rx_handlers(&rx, &frames); 4463 4464 out: 4465 rcu_read_unlock(); 4466 } 4467 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames); 4468 4469 /* main receive path */ 4470 4471 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) 4472 { 4473 return ether_addr_equal(raddr, addr) || 4474 is_broadcast_ether_addr(raddr); 4475 } 4476 4477 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) 4478 { 4479 struct ieee80211_sub_if_data *sdata = rx->sdata; 4480 struct sk_buff *skb = rx->skb; 4481 struct ieee80211_hdr *hdr = (void *)skb->data; 4482 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4483 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type); 4484 bool multicast = is_multicast_ether_addr(hdr->addr1) || 4485 ieee80211_is_s1g_beacon(hdr->frame_control); 4486 4487 switch (sdata->vif.type) { 4488 case NL80211_IFTYPE_STATION: 4489 if (!bssid && !sdata->u.mgd.use_4addr) 4490 return false; 4491 if (ieee80211_is_first_frag(hdr->seq_ctrl) && 4492 ieee80211_is_robust_mgmt_frame(skb) && !rx->sta) 4493 return false; 4494 if (multicast) 4495 return true; 4496 return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id); 4497 case NL80211_IFTYPE_ADHOC: 4498 if (!bssid) 4499 return false; 4500 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || 4501 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || 4502 !is_valid_ether_addr(hdr->addr2)) 4503 return false; 4504 if (ieee80211_is_beacon(hdr->frame_control)) 4505 return true; 4506 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) 4507 return false; 4508 if (!multicast && 4509 !ether_addr_equal(sdata->vif.addr, hdr->addr1)) 4510 return false; 4511 if (!rx->sta) { 4512 int rate_idx; 4513 if (status->encoding != RX_ENC_LEGACY) 4514 rate_idx = 0; /* TODO: HT/VHT rates */ 4515 else 4516 rate_idx = status->rate_idx; 4517 ieee80211_ibss_rx_no_sta(sdata, bssid, 
hdr->addr2, 4518 BIT(rate_idx)); 4519 } 4520 return true; 4521 case NL80211_IFTYPE_OCB: 4522 if (!bssid) 4523 return false; 4524 if (!ieee80211_is_data_present(hdr->frame_control)) 4525 return false; 4526 if (!is_broadcast_ether_addr(bssid)) 4527 return false; 4528 if (!multicast && 4529 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) 4530 return false; 4531 /* reject invalid/our STA address */ 4532 if (!is_valid_ether_addr(hdr->addr2) || 4533 ether_addr_equal(sdata->dev->dev_addr, hdr->addr2)) 4534 return false; 4535 if (!rx->sta) { 4536 int rate_idx; 4537 if (status->encoding != RX_ENC_LEGACY) 4538 rate_idx = 0; /* TODO: HT rates */ 4539 else 4540 rate_idx = status->rate_idx; 4541 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2, 4542 BIT(rate_idx)); 4543 } 4544 return true; 4545 case NL80211_IFTYPE_MESH_POINT: 4546 if (ether_addr_equal(sdata->vif.addr, hdr->addr2)) 4547 return false; 4548 if (multicast) 4549 return true; 4550 return ether_addr_equal(sdata->vif.addr, hdr->addr1); 4551 case NL80211_IFTYPE_AP_VLAN: 4552 case NL80211_IFTYPE_AP: 4553 if (!bssid) 4554 return ieee80211_is_our_addr(sdata, hdr->addr1, 4555 &rx->link_id); 4556 4557 if (!is_broadcast_ether_addr(bssid) && 4558 !ieee80211_is_our_addr(sdata, bssid, NULL)) { 4559 /* 4560 * Accept public action frames even when the 4561 * BSSID doesn't match, this is used for P2P 4562 * and location updates. Note that mac80211 4563 * itself never looks at these frames. 4564 */ 4565 if (!multicast && 4566 !ieee80211_is_our_addr(sdata, hdr->addr1, 4567 &rx->link_id)) 4568 return false; 4569 if (ieee80211_is_public_action(hdr, skb->len)) 4570 return true; 4571 return ieee80211_is_beacon(hdr->frame_control); 4572 } 4573 4574 if (!ieee80211_has_tods(hdr->frame_control)) { 4575 /* ignore data frames to TDLS-peers */ 4576 if (ieee80211_is_data(hdr->frame_control)) 4577 return false; 4578 /* ignore action frames to TDLS-peers */ 4579 if (ieee80211_is_action(hdr->frame_control) && 4580 !is_broadcast_ether_addr(bssid) && 4581 !ether_addr_equal(bssid, hdr->addr1)) 4582 return false; 4583 } 4584 4585 /* 4586 * 802.11-2016 Table 9-26 says that for data frames, A1 must be 4587 * the BSSID - we've checked that already but may have accepted 4588 * the wildcard (ff:ff:ff:ff:ff:ff). 4589 * 4590 * It also says: 4591 * The BSSID of the Data frame is determined as follows: 4592 * a) If the STA is contained within an AP or is associated 4593 * with an AP, the BSSID is the address currently in use 4594 * by the STA contained in the AP. 4595 * 4596 * So we should not accept data frames with an address that's 4597 * multicast. 4598 * 4599 * Accepting it also opens a security problem because stations 4600 * could encrypt it with the GTK and inject traffic that way. 4601 */ 4602 if (ieee80211_is_data(hdr->frame_control) && multicast) 4603 return false; 4604 4605 return true; 4606 case NL80211_IFTYPE_P2P_DEVICE: 4607 return ieee80211_is_public_action(hdr, skb->len) || 4608 ieee80211_is_probe_req(hdr->frame_control) || 4609 ieee80211_is_probe_resp(hdr->frame_control) || 4610 ieee80211_is_beacon(hdr->frame_control) || 4611 (ieee80211_is_auth(hdr->frame_control) && 4612 ether_addr_equal(sdata->vif.addr, hdr->addr1)); 4613 case NL80211_IFTYPE_NAN: 4614 /* Accept only frames that are addressed to the NAN cluster 4615 * (based on the Cluster ID). From these frames, accept only 4616 * action frames or authentication frames that are addressed to 4617 * the local NAN interface. 
4618 */ 4619 return memcmp(sdata->wdev.u.nan.cluster_id, 4620 hdr->addr3, ETH_ALEN) == 0 && 4621 (ieee80211_is_public_action(hdr, skb->len) || 4622 (ieee80211_is_auth(hdr->frame_control) && 4623 ether_addr_equal(sdata->vif.addr, hdr->addr1))); 4624 default: 4625 break; 4626 } 4627 4628 WARN_ON_ONCE(1); 4629 return false; 4630 } 4631 4632 void ieee80211_check_fast_rx(struct sta_info *sta) 4633 { 4634 struct ieee80211_sub_if_data *sdata = sta->sdata; 4635 struct ieee80211_local *local = sdata->local; 4636 struct ieee80211_key *key; 4637 struct ieee80211_fast_rx fastrx = { 4638 .dev = sdata->dev, 4639 .vif_type = sdata->vif.type, 4640 .control_port_protocol = sdata->control_port_protocol, 4641 }, *old, *new = NULL; 4642 u32 offload_flags; 4643 bool set_offload = false; 4644 bool assign = false; 4645 bool offload; 4646 4647 /* use sparse to check that we don't return without updating */ 4648 __acquire(check_fast_rx); 4649 4650 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header)); 4651 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN); 4652 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header); 4653 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr); 4654 4655 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS); 4656 4657 /* fast-rx doesn't do reordering */ 4658 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) && 4659 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) 4660 goto clear; 4661 4662 switch (sdata->vif.type) { 4663 case NL80211_IFTYPE_STATION: 4664 if (sta->sta.tdls) { 4665 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4666 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4667 fastrx.expected_ds_bits = 0; 4668 } else { 4669 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1); 4670 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3); 4671 fastrx.expected_ds_bits = 4672 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4673 } 4674 4675 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) { 4676 fastrx.expected_ds_bits |= 4677 cpu_to_le16(IEEE80211_FCTL_TODS); 4678 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4679 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4680 } 4681 4682 if (!sdata->u.mgd.powersave) 4683 break; 4684 4685 /* software powersave is a huge mess, avoid all of it */ 4686 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK)) 4687 goto clear; 4688 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) && 4689 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS)) 4690 goto clear; 4691 break; 4692 case NL80211_IFTYPE_AP_VLAN: 4693 case NL80211_IFTYPE_AP: 4694 /* parallel-rx requires this, at least with calls to 4695 * ieee80211_sta_ps_transition() 4696 */ 4697 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS)) 4698 goto clear; 4699 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3); 4700 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2); 4701 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS); 4702 4703 fastrx.internal_forward = 4704 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && 4705 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || 4706 !sdata->u.vlan.sta); 4707 4708 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN && 4709 sdata->u.vlan.sta) { 4710 fastrx.expected_ds_bits |= 4711 cpu_to_le16(IEEE80211_FCTL_FROMDS); 4712 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4713 fastrx.internal_forward = 0; 4714 } 4715 4716 break; 4717 case NL80211_IFTYPE_MESH_POINT: 4718 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS | 4719 IEEE80211_FCTL_TODS); 4720 fastrx.da_offs = offsetof(struct 
ieee80211_hdr, addr3); 4721 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4); 4722 break; 4723 default: 4724 goto clear; 4725 } 4726 4727 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 4728 goto clear; 4729 4730 rcu_read_lock(); 4731 key = rcu_dereference(sta->ptk[sta->ptk_idx]); 4732 if (!key) 4733 key = rcu_dereference(sdata->default_unicast_key); 4734 if (key) { 4735 switch (key->conf.cipher) { 4736 case WLAN_CIPHER_SUITE_TKIP: 4737 /* we don't want to deal with MMIC in fast-rx */ 4738 goto clear_rcu; 4739 case WLAN_CIPHER_SUITE_CCMP: 4740 case WLAN_CIPHER_SUITE_CCMP_256: 4741 case WLAN_CIPHER_SUITE_GCMP: 4742 case WLAN_CIPHER_SUITE_GCMP_256: 4743 break; 4744 default: 4745 /* We also don't want to deal with 4746 * WEP or cipher scheme. 4747 */ 4748 goto clear_rcu; 4749 } 4750 4751 fastrx.key = true; 4752 fastrx.icv_len = key->conf.icv_len; 4753 } 4754 4755 assign = true; 4756 clear_rcu: 4757 rcu_read_unlock(); 4758 clear: 4759 __release(check_fast_rx); 4760 4761 if (assign) 4762 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL); 4763 4764 offload_flags = get_bss_sdata(sdata)->vif.offload_flags; 4765 offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED; 4766 4767 if (assign && offload) 4768 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4769 else 4770 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD); 4771 4772 if (set_offload) 4773 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign); 4774 4775 spin_lock_bh(&sta->lock); 4776 old = rcu_dereference_protected(sta->fast_rx, true); 4777 rcu_assign_pointer(sta->fast_rx, new); 4778 spin_unlock_bh(&sta->lock); 4779 4780 if (old) 4781 kfree_rcu(old, rcu_head); 4782 } 4783 4784 void ieee80211_clear_fast_rx(struct sta_info *sta) 4785 { 4786 struct ieee80211_fast_rx *old; 4787 4788 spin_lock_bh(&sta->lock); 4789 old = rcu_dereference_protected(sta->fast_rx, true); 4790 RCU_INIT_POINTER(sta->fast_rx, NULL); 4791 spin_unlock_bh(&sta->lock); 4792 4793 if (old) 4794 kfree_rcu(old, rcu_head); 4795 } 4796 4797 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4798 { 4799 struct ieee80211_local *local = sdata->local; 4800 struct sta_info *sta; 4801 4802 lockdep_assert_wiphy(local->hw.wiphy); 4803 4804 list_for_each_entry(sta, &local->sta_list, list) { 4805 if (sdata != sta->sdata && 4806 (!sta->sdata->bss || sta->sdata->bss != sdata->bss)) 4807 continue; 4808 ieee80211_check_fast_rx(sta); 4809 } 4810 } 4811 4812 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata) 4813 { 4814 struct ieee80211_local *local = sdata->local; 4815 4816 lockdep_assert_wiphy(local->hw.wiphy); 4817 4818 __ieee80211_check_fast_rx_iface(sdata); 4819 } 4820 4821 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx, 4822 struct ieee80211_fast_rx *fast_rx, 4823 int orig_len) 4824 { 4825 struct ieee80211_sta_rx_stats *stats; 4826 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 4827 struct sta_info *sta = rx->sta; 4828 struct link_sta_info *link_sta; 4829 struct sk_buff *skb = rx->skb; 4830 void *sa = skb->data + ETH_ALEN; 4831 void *da = skb->data; 4832 4833 if (rx->link_id >= 0) { 4834 link_sta = rcu_dereference(sta->link[rx->link_id]); 4835 if (WARN_ON_ONCE(!link_sta)) { 4836 dev_kfree_skb(rx->skb); 4837 return; 4838 } 4839 } else { 4840 link_sta = &sta->deflink; 4841 } 4842 4843 stats = &link_sta->rx_stats; 4844 if (fast_rx->uses_rss) 4845 stats = this_cpu_ptr(link_sta->pcpu_rx_stats); 4846 4847 /* statistics part of ieee80211_rx_h_sta_process() */ 4848 if 
(!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) { 4849 stats->last_signal = status->signal; 4850 if (!fast_rx->uses_rss) 4851 ewma_signal_add(&link_sta->rx_stats_avg.signal, 4852 -status->signal); 4853 } 4854 4855 if (status->chains) { 4856 int i; 4857 4858 stats->chains = status->chains; 4859 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) { 4860 int signal = status->chain_signal[i]; 4861 4862 if (!(status->chains & BIT(i))) 4863 continue; 4864 4865 stats->chain_signal_last[i] = signal; 4866 if (!fast_rx->uses_rss) 4867 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i], 4868 -signal); 4869 } 4870 } 4871 /* end of statistics */ 4872 4873 stats->last_rx = jiffies; 4874 stats->last_rate = sta_stats_encode_rate(status); 4875 4876 stats->fragments++; 4877 stats->packets++; 4878 4879 skb->dev = fast_rx->dev; 4880 4881 dev_sw_netstats_rx_add(fast_rx->dev, skb->len); 4882 4883 /* The seqno index has the same property as needed 4884 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS 4885 * for non-QoS-data frames. Here we know it's a data 4886 * frame, so count MSDUs. 4887 */ 4888 u64_stats_update_begin(&stats->syncp); 4889 u64_stats_inc(&stats->msdu[rx->seqno_idx]); 4890 u64_stats_add(&stats->bytes, orig_len); 4891 u64_stats_update_end(&stats->syncp); 4892 4893 if (fast_rx->internal_forward) { 4894 struct sk_buff *xmit_skb = NULL; 4895 if (is_multicast_ether_addr(da)) { 4896 xmit_skb = skb_copy(skb, GFP_ATOMIC); 4897 } else if (!ether_addr_equal(da, sa) && 4898 sta_info_get(rx->sdata, da)) { 4899 xmit_skb = skb; 4900 skb = NULL; 4901 } 4902 4903 if (xmit_skb) { 4904 /* 4905 * Send to wireless media and increase priority by 256 4906 * to keep the received priority instead of 4907 * reclassifying the frame (see cfg80211_classify8021d). 4908 */ 4909 xmit_skb->priority += 256; 4910 xmit_skb->protocol = htons(ETH_P_802_3); 4911 skb_reset_network_header(xmit_skb); 4912 skb_reset_mac_header(xmit_skb); 4913 dev_queue_xmit(xmit_skb); 4914 } 4915 4916 if (!skb) 4917 return; 4918 } 4919 4920 /* deliver to local stack */ 4921 skb->protocol = eth_type_trans(skb, fast_rx->dev); 4922 ieee80211_deliver_skb_to_local_stack(skb, rx); 4923 } 4924 4925 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx, 4926 struct ieee80211_fast_rx *fast_rx) 4927 { 4928 struct sk_buff *skb = rx->skb; 4929 struct ieee80211_hdr *hdr = (void *)skb->data; 4930 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 4931 static ieee80211_rx_result res; 4932 int orig_len = skb->len; 4933 int hdrlen = ieee80211_hdrlen(hdr->frame_control); 4934 int snap_offs = hdrlen; 4935 struct { 4936 u8 snap[sizeof(rfc1042_header)]; 4937 __be16 proto; 4938 } *payload __aligned(2); 4939 struct { 4940 u8 da[ETH_ALEN]; 4941 u8 sa[ETH_ALEN]; 4942 } addrs __aligned(2); 4943 struct ieee80211_sta_rx_stats *stats; 4944 4945 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write 4946 * to a common data structure; drivers can implement that per queue 4947 * but we don't have that information in mac80211 4948 */ 4949 if (!(status->flag & RX_FLAG_DUP_VALIDATED)) 4950 return false; 4951 4952 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED) 4953 4954 /* If using encryption, we also need to have: 4955 * - PN_VALIDATED: similar, but the implementation is tricky 4956 * - DECRYPTED: necessary for PN_VALIDATED 4957 */ 4958 if (fast_rx->key && 4959 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS) 4960 return false; 4961 4962 if (unlikely(!ieee80211_is_data_present(hdr->frame_control))) 4963 
return false; 4964 4965 if (unlikely(ieee80211_is_frag(hdr))) 4966 return false; 4967 4968 /* Since our interface address cannot be multicast, this 4969 * implicitly also rejects multicast frames without the 4970 * explicit check. 4971 * 4972 * We shouldn't get any *data* frames not addressed to us 4973 * (AP mode will accept multicast *management* frames), but 4974 * punting here will make it go through the full checks in 4975 * ieee80211_accept_frame(). 4976 */ 4977 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1)) 4978 return false; 4979 4980 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS | 4981 IEEE80211_FCTL_TODS)) != 4982 fast_rx->expected_ds_bits) 4983 return false; 4984 4985 /* assign the key to drop unencrypted frames (later) 4986 * and strip the IV/MIC if necessary 4987 */ 4988 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) { 4989 /* GCMP header length is the same */ 4990 snap_offs += IEEE80211_CCMP_HDR_LEN; 4991 } 4992 4993 if (!ieee80211_vif_is_mesh(&rx->sdata->vif) && 4994 !(status->rx_flags & IEEE80211_RX_AMSDU)) { 4995 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload))) 4996 return false; 4997 4998 payload = (void *)(skb->data + snap_offs); 4999 5000 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr)) 5001 return false; 5002 5003 /* Don't handle these here since they require special code. 5004 * Accept AARP and IPX even though they should come with a 5005 * bridge-tunnel header - but if we get them this way then 5006 * there's little point in discarding them. 5007 */ 5008 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) || 5009 payload->proto == fast_rx->control_port_protocol)) 5010 return false; 5011 } 5012 5013 /* after this point, don't punt to the slowpath! */ 5014 5015 if (fast_rx->uses_rss) 5016 stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats); 5017 else 5018 stats = &rx->link_sta->rx_stats; 5019 5020 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) && 5021 pskb_trim(skb, skb->len - fast_rx->icv_len)) 5022 goto drop; 5023 5024 if (rx->key && !ieee80211_has_protected(hdr->frame_control)) 5025 goto drop; 5026 5027 if (status->rx_flags & IEEE80211_RX_AMSDU) { 5028 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) != 5029 RX_QUEUED) 5030 goto drop; 5031 5032 return true; 5033 } 5034 5035 /* do the header conversion - first grab the addresses */ 5036 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs); 5037 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs); 5038 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) { 5039 skb_pull(skb, snap_offs - 2); 5040 put_unaligned_be16(skb->len - 2, skb->data); 5041 } else { 5042 skb_postpull_rcsum(skb, skb->data + snap_offs, 5043 sizeof(rfc1042_header) + 2); 5044 5045 /* remove the SNAP but leave the ethertype */ 5046 skb_pull(skb, snap_offs + sizeof(rfc1042_header)); 5047 } 5048 /* push the addresses in front */ 5049 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs)); 5050 5051 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb); 5052 switch (res) { 5053 case RX_QUEUED: 5054 stats->last_rx = jiffies; 5055 stats->last_rate = sta_stats_encode_rate(status); 5056 return true; 5057 case RX_CONTINUE: 5058 break; 5059 default: 5060 goto drop; 5061 } 5062 5063 ieee80211_rx_8023(rx, fast_rx, orig_len); 5064 5065 return true; 5066 drop: 5067 dev_kfree_skb(skb); 5068 5069 stats->dropped++; 5070 return true; 5071 } 5072 5073 /* 5074 * This function returns whether or not the SKB 5075 * was destined for RX processing or not, which, 5076 * if consume is true, is equivalent to 
whether 5077 * or not the skb was consumed. 5078 */ 5079 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx, 5080 struct sk_buff *skb, bool consume) 5081 { 5082 struct ieee80211_local *local = rx->local; 5083 struct ieee80211_sub_if_data *sdata = rx->sdata; 5084 struct ieee80211_hdr *hdr = (void *)skb->data; 5085 struct link_sta_info *link_sta = rx->link_sta; 5086 struct ieee80211_link_data *link = rx->link; 5087 5088 rx->skb = skb; 5089 5090 /* See if we can do fast-rx; if we have to copy we already lost, 5091 * so punt in that case. We should never have to deliver a data 5092 * frame to multiple interfaces anyway. 5093 * 5094 * We skip the ieee80211_accept_frame() call and do the necessary 5095 * checking inside ieee80211_invoke_fast_rx(). 5096 */ 5097 if (consume && rx->sta) { 5098 struct ieee80211_fast_rx *fast_rx; 5099 5100 fast_rx = rcu_dereference(rx->sta->fast_rx); 5101 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx)) 5102 return true; 5103 } 5104 5105 if (!ieee80211_accept_frame(rx)) 5106 return false; 5107 5108 if (!consume) { 5109 struct skb_shared_hwtstamps *shwt; 5110 5111 rx->skb = skb_copy(skb, GFP_ATOMIC); 5112 if (!rx->skb) { 5113 if (net_ratelimit()) 5114 wiphy_debug(local->hw.wiphy, 5115 "failed to copy skb for %s\n", 5116 sdata->name); 5117 return true; 5118 } 5119 5120 /* skb_copy() does not copy the hw timestamps, so copy it 5121 * explicitly 5122 */ 5123 shwt = skb_hwtstamps(rx->skb); 5124 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp; 5125 5126 /* Update the hdr pointer to the new skb for translation below */ 5127 hdr = (struct ieee80211_hdr *)rx->skb->data; 5128 } 5129 5130 if (unlikely(rx->sta && rx->sta->sta.mlo) && 5131 is_unicast_ether_addr(hdr->addr1) && 5132 !ieee80211_is_probe_resp(hdr->frame_control) && 5133 !ieee80211_is_beacon(hdr->frame_control)) { 5134 /* translate to MLD addresses */ 5135 if (ether_addr_equal(link->conf->addr, hdr->addr1)) 5136 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr); 5137 if (ether_addr_equal(link_sta->addr, hdr->addr2)) 5138 ether_addr_copy(hdr->addr2, rx->sta->addr); 5139 /* translate A3 only if it's the BSSID */ 5140 if (!ieee80211_has_tods(hdr->frame_control) && 5141 !ieee80211_has_fromds(hdr->frame_control)) { 5142 if (ether_addr_equal(link_sta->addr, hdr->addr3)) 5143 ether_addr_copy(hdr->addr3, rx->sta->addr); 5144 else if (ether_addr_equal(link->conf->addr, hdr->addr3)) 5145 ether_addr_copy(hdr->addr3, rx->sdata->vif.addr); 5146 } 5147 /* not needed for A4 since it can only carry the SA */ 5148 } 5149 5150 ieee80211_invoke_rx_handlers(rx); 5151 return true; 5152 } 5153 5154 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw, 5155 struct ieee80211_sta *pubsta, 5156 struct sk_buff *skb, 5157 struct list_head *list) 5158 { 5159 struct ieee80211_local *local = hw_to_local(hw); 5160 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5161 struct ieee80211_fast_rx *fast_rx; 5162 struct ieee80211_rx_data rx; 5163 struct sta_info *sta; 5164 int link_id = -1; 5165 5166 memset(&rx, 0, sizeof(rx)); 5167 rx.skb = skb; 5168 rx.local = local; 5169 rx.list = list; 5170 rx.link_id = -1; 5171 5172 I802_DEBUG_INC(local->dot11ReceivedFragmentCount); 5173 5174 /* drop frame if too short for header */ 5175 if (skb->len < sizeof(struct ethhdr)) 5176 goto drop; 5177 5178 if (!pubsta) 5179 goto drop; 5180 5181 if (status->link_valid) 5182 link_id = status->link_id; 5183 5184 /* 5185 * TODO: Should the frame be dropped if the right link_id is not 5186 * available? 
	 * Or maybe it is fine in the current form to proceed with
	 * the frame processing, because with the frame being in 802.3
	 * format, link_id is only used for statistics purposes and
	 * updating the stats on the deflink is fine?
	 */
	sta = container_of(pubsta, struct sta_info, sta);
	if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
		goto drop;

	fast_rx = rcu_dereference(rx.sta->fast_rx);
	if (!fast_rx)
		goto drop;

	ieee80211_rx_8023(&rx, fast_rx, skb->len);
	return;

 drop:
	dev_kfree_skb(skb);
}

static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
				       struct sk_buff *skb, bool consume)
{
	struct link_sta_info *link_sta;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct sta_info *sta;
	int link_id = -1;

	/*
	 * Look up the link station first, in case it has a link address
	 * that is identical to the MLD address; that way we'll have the
	 * link information if needed.
	 */
	link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
	if (link_sta) {
		sta = link_sta->sta;
		link_id = link_sta->link_id;
	} else {
		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

		sta = sta_info_get_bss(rx->sdata, hdr->addr2);
		if (status->link_valid) {
			link_id = status->link_id;
		} else if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
			   status->freq) {
			struct ieee80211_link_data *link;
			struct ieee80211_chanctx_conf *conf;

			for_each_link_data_rcu(rx->sdata, link) {
				conf = rcu_dereference(link->conf->chanctx_conf);
				if (!conf || !conf->def.chan)
					continue;

				if (status->freq == conf->def.chan->center_freq) {
					link_id = link->link_id;
					break;
				}
			}
		}
	}

	if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
		return false;

	return ieee80211_prepare_and_rx_handle(rx, skb, consume);
}

/*
 * This is the actual Rx frames handler. As it belongs to the Rx path it must
 * be called with rcu_read_lock protection.
 */
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
					 struct ieee80211_sta *pubsta,
					 struct sk_buff *skb,
					 struct list_head *list)
{
	struct ieee80211_local *local = hw_to_local(hw);
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_hdr *hdr;
	__le16 fc;
	struct ieee80211_rx_data rx;
	struct ieee80211_sub_if_data *prev;
	struct rhlist_head *tmp;
	int err = 0;

	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
	memset(&rx, 0, sizeof(rx));
	rx.skb = skb;
	rx.local = local;
	rx.list = list;
	rx.link_id = -1;

	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
		I802_DEBUG_INC(local->dot11ReceivedFragmentCount);

	if (ieee80211_is_mgmt(fc)) {
		/* drop frame if too short for header */
		if (skb->len < ieee80211_hdrlen(fc))
			err = -ENOBUFS;
		else
			err = skb_linearize(skb);
	} else {
		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
	}

	if (err) {
		dev_kfree_skb(skb);
		return;
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	ieee80211_parse_qos(&rx);
	ieee80211_verify_alignment(&rx);

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
		     ieee80211_is_beacon(hdr->frame_control) ||
		     ieee80211_is_s1g_beacon(hdr->frame_control)))
		ieee80211_scan_rx(local, skb);

	if (ieee80211_is_data(fc)) {
		struct sta_info *sta, *prev_sta;
		int link_id = -1;

		if (status->link_valid)
			link_id = status->link_id;

		if (pubsta) {
			sta = container_of(pubsta, struct sta_info, sta);
			if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
				goto out;

			/*
			 * In an MLO connection, fetch the link_id using addr2
			 * when the driver does not pass a link_id in the
			 * status. When the address translation has already
			 * been performed by the driver/hw, the valid link_id
			 * must be passed in the status.
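			 * A failed link_sta lookup below means that addr2
			 * does not belong to any link of this MLD station,
			 * in which case the frame is dropped.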
5325 */ 5326 5327 if (!status->link_valid && pubsta->mlo) { 5328 struct link_sta_info *link_sta; 5329 5330 link_sta = link_sta_info_get_bss(rx.sdata, 5331 hdr->addr2); 5332 if (!link_sta) 5333 goto out; 5334 5335 ieee80211_rx_data_set_link(&rx, link_sta->link_id); 5336 } 5337 5338 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5339 return; 5340 goto out; 5341 } 5342 5343 prev_sta = NULL; 5344 5345 for_each_sta_info(local, hdr->addr2, sta, tmp) { 5346 if (!prev_sta) { 5347 prev_sta = sta; 5348 continue; 5349 } 5350 5351 rx.sdata = prev_sta->sdata; 5352 if (!status->link_valid && prev_sta->sta.mlo) { 5353 struct link_sta_info *link_sta; 5354 5355 link_sta = link_sta_info_get_bss(rx.sdata, 5356 hdr->addr2); 5357 if (!link_sta) 5358 continue; 5359 5360 link_id = link_sta->link_id; 5361 } 5362 5363 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5364 goto out; 5365 5366 ieee80211_prepare_and_rx_handle(&rx, skb, false); 5367 5368 prev_sta = sta; 5369 } 5370 5371 if (prev_sta) { 5372 rx.sdata = prev_sta->sdata; 5373 if (!status->link_valid && prev_sta->sta.mlo) { 5374 struct link_sta_info *link_sta; 5375 5376 link_sta = link_sta_info_get_bss(rx.sdata, 5377 hdr->addr2); 5378 if (!link_sta) 5379 goto out; 5380 5381 link_id = link_sta->link_id; 5382 } 5383 5384 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id)) 5385 goto out; 5386 5387 if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) 5388 return; 5389 goto out; 5390 } 5391 } 5392 5393 prev = NULL; 5394 5395 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 5396 if (!ieee80211_sdata_running(sdata)) 5397 continue; 5398 5399 if (sdata->vif.type == NL80211_IFTYPE_MONITOR || 5400 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 5401 continue; 5402 5403 /* 5404 * frame is destined for this interface, but if it's 5405 * not also for the previous one we handle that after 5406 * the loop to avoid copying the SKB once too much 5407 */ 5408 5409 if (!prev) { 5410 prev = sdata; 5411 continue; 5412 } 5413 5414 rx.sdata = prev; 5415 ieee80211_rx_for_interface(&rx, skb, false); 5416 5417 prev = sdata; 5418 } 5419 5420 if (prev) { 5421 rx.sdata = prev; 5422 5423 if (ieee80211_rx_for_interface(&rx, skb, true)) 5424 return; 5425 } 5426 5427 out: 5428 dev_kfree_skb(skb); 5429 } 5430 5431 /* 5432 * This is the receive path handler. It is called by a low level driver when an 5433 * 802.11 MPDU is received from the hardware. 5434 */ 5435 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5436 struct sk_buff *skb, struct list_head *list) 5437 { 5438 struct ieee80211_local *local = hw_to_local(hw); 5439 struct ieee80211_rate *rate = NULL; 5440 struct ieee80211_supported_band *sband; 5441 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 5442 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5443 5444 WARN_ON_ONCE(softirq_count() == 0); 5445 5446 if (WARN_ON(status->band >= NUM_NL80211_BANDS)) 5447 goto drop; 5448 5449 sband = local->hw.wiphy->bands[status->band]; 5450 if (WARN_ON(!sband)) 5451 goto drop; 5452 5453 /* 5454 * If we're suspending, it is possible although not too likely 5455 * that we'd be receiving frames after having already partially 5456 * quiesced the stack. We can't process such frames then since 5457 * that might, for example, cause stations to be added or other 5458 * driver callbacks be invoked. 
5459 */ 5460 if (unlikely(local->quiescing || local->suspended)) 5461 goto drop; 5462 5463 /* We might be during a HW reconfig, prevent Rx for the same reason */ 5464 if (unlikely(local->in_reconfig)) 5465 goto drop; 5466 5467 /* 5468 * The same happens when we're not even started, 5469 * but that's worth a warning. 5470 */ 5471 if (WARN_ON(!local->started)) 5472 goto drop; 5473 5474 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC) && 5475 !(status->flag & RX_FLAG_NO_PSDU && 5476 status->zero_length_psdu_type == 5477 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED))) { 5478 /* 5479 * Validate the rate, unless there was a PLCP error which may 5480 * have an invalid rate or the PSDU was not capture and may be 5481 * missing rate information. 5482 */ 5483 5484 switch (status->encoding) { 5485 case RX_ENC_HT: 5486 /* 5487 * rate_idx is MCS index, which can be [0-76] 5488 * as documented on: 5489 * 5490 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n 5491 * 5492 * Anything else would be some sort of driver or 5493 * hardware error. The driver should catch hardware 5494 * errors. 5495 */ 5496 if (WARN(status->rate_idx > 76, 5497 "Rate marked as an HT rate but passed " 5498 "status->rate_idx is not " 5499 "an MCS index [0-76]: %d (0x%02x)\n", 5500 status->rate_idx, 5501 status->rate_idx)) 5502 goto drop; 5503 break; 5504 case RX_ENC_VHT: 5505 if (WARN_ONCE(status->rate_idx > 11 || 5506 !status->nss || 5507 status->nss > 8, 5508 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n", 5509 status->rate_idx, status->nss)) 5510 goto drop; 5511 break; 5512 case RX_ENC_HE: 5513 if (WARN_ONCE(status->rate_idx > 11 || 5514 !status->nss || 5515 status->nss > 8, 5516 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n", 5517 status->rate_idx, status->nss)) 5518 goto drop; 5519 break; 5520 case RX_ENC_EHT: 5521 if (WARN_ONCE(status->rate_idx > 15 || 5522 !status->nss || 5523 status->nss > 8 || 5524 status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2, 5525 "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n", 5526 status->rate_idx, status->nss, status->eht.gi)) 5527 goto drop; 5528 break; 5529 case RX_ENC_UHR: 5530 if (WARN_ONCE(!(status->rate_idx <= 15 || 5531 status->rate_idx == 17 || 5532 status->rate_idx == 19 || 5533 status->rate_idx == 20 || 5534 status->rate_idx == 23) || 5535 !status->nss || 5536 status->nss > 8 || 5537 status->uhr.gi > NL80211_RATE_INFO_EHT_GI_3_2, 5538 "Rate marked as a UHR rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n", 5539 status->rate_idx, status->nss, status->uhr.gi)) 5540 goto drop; 5541 if (WARN_ONCE(status->uhr.elr && 5542 (status->nss != 1 || status->rate_idx > 1 || 5543 status->uhr.gi != NL80211_RATE_INFO_EHT_GI_1_6 || 5544 status->bw != RATE_INFO_BW_20 || status->uhr.im), 5545 "bad UHR ELR MCS MCS:%d, NSS:%d, GI:%d, BW:%d, IM:%d\n", 5546 status->rate_idx, status->nss, status->uhr.gi, 5547 status->bw, status->uhr.im)) 5548 goto drop; 5549 if (WARN_ONCE(status->uhr.im && 5550 (status->nss != 1 || status->rate_idx == 15), 5551 "bad UHR IM MCS MCS:%d, NSS:%d\n", 5552 status->rate_idx, status->nss)) 5553 goto drop; 5554 break; 5555 default: 5556 WARN_ON_ONCE(1); 5557 fallthrough; 5558 case RX_ENC_LEGACY: 5559 if (WARN_ON(status->rate_idx >= sband->n_bitrates)) 5560 goto drop; 5561 rate = &sband->bitrates[status->rate_idx]; 5562 } 5563 } 5564 5565 if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED)) 5566 goto drop; 5567 5568 status->rx_flags = 0; 5569 5570 
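	/*
	 * From this point on, hand the frame to the RX machinery proper:
	 * point kcov at the remote handle carried in the skb, run frames
	 * that are not already in 802.3 format through
	 * ieee80211_rx_monitor() (which may consume the frame entirely,
	 * e.g. when it is marked monitor-only or failed the FCS check),
	 * and dispatch whatever is left either to the 802.3 fast path or
	 * to the full 802.11 handler.
	 */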
kcov_remote_start_common(skb_get_kcov_handle(skb)); 5571 5572 /* 5573 * Frames with failed FCS/PLCP checksum are not returned, 5574 * all other frames are returned without radiotap header 5575 * if it was previously present. 5576 * Also, frames with less than 16 bytes are dropped. 5577 */ 5578 if (!(status->flag & RX_FLAG_8023)) 5579 skb = ieee80211_rx_monitor(local, skb, rate); 5580 if (skb) { 5581 if ((status->flag & RX_FLAG_8023) || 5582 ieee80211_is_data_present(hdr->frame_control)) 5583 ieee80211_tpt_led_trig_rx(local, skb->len); 5584 5585 if (status->flag & RX_FLAG_8023) 5586 __ieee80211_rx_handle_8023(hw, pubsta, skb, list); 5587 else 5588 __ieee80211_rx_handle_packet(hw, pubsta, skb, list); 5589 } 5590 5591 kcov_remote_stop(); 5592 return; 5593 drop: 5594 kfree_skb(skb); 5595 } 5596 EXPORT_SYMBOL(ieee80211_rx_list); 5597 5598 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta, 5599 struct sk_buff *skb, struct napi_struct *napi) 5600 { 5601 struct sk_buff *tmp; 5602 LIST_HEAD(list); 5603 5604 5605 /* 5606 * key references and virtual interfaces are protected using RCU 5607 * and this requires that we are in a read-side RCU section during 5608 * receive processing 5609 */ 5610 rcu_read_lock(); 5611 ieee80211_rx_list(hw, pubsta, skb, &list); 5612 rcu_read_unlock(); 5613 5614 if (!napi) { 5615 netif_receive_skb_list(&list); 5616 return; 5617 } 5618 5619 list_for_each_entry_safe(skb, tmp, &list, list) { 5620 skb_list_del_init(skb); 5621 napi_gro_receive(napi, skb); 5622 } 5623 } 5624 EXPORT_SYMBOL(ieee80211_rx_napi); 5625 5626 /* This is a version of the rx handler that can be called from hard irq 5627 * context. Post the skb on the queue and schedule the tasklet */ 5628 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb) 5629 { 5630 struct ieee80211_local *local = hw_to_local(hw); 5631 5632 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb)); 5633 5634 skb->pkt_type = IEEE80211_RX_MSG; 5635 skb_queue_tail(&local->skb_queue, skb); 5636 tasklet_schedule(&local->tasklet); 5637 } 5638 EXPORT_SYMBOL(ieee80211_rx_irqsafe); 5639
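/*
 * Usage sketch (illustration only, not part of mac80211): a driver's
 * hard-IRQ receive handler would typically fill in the
 * ieee80211_rx_status embedded in the skb's control buffer and then
 * hand the frame off with ieee80211_rx_irqsafe(). The function and
 * parameter names below are made up for the example:
 *
 *	static void mydrv_rx_irq(struct ieee80211_hw *hw,
 *				 struct sk_buff *skb,
 *				 int signal_dbm, int freq_mhz)
 *	{
 *		struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 *
 *		memset(status, 0, sizeof(*status));
 *		status->band = freq_mhz > 4000 ? NL80211_BAND_5GHZ
 *					       : NL80211_BAND_2GHZ;
 *		status->freq = freq_mhz;
 *		status->signal = signal_dbm;
 *
 *		ieee80211_rx_irqsafe(hw, skb);
 *	}
 *
 * Real drivers also fill in the rate/encoding fields and any RX_FLAG_*
 * flags that apply, and report status->signal in dBm only when they
 * advertise the SIGNAL_DBM hardware flag.
 */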