1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2002-2005, Instant802 Networks, Inc.
4 * Copyright 2005-2006, Devicescape Software, Inc.
5 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
6 * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2013-2014 Intel Mobile Communications GmbH
8 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
9 * Copyright (C) 2018-2025 Intel Corporation
10 */
11
12 #include <linux/jiffies.h>
13 #include <linux/slab.h>
14 #include <linux/kernel.h>
15 #include <linux/skbuff.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/rcupdate.h>
19 #include <linux/export.h>
20 #include <linux/kcov.h>
21 #include <linux/bitops.h>
22 #include <kunit/visibility.h>
23 #include <net/mac80211.h>
24 #include <net/ieee80211_radiotap.h>
25 #include <linux/unaligned.h>
26
27 #include "ieee80211_i.h"
28 #include "driver-ops.h"
29 #include "led.h"
30 #include "mesh.h"
31 #include "wep.h"
32 #include "wpa.h"
33 #include "tkip.h"
34 #include "wme.h"
35 #include "rate.h"
36
37 /*
38 * monitor mode reception
39 *
40 * This function cleans up the SKB, i.e. it removes all the stuff
41 * only useful for monitoring.
42 */
43 static struct sk_buff *ieee80211_clean_skb(struct sk_buff *skb,
44 unsigned int present_fcs_len,
45 unsigned int rtap_space)
46 {
47 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
48 struct ieee80211_hdr *hdr;
49 unsigned int hdrlen;
50 __le16 fc;
51
52 if (present_fcs_len)
53 __pskb_trim(skb, skb->len - present_fcs_len);
54 pskb_pull(skb, rtap_space);
55
56 /* After pulling radiotap header, clear all flags that indicate
57 * info in skb->data.
58 */
59 status->flag &= ~(RX_FLAG_RADIOTAP_TLV_AT_END |
60 RX_FLAG_RADIOTAP_LSIG |
61 RX_FLAG_RADIOTAP_HE_MU |
62 RX_FLAG_RADIOTAP_HE |
63 RX_FLAG_RADIOTAP_VHT);
64
65 hdr = (void *)skb->data;
66 fc = hdr->frame_control;
67
68 /*
69 * Remove the HT-Control field (if present) on management
70 * frames after we've sent the frame to monitoring. We
71 * (currently) don't need it, and don't properly parse
72 * frames with it present, due to the assumption of a
73 * fixed management header length.
74 */
75 if (likely(!ieee80211_is_mgmt(fc) || !ieee80211_has_order(fc)))
76 return skb;
77
78 hdrlen = ieee80211_hdrlen(fc);
79 hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_ORDER);
80
81 if (!pskb_may_pull(skb, hdrlen)) {
82 dev_kfree_skb(skb);
83 return NULL;
84 }
85
86 memmove(skb->data + IEEE80211_HT_CTL_LEN, skb->data,
87 hdrlen - IEEE80211_HT_CTL_LEN);
88 pskb_pull(skb, IEEE80211_HT_CTL_LEN);
89
90 return skb;
91 }
92
93 static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
94 unsigned int rtap_space)
95 {
96 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
97 struct ieee80211_hdr *hdr;
98
99 hdr = (void *)(skb->data + rtap_space);
100
101 if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
102 RX_FLAG_FAILED_PLCP_CRC |
103 RX_FLAG_ONLY_MONITOR |
104 RX_FLAG_NO_PSDU))
105 return true;
106
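/* 16 bytes covers frame control, duration and the first two addresses */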
107 if (unlikely(skb->len < 16 + present_fcs_len + rtap_space))
108 return true;
109
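/* control frames other than PS-Poll and BlockAckReq are only of interest to monitor interfaces */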
110 if (ieee80211_is_ctl(hdr->frame_control) &&
111 !ieee80211_is_pspoll(hdr->frame_control) &&
112 !ieee80211_is_back_req(hdr->frame_control))
113 return true;
114
115 return false;
116 }
117
118 static int
119 ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
120 struct ieee80211_rx_status *status,
121 struct sk_buff *skb)
122 {
123 int len;
124
125 /* always present fields */
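/* flags (1) + rate (1) + channel freq/flags (2 + 2) + RX flags (2) = 8 bytes */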
126 len = sizeof(struct ieee80211_radiotap_header) + 8;
127
128 /* allocate extra bitmaps */
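/* one extra 4-byte it_present word per reported chain */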
129 if (status->chains)
130 len += 4 * hweight8(status->chains);
131
132 if (ieee80211_have_rx_timestamp(status)) {
133 len = ALIGN(len, 8);
134 len += 8;
135 }
136 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM))
137 len += 1;
138
139 /* antenna field, if we don't have per-chain info */
140 if (!status->chains)
141 len += 1;
142
143 /* padding for RX_FLAGS if necessary */
144 len = ALIGN(len, 2);
145
146 if (status->encoding == RX_ENC_HT) /* HT info */
147 len += 3;
148
149 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
150 len = ALIGN(len, 4);
151 len += 8;
152 }
153
154 if (status->encoding == RX_ENC_VHT) {
155 /* Included even if RX_FLAG_RADIOTAP_VHT is not set */
156 len = ALIGN(len, 2);
157 len += 12;
158 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_vht) != 12);
159 }
160
161 if (local->hw.radiotap_timestamp.units_pos >= 0) {
162 len = ALIGN(len, 8);
163 len += 12;
164 }
165
166 if (status->encoding == RX_ENC_HE &&
167 status->flag & RX_FLAG_RADIOTAP_HE) {
168 len = ALIGN(len, 2);
169 len += 12;
170 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) != 12);
171 }
172
173 if (status->encoding == RX_ENC_HE &&
174 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
175 len = ALIGN(len, 2);
176 len += 12;
177 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) != 12);
178 }
179
180 if (status->flag & RX_FLAG_NO_PSDU)
181 len += 1;
182
183 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
184 len = ALIGN(len, 2);
185 len += 4;
186 BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) != 4);
187 }
188
189 if (status->chains) {
190 /* antenna and antenna signal fields */
191 len += 2 * hweight8(status->chains);
192 }
193
194 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
195 int tlv_offset = 0;
196
197 /*
198 * The position to look at depends on the existence (or non-
199 * existence) of other elements, so take that into account...
200 */
201 if (status->flag & RX_FLAG_RADIOTAP_VHT)
202 tlv_offset +=
203 sizeof(struct ieee80211_radiotap_vht);
204 if (status->flag & RX_FLAG_RADIOTAP_HE)
205 tlv_offset +=
206 sizeof(struct ieee80211_radiotap_he);
207 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
208 tlv_offset +=
209 sizeof(struct ieee80211_radiotap_he_mu);
210 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
211 tlv_offset +=
212 sizeof(struct ieee80211_radiotap_lsig);
213
214 /* ensure 4 byte alignment for TLV */
215 len = ALIGN(len, 4);
216
217 /* TLVs until the mac header */
218 len += skb_mac_header(skb) - &skb->data[tlv_offset];
219 }
220
221 return len;
222 }
223
224 static void __ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
225 int link_id,
226 struct sta_info *sta,
227 struct sk_buff *skb)
228 {
229 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
230
231 if (link_id >= 0) {
232 status->link_valid = 1;
233 status->link_id = link_id;
234 } else {
235 status->link_valid = 0;
236 }
237
238 skb_queue_tail(&sdata->skb_queue, skb);
239 wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
240 if (sta) {
241 struct link_sta_info *link_sta_info;
242
243 if (link_id >= 0) {
244 link_sta_info = rcu_dereference(sta->link[link_id]);
245 if (!link_sta_info)
246 return;
247 } else {
248 link_sta_info = &sta->deflink;
249 }
250
251 link_sta_info->rx_stats.packets++;
252 }
253 }
254
255 static void ieee80211_queue_skb_to_iface(struct ieee80211_sub_if_data *sdata,
256 int link_id,
257 struct sta_info *sta,
258 struct sk_buff *skb)
259 {
260 skb->protocol = 0;
261 __ieee80211_queue_skb_to_iface(sdata, link_id, sta, skb);
262 }
263
264 static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
265 struct sk_buff *skb,
266 int rtap_space)
267 {
268 struct {
269 struct ieee80211_hdr_3addr hdr;
270 u8 category;
271 u8 action_code;
272 } __packed __aligned(2) action;
273
274 if (!sdata)
275 return;
276
277 BUILD_BUG_ON(sizeof(action) != IEEE80211_MIN_ACTION_SIZE + 1);
278
279 if (skb->len < rtap_space + sizeof(action) +
280 VHT_MUMIMO_GROUPS_DATA_LEN)
281 return;
282
283 if (!is_valid_ether_addr(sdata->u.mntr.mu_follow_addr))
284 return;
285
286 skb_copy_bits(skb, rtap_space, &action, sizeof(action));
287
288 if (!ieee80211_is_action(action.hdr.frame_control))
289 return;
290
291 if (action.category != WLAN_CATEGORY_VHT)
292 return;
293
294 if (action.action_code != WLAN_VHT_ACTION_GROUPID_MGMT)
295 return;
296
297 if (!ether_addr_equal(action.hdr.addr1, sdata->u.mntr.mu_follow_addr))
298 return;
299
300 skb = skb_copy(skb, GFP_ATOMIC);
301 if (!skb)
302 return;
303
304 ieee80211_queue_skb_to_iface(sdata, -1, NULL, skb);
305 }
306
307 /*
308 * ieee80211_add_rx_radiotap_header - add radiotap header
309 *
310 * add a radiotap header containing all the fields which the hardware provided.
311 */
312 static void
313 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
314 struct sk_buff *skb,
315 struct ieee80211_rate *rate,
316 int rtap_len, bool has_fcs)
317 {
318 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
319 struct ieee80211_radiotap_header *rthdr;
320 unsigned char *pos;
321 __le32 *it_present;
322 u32 it_present_val;
323 u16 rx_flags = 0;
324 u16 channel_flags = 0;
325 u32 tlvs_len = 0;
326 int mpdulen, chain;
327 unsigned long chains = status->chains;
328 struct ieee80211_radiotap_vht vht = {};
329 struct ieee80211_radiotap_he he = {};
330 struct ieee80211_radiotap_he_mu he_mu = {};
331 struct ieee80211_radiotap_lsig lsig = {};
332
333 if (status->flag & RX_FLAG_RADIOTAP_VHT) {
334 vht = *(struct ieee80211_radiotap_vht *)skb->data;
335 skb_pull(skb, sizeof(vht));
336 WARN_ON_ONCE(status->encoding != RX_ENC_VHT);
337 }
338
339 if (status->flag & RX_FLAG_RADIOTAP_HE) {
340 he = *(struct ieee80211_radiotap_he *)skb->data;
341 skb_pull(skb, sizeof(he));
342 WARN_ON_ONCE(status->encoding != RX_ENC_HE);
343 }
344
345 if (status->flag & RX_FLAG_RADIOTAP_HE_MU) {
346 he_mu = *(struct ieee80211_radiotap_he_mu *)skb->data;
347 skb_pull(skb, sizeof(he_mu));
348 }
349
350 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
351 lsig = *(struct ieee80211_radiotap_lsig *)skb->data;
352 skb_pull(skb, sizeof(lsig));
353 }
354
355 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END) {
356 /* skb->data now points at the TLVs; all other info was pulled off above */
357 tlvs_len = skb_mac_header(skb) - skb->data;
358 }
359
360 mpdulen = skb->len;
361 if (!(has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)))
362 mpdulen += FCS_LEN;
363
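/* driver-supplied TLVs are already in the buffer, so push room only for the fixed radiotap fields */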
364 rthdr = skb_push(skb, rtap_len - tlvs_len);
365 memset(rthdr, 0, rtap_len - tlvs_len);
366 it_present = &rthdr->it_present;
367
368 /* radiotap header, set always present flags */
369 rthdr->it_len = cpu_to_le16(rtap_len);
370 it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
371 BIT(IEEE80211_RADIOTAP_CHANNEL) |
372 BIT(IEEE80211_RADIOTAP_RX_FLAGS);
373
374 if (!status->chains)
375 it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
376
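/* each chain gets its own extended it_present word advertising antenna + antenna signal */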
377 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
378 it_present_val |=
379 BIT(IEEE80211_RADIOTAP_EXT) |
380 BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
381 put_unaligned_le32(it_present_val, it_present);
382 it_present++;
383 it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
384 BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
385 }
386
387 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
388 it_present_val |= BIT(IEEE80211_RADIOTAP_TLV);
389
390 put_unaligned_le32(it_present_val, it_present);
391
392 /* This references through an offset into it_optional[] rather
393 * than via it_present, because otherwise later uses of pos would cause
394 * the compiler to think we have walked past the end of the
395 * struct member.
396 */
397 pos = (void *)&rthdr->it_optional[it_present + 1 - rthdr->it_optional];
398
399 /* the order of the following fields is important */
400
401 /* IEEE80211_RADIOTAP_TSFT */
402 if (ieee80211_have_rx_timestamp(status)) {
403 /* padding */
404 while ((pos - (u8 *)rthdr) & 7)
405 *pos++ = 0;
406 put_unaligned_le64(
407 ieee80211_calculate_rx_timestamp(local, status,
408 mpdulen, 0),
409 pos);
410 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_TSFT));
411 pos += 8;
412 }
413
414 /* IEEE80211_RADIOTAP_FLAGS */
415 if (has_fcs && ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS))
416 *pos |= IEEE80211_RADIOTAP_F_FCS;
417 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
418 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
419 if (status->enc_flags & RX_ENC_FLAG_SHORTPRE)
420 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
421 pos++;
422
423 /* IEEE80211_RADIOTAP_RATE */
424 if (!rate || status->encoding != RX_ENC_LEGACY) {
425 /*
426 * Without rate information don't add it. If we have it but
427 * it's not a legacy rate, the MCS information is a separate
428 * radiotap field added below. The byte here is needed as padding
429 * for the channel though, so initialise it to 0.
430 */
431 *pos = 0;
432 } else {
433 int shift = 0;
434 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_RATE));
435 if (status->bw == RATE_INFO_BW_10)
436 shift = 1;
437 else if (status->bw == RATE_INFO_BW_5)
438 shift = 2;
439 *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
440 }
441 pos++;
442
443 /* IEEE80211_RADIOTAP_CHANNEL */
444 /* TODO: frequency offset in KHz */
445 put_unaligned_le16(status->freq, pos);
446 pos += 2;
447 if (status->bw == RATE_INFO_BW_10)
448 channel_flags |= IEEE80211_CHAN_HALF;
449 else if (status->bw == RATE_INFO_BW_5)
450 channel_flags |= IEEE80211_CHAN_QUARTER;
451
452 if (status->band == NL80211_BAND_5GHZ ||
453 status->band == NL80211_BAND_6GHZ)
454 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
455 else if (status->encoding != RX_ENC_LEGACY)
456 channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
457 else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
458 channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
459 else if (rate)
460 channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
461 else
462 channel_flags |= IEEE80211_CHAN_2GHZ;
463 put_unaligned_le16(channel_flags, pos);
464 pos += 2;
465
466 /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
467 if (ieee80211_hw_check(&local->hw, SIGNAL_DBM) &&
468 !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
469 *pos = status->signal;
470 rthdr->it_present |=
471 cpu_to_le32(BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL));
472 pos++;
473 }
474
475 /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
476
477 if (!status->chains) {
478 /* IEEE80211_RADIOTAP_ANTENNA */
479 *pos = status->antenna;
480 pos++;
481 }
482
483 /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
484
485 /* IEEE80211_RADIOTAP_RX_FLAGS */
486 /* ensure 2 byte alignment for the 2 byte field as required */
487 if ((pos - (u8 *)rthdr) & 1)
488 *pos++ = 0;
489 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
490 rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
491 put_unaligned_le16(rx_flags, pos);
492 pos += 2;
493
494 if (status->encoding == RX_ENC_HT) {
495 unsigned int stbc;
496
497 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_MCS));
498 *pos = local->hw.radiotap_mcs_details;
499 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
500 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
501 if (status->enc_flags & RX_ENC_FLAG_LDPC)
502 *pos |= IEEE80211_RADIOTAP_MCS_HAVE_FEC;
503 pos++;
504 *pos = 0;
505 if (status->enc_flags & RX_ENC_FLAG_SHORT_GI)
506 *pos |= IEEE80211_RADIOTAP_MCS_SGI;
507 if (status->bw == RATE_INFO_BW_40)
508 *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
509 if (status->enc_flags & RX_ENC_FLAG_HT_GF)
510 *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
511 if (status->enc_flags & RX_ENC_FLAG_LDPC)
512 *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
513 stbc = (status->enc_flags & RX_ENC_FLAG_STBC_MASK) >> RX_ENC_FLAG_STBC_SHIFT;
514 *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
515 pos++;
516 *pos++ = status->rate_idx;
517 }
518
519 if (status->flag & RX_FLAG_AMPDU_DETAILS) {
520 u16 flags = 0;
521
522 /* ensure 4 byte alignment */
523 while ((pos - (u8 *)rthdr) & 3)
524 pos++;
525 rthdr->it_present |=
526 cpu_to_le32(BIT(IEEE80211_RADIOTAP_AMPDU_STATUS));
527 put_unaligned_le32(status->ampdu_reference, pos);
528 pos += 4;
529 if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
530 flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
531 if (status->flag & RX_FLAG_AMPDU_IS_LAST)
532 flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
533 if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
534 flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
535 if (status->flag & RX_FLAG_AMPDU_EOF_BIT_KNOWN)
536 flags |= IEEE80211_RADIOTAP_AMPDU_EOF_KNOWN;
537 if (status->flag & RX_FLAG_AMPDU_EOF_BIT)
538 flags |= IEEE80211_RADIOTAP_AMPDU_EOF;
539 put_unaligned_le16(flags, pos);
540 pos += 2;
541 *pos++ = 0;
542 *pos++ = 0;
543 }
544
545 if (status->encoding == RX_ENC_VHT) {
546 u16 fill = local->hw.radiotap_vht_details;
547
548 /* Leave driver filled fields alone */
549 fill &= ~le16_to_cpu(vht.known);
550 vht.known |= cpu_to_le16(fill);
551
552 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_GI &&
553 status->enc_flags & RX_ENC_FLAG_SHORT_GI)
554 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
555 /* in VHT, STBC is binary */
556 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_STBC &&
557 status->enc_flags & RX_ENC_FLAG_STBC_MASK)
558 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
559 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED &&
560 status->enc_flags & RX_ENC_FLAG_BF)
561 vht.flags |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
562
563 if (fill & IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH) {
564 switch (status->bw) {
565 case RATE_INFO_BW_40:
566 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_40;
567 break;
568 case RATE_INFO_BW_80:
569 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_80;
570 break;
571 case RATE_INFO_BW_160:
572 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_160;
573 break;
574 default:
575 vht.bandwidth = IEEE80211_RADIOTAP_VHT_BW_20;
576 break;
577 }
578 }
579
580 /*
581 * If the driver filled in mcs_nss[0], then do not touch it.
582 *
583 * Otherwise, put some information about MCS/NSS into the
584 * user 0 field. Note that this is not technically correct for
585 * an MU frame as we might have decoded a different user.
586 */
587 if (!vht.mcs_nss[0]) {
588 vht.mcs_nss[0] = (status->rate_idx << 4) | status->nss;
589
590 /* coding field */
591 if (status->enc_flags & RX_ENC_FLAG_LDPC)
592 vht.coding |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
593 }
594
595 /* ensure 2 byte alignment */
596 while ((pos - (u8 *)rthdr) & 1)
597 pos++;
598 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_VHT));
599 memcpy(pos, &vht, sizeof(vht));
600 pos += sizeof(vht);
601 }
602
603 if (local->hw.radiotap_timestamp.units_pos >= 0) {
604 u16 accuracy = 0;
605 u8 flags;
606 u64 ts;
607
608 rthdr->it_present |=
609 cpu_to_le32(BIT(IEEE80211_RADIOTAP_TIMESTAMP));
610
611 /* ensure 8 byte alignment */
612 while ((pos - (u8 *)rthdr) & 7)
613 pos++;
614
615 if (status->flag & RX_FLAG_MACTIME_IS_RTAP_TS64) {
616 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_64BIT;
617 ts = status->mactime;
618 } else {
619 flags = IEEE80211_RADIOTAP_TIMESTAMP_FLAG_32BIT;
620 ts = status->device_timestamp;
621 }
622
623 put_unaligned_le64(ts, pos);
624 pos += sizeof(u64);
625
626 if (local->hw.radiotap_timestamp.accuracy >= 0) {
627 accuracy = local->hw.radiotap_timestamp.accuracy;
628 flags |= IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY;
629 }
630 put_unaligned_le16(accuracy, pos);
631 pos += sizeof(u16);
632
633 *pos++ = local->hw.radiotap_timestamp.units_pos;
634 *pos++ = flags;
635 }
636
637 if (status->encoding == RX_ENC_HE &&
638 status->flag & RX_FLAG_RADIOTAP_HE) {
639 #define HE_PREP(f, val) le16_encode_bits(val, IEEE80211_RADIOTAP_HE_##f)
640
641 if (status->enc_flags & RX_ENC_FLAG_STBC_MASK) {
642 he.data6 |= HE_PREP(DATA6_NSTS,
643 FIELD_GET(RX_ENC_FLAG_STBC_MASK,
644 status->enc_flags));
645 he.data3 |= HE_PREP(DATA3_STBC, 1);
646 } else {
647 he.data6 |= HE_PREP(DATA6_NSTS, status->nss);
648 }
649
650 #define CHECK_GI(s) \
651 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_GI_##s != \
652 (int)NL80211_RATE_INFO_HE_GI_##s)
653
654 CHECK_GI(0_8);
655 CHECK_GI(1_6);
656 CHECK_GI(3_2);
657
658 he.data3 |= HE_PREP(DATA3_DATA_MCS, status->rate_idx);
659 he.data3 |= HE_PREP(DATA3_DATA_DCM, status->he_dcm);
660 he.data3 |= HE_PREP(DATA3_CODING,
661 !!(status->enc_flags & RX_ENC_FLAG_LDPC));
662
663 he.data5 |= HE_PREP(DATA5_GI, status->he_gi);
664
665 switch (status->bw) {
666 case RATE_INFO_BW_20:
667 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
668 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ);
669 break;
670 case RATE_INFO_BW_40:
671 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
672 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ);
673 break;
674 case RATE_INFO_BW_80:
675 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
676 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ);
677 break;
678 case RATE_INFO_BW_160:
679 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
680 IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ);
681 break;
682 case RATE_INFO_BW_HE_RU:
683 #define CHECK_RU_ALLOC(s) \
684 BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_##s##T != \
685 NL80211_RATE_INFO_HE_RU_ALLOC_##s + 4)
686
687 CHECK_RU_ALLOC(26);
688 CHECK_RU_ALLOC(52);
689 CHECK_RU_ALLOC(106);
690 CHECK_RU_ALLOC(242);
691 CHECK_RU_ALLOC(484);
692 CHECK_RU_ALLOC(996);
693 CHECK_RU_ALLOC(2x996);
694
695 he.data5 |= HE_PREP(DATA5_DATA_BW_RU_ALLOC,
696 status->he_ru + 4);
697 break;
698 default:
699 WARN_ONCE(1, "Invalid SU BW %d\n", status->bw);
700 }
701
702 /* ensure 2 byte alignment */
703 while ((pos - (u8 *)rthdr) & 1)
704 pos++;
705 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE));
706 memcpy(pos, &he, sizeof(he));
707 pos += sizeof(he);
708 }
709
710 if (status->encoding == RX_ENC_HE &&
711 status->flag & RX_FLAG_RADIOTAP_HE_MU) {
712 /* ensure 2 byte alignment */
713 while ((pos - (u8 *)rthdr) & 1)
714 pos++;
715 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_HE_MU));
716 memcpy(pos, &he_mu, sizeof(he_mu));
717 pos += sizeof(he_mu);
718 }
719
720 if (status->flag & RX_FLAG_NO_PSDU) {
721 rthdr->it_present |=
722 cpu_to_le32(BIT(IEEE80211_RADIOTAP_ZERO_LEN_PSDU));
723 *pos++ = status->zero_length_psdu_type;
724 }
725
726 if (status->flag & RX_FLAG_RADIOTAP_LSIG) {
727 /* ensure 2 byte alignment */
728 while ((pos - (u8 *)rthdr) & 1)
729 pos++;
730 rthdr->it_present |= cpu_to_le32(BIT(IEEE80211_RADIOTAP_LSIG));
731 memcpy(pos, &lsig, sizeof(lsig));
732 pos += sizeof(lsig);
733 }
734
735 for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
736 *pos++ = status->chain_signal[chain];
737 *pos++ = chain;
738 }
739 }
740
741 static struct sk_buff *
742 ieee80211_make_monitor_skb(struct ieee80211_local *local,
743 struct sk_buff **origskb,
744 struct ieee80211_rate *rate,
745 int rtap_space, bool use_origskb)
746 {
747 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(*origskb);
748 int rt_hdrlen, needed_headroom;
749 struct sk_buff *skb;
750
751 /* room for the radiotap header based on driver features */
752 rt_hdrlen = ieee80211_rx_radiotap_hdrlen(local, status, *origskb);
753 needed_headroom = rt_hdrlen - rtap_space;
754
755 if (use_origskb) {
756 /* only need to expand headroom if necessary */
757 skb = *origskb;
758 *origskb = NULL;
759
760 /*
761 * This shouldn't trigger often because most devices have an
762 * RX header they pull before we get here, and that should
763 * be big enough for our radiotap information. We should
764 * probably export the length to drivers so that we can have
765 * them allocate enough headroom to start with.
766 */
767 if (skb_headroom(skb) < needed_headroom &&
768 pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
769 dev_kfree_skb(skb);
770 return NULL;
771 }
772 } else {
773 /*
774 * Need to make a copy and possibly remove radiotap header
775 * and FCS from the original.
776 */
777 skb = skb_copy_expand(*origskb, needed_headroom + NET_SKB_PAD,
778 0, GFP_ATOMIC);
779
780 if (!skb)
781 return NULL;
782 }
783
784 /* prepend radiotap information */
785 ieee80211_add_rx_radiotap_header(local, skb, rate, rt_hdrlen, true);
786
787 skb_reset_mac_header(skb);
788 skb->ip_summed = CHECKSUM_UNNECESSARY;
789 skb->pkt_type = PACKET_OTHERHOST;
790 skb->protocol = htons(ETH_P_802_2);
791
792 return skb;
793 }
794
795 static bool
796 ieee80211_validate_monitor_radio(struct ieee80211_sub_if_data *sdata,
797 struct ieee80211_local *local,
798 struct ieee80211_rx_status *status)
799 {
800 struct wiphy *wiphy = local->hw.wiphy;
801 int i, freq, bw;
802
803 if (!wiphy->n_radio)
804 return true;
805
806 switch (status->bw) {
807 case RATE_INFO_BW_20:
808 bw = 20000;
809 break;
810 case RATE_INFO_BW_40:
811 bw = 40000;
812 break;
813 case RATE_INFO_BW_80:
814 bw = 80000;
815 break;
816 case RATE_INFO_BW_160:
817 bw = 160000;
818 break;
819 case RATE_INFO_BW_320:
820 bw = 320000;
821 break;
822 default:
823 return false;
824 }
825
826 freq = MHZ_TO_KHZ(status->freq);
827
828 for (i = 0; i < wiphy->n_radio; i++) {
829 if (!(sdata->wdev.radio_mask & BIT(i)))
830 continue;
831
832 if (!ieee80211_radio_freq_range_valid(&wiphy->radio[i], freq, bw))
833 continue;
834
835 return true;
836 }
837 return false;
838 }
839
840 /*
841 * This function copies a received frame to all monitor interfaces and
842 * returns a cleaned-up SKB that no longer includes the FCS nor the
843 * radiotap header the driver might have added.
844 */
845 static struct sk_buff *
846 ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
847 struct ieee80211_rate *rate)
848 {
849 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
850 struct ieee80211_sub_if_data *sdata, *prev_sdata = NULL;
851 struct sk_buff *skb, *monskb = NULL;
852 int present_fcs_len = 0;
853 unsigned int rtap_space = 0;
854 struct ieee80211_sub_if_data *monitor_sdata =
855 rcu_dereference(local->monitor_sdata);
856 bool only_monitor = false;
857 unsigned int min_head_len;
858
859 if (WARN_ON_ONCE(status->flag & RX_FLAG_RADIOTAP_TLV_AT_END &&
860 !skb_mac_header_was_set(origskb))) {
861 /* with this skb no way to know where frame payload starts */
862 dev_kfree_skb(origskb);
863 return NULL;
864 }
865
866 if (status->flag & RX_FLAG_RADIOTAP_VHT)
867 rtap_space += sizeof(struct ieee80211_radiotap_vht);
868
869 if (status->flag & RX_FLAG_RADIOTAP_HE)
870 rtap_space += sizeof(struct ieee80211_radiotap_he);
871
872 if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
873 rtap_space += sizeof(struct ieee80211_radiotap_he_mu);
874
875 if (status->flag & RX_FLAG_RADIOTAP_LSIG)
876 rtap_space += sizeof(struct ieee80211_radiotap_lsig);
877
878 if (status->flag & RX_FLAG_RADIOTAP_TLV_AT_END)
879 rtap_space += skb_mac_header(origskb) - &origskb->data[rtap_space];
880
881 min_head_len = rtap_space;
882
883 /*
884 * First, we may need to make a copy of the skb because
885 * (1) we need to modify it for radiotap (if not present), and
886 * (2) the other RX handlers will modify the skb we got.
887 *
888 * We don't need to, of course, if we aren't going to return
889 * the SKB because it has a bad FCS/PLCP checksum.
890 */
891
892 if (!(status->flag & RX_FLAG_NO_PSDU)) {
893 if (ieee80211_hw_check(&local->hw, RX_INCLUDES_FCS)) {
894 if (unlikely(origskb->len <= FCS_LEN + rtap_space)) {
895 /* driver bug */
896 WARN_ON(1);
897 dev_kfree_skb(origskb);
898 return NULL;
899 }
900 present_fcs_len = FCS_LEN;
901 }
902
903 /* also consider the hdr->frame_control */
904 min_head_len += 2;
905 }
906
907 /* ensure that the expected data elements are in skb head */
908 if (!pskb_may_pull(origskb, min_head_len)) {
909 dev_kfree_skb(origskb);
910 return NULL;
911 }
912
913 only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
914
915 if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
916 if (only_monitor) {
917 dev_kfree_skb(origskb);
918 return NULL;
919 }
920
921 return ieee80211_clean_skb(origskb, present_fcs_len,
922 rtap_space);
923 }
924
925 ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
926
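/* Deliver a clone to every matching monitor interface; delivery is deferred by one iteration so that the last interface can be handed the final skb (or the original, if it is monitor-only) without an extra copy. */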
927 list_for_each_entry_rcu(sdata, &local->mon_list, u.mntr.list) {
928 struct cfg80211_chan_def *chandef;
929
930 chandef = &sdata->vif.bss_conf.chanreq.oper;
931 if (chandef->chan &&
932 chandef->chan->center_freq != status->freq)
933 continue;
934
935 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR) &&
936 !ieee80211_validate_monitor_radio(sdata, local, status))
937 continue;
938
939 if (!prev_sdata) {
940 prev_sdata = sdata;
941 continue;
942 }
943
944 if (ieee80211_hw_check(&local->hw, NO_VIRTUAL_MONITOR))
945 ieee80211_handle_mu_mimo_mon(sdata, origskb, rtap_space);
946
947 if (!monskb)
948 monskb = ieee80211_make_monitor_skb(local, &origskb,
949 rate, rtap_space,
950 false);
951 if (!monskb)
952 continue;
953
954 skb = skb_clone(monskb, GFP_ATOMIC);
955 if (!skb)
956 continue;
957
958 skb->dev = prev_sdata->dev;
959 dev_sw_netstats_rx_add(skb->dev, skb->len);
960 netif_receive_skb(skb);
961 prev_sdata = sdata;
962 }
963
964 if (prev_sdata) {
965 if (monskb)
966 skb = monskb;
967 else
968 skb = ieee80211_make_monitor_skb(local, &origskb,
969 rate, rtap_space,
970 only_monitor);
971 if (skb) {
972 skb->dev = prev_sdata->dev;
973 dev_sw_netstats_rx_add(skb->dev, skb->len);
974 netif_receive_skb(skb);
975 }
976 }
977
978 if (!origskb)
979 return NULL;
980
981 return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
982 }
983
984 static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
985 {
986 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
987 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
988 int tid, seqno_idx, security_idx;
989
990 /* does the frame have a qos control field? */
991 if (ieee80211_is_data_qos(hdr->frame_control)) {
992 u8 *qc = ieee80211_get_qos_ctl(hdr);
993 /* frame has qos control */
994 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
995 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
996 status->rx_flags |= IEEE80211_RX_AMSDU;
997
998 seqno_idx = tid;
999 security_idx = tid;
1000 } else {
1001 /*
1002 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
1003 *
1004 * Sequence numbers for management frames, QoS data
1005 * frames with a broadcast/multicast address in the
1006 * Address 1 field, and all non-QoS data frames sent
1007 * by QoS STAs are assigned using an additional single
1008 * modulo-4096 counter, [...]
1009 *
1010 * We also use that counter for non-QoS STAs.
1011 */
1012 seqno_idx = IEEE80211_NUM_TIDS;
1013 security_idx = 0;
1014 if (ieee80211_is_mgmt(hdr->frame_control))
1015 security_idx = IEEE80211_NUM_TIDS;
1016 tid = 0;
1017 }
1018
1019 rx->seqno_idx = seqno_idx;
1020 rx->security_idx = security_idx;
1021 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
1022 * For now, set skb->priority to 0 for other cases. */
1023 rx->skb->priority = (tid > 7) ? 0 : tid;
1024 }
1025
1026 /**
1027 * DOC: Packet alignment
1028 *
1029 * Drivers always need to pass packets that are aligned to two-byte boundaries
1030 * to the stack.
1031 *
1032 * Additionally, they should, if possible, align the payload data in a way that
1033 * guarantees that the contained IP header is aligned to a four-byte
1034 * boundary. In the case of regular frames, this simply means aligning the
1035 * payload to a four-byte boundary (because either the IP header is directly
1036 * contained, or IV/RFC1042 headers that have a length divisible by four are
1037 * in front of it). If the payload data is not properly aligned and the
1038 * architecture doesn't support efficient unaligned operations, mac80211
1039 * will align the data.
1040 *
1041 * With A-MSDU frames, however, the payload data address must yield two modulo
1042 * four because there are 14-byte 802.3 headers within the A-MSDU frames that
1043 * push the IP header further back to a multiple of four again. Thankfully, the
1044 * specs were sane enough this time around to require padding each A-MSDU
1045 * subframe to a length that is a multiple of four.
1046 *
1047 * Padding like Atheros hardware adds which is between the 802.11 header and
1048 * the payload is not supported; the driver is required to move the 802.11
1049 * header to be directly in front of the payload in that case.
1050 */
1051 static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
1052 {
1053 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1054 WARN_ON_ONCE((unsigned long)rx->skb->data & 1);
1055 #endif
1056 }
1057
1058
1059 /* rx handlers */
1060
1061 static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
1062 {
1063 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1064
1065 if (is_multicast_ether_addr(hdr->addr1))
1066 return 0;
1067
1068 return ieee80211_is_robust_mgmt_frame(skb);
1069 }
1070
1071
1072 static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
1073 {
1074 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1075
1076 if (!is_multicast_ether_addr(hdr->addr1))
1077 return 0;
1078
1079 return ieee80211_is_robust_mgmt_frame(skb);
1080 }
1081
1082
1083 /* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
1084 static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
1085 {
1086 struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
1087 struct ieee80211_mmie *mmie;
1088 struct ieee80211_mmie_16 *mmie16;
1089
1090 if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
1091 return -1;
1092
1093 if (!ieee80211_is_robust_mgmt_frame(skb) &&
1094 !ieee80211_is_beacon(hdr->frame_control))
1095 return -1; /* not a robust management frame */
1096
1097 mmie = (struct ieee80211_mmie *)
1098 (skb->data + skb->len - sizeof(*mmie));
1099 if (mmie->element_id == WLAN_EID_MMIE &&
1100 mmie->length == sizeof(*mmie) - 2)
1101 return le16_to_cpu(mmie->key_id);
1102
1103 mmie16 = (struct ieee80211_mmie_16 *)
1104 (skb->data + skb->len - sizeof(*mmie16));
1105 if (skb->len >= 24 + sizeof(*mmie16) &&
1106 mmie16->element_id == WLAN_EID_MMIE &&
1107 mmie16->length == sizeof(*mmie16) - 2)
1108 return le16_to_cpu(mmie16->key_id);
1109
1110 return -1;
1111 }
1112
1113 static int ieee80211_get_keyid(struct sk_buff *skb)
1114 {
1115 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1116 __le16 fc = hdr->frame_control;
1117 int hdrlen = ieee80211_hdrlen(fc);
1118 u8 keyid;
1119
1120 /* WEP, TKIP, CCMP and GCMP */
1121 if (unlikely(skb->len < hdrlen + IEEE80211_WEP_IV_LEN))
1122 return -EINVAL;
1123
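/* the key ID is carried in bits 6-7 of the fourth IV octet */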
1124 skb_copy_bits(skb, hdrlen + 3, &keyid, 1);
1125
1126 keyid >>= 6;
1127
1128 return keyid;
1129 }
1130
1131 static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
1132 {
1133 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1134 char *dev_addr = rx->sdata->vif.addr;
1135
1136 if (ieee80211_is_data(hdr->frame_control)) {
1137 if (is_multicast_ether_addr(hdr->addr1)) {
1138 if (ieee80211_has_tods(hdr->frame_control) ||
1139 !ieee80211_has_fromds(hdr->frame_control))
1140 return RX_DROP;
1141 if (ether_addr_equal(hdr->addr3, dev_addr))
1142 return RX_DROP;
1143 } else {
1144 if (!ieee80211_has_a4(hdr->frame_control))
1145 return RX_DROP;
1146 if (ether_addr_equal(hdr->addr4, dev_addr))
1147 return RX_DROP;
1148 }
1149 }
1150
1151 /* If there is no established peer link and this is not a peer link
1152 * establishment frame, beacon or probe, drop the frame.
1153 */
1154
1155 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
1156 struct ieee80211_mgmt *mgmt;
1157
1158 if (!ieee80211_is_mgmt(hdr->frame_control))
1159 return RX_DROP;
1160
1161 if (ieee80211_is_action(hdr->frame_control)) {
1162 u8 category;
1163
1164 /* make sure category field is present */
1165 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
1166 return RX_DROP;
1167
1168 mgmt = (struct ieee80211_mgmt *)hdr;
1169 category = mgmt->u.action.category;
1170 if (category != WLAN_CATEGORY_MESH_ACTION &&
1171 category != WLAN_CATEGORY_SELF_PROTECTED)
1172 return RX_DROP;
1173 return RX_CONTINUE;
1174 }
1175
1176 if (ieee80211_is_probe_req(hdr->frame_control) ||
1177 ieee80211_is_probe_resp(hdr->frame_control) ||
1178 ieee80211_is_beacon(hdr->frame_control) ||
1179 ieee80211_is_auth(hdr->frame_control))
1180 return RX_CONTINUE;
1181
1182 return RX_DROP;
1183 }
1184
1185 return RX_CONTINUE;
1186 }
1187
1188 static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
1189 int index)
1190 {
1191 struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
1192 struct sk_buff *tail = skb_peek_tail(frames);
1193 struct ieee80211_rx_status *status;
1194
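/* slots the driver reported as filtered count as ready for release */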
1195 if (tid_agg_rx->reorder_buf_filtered &&
1196 tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
1197 return true;
1198
1199 if (!tail)
1200 return false;
1201
1202 status = IEEE80211_SKB_RXCB(tail);
1203 if (status->flag & RX_FLAG_AMSDU_MORE)
1204 return false;
1205
1206 return true;
1207 }
1208
1209 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
1210 struct tid_ampdu_rx *tid_agg_rx,
1211 int index,
1212 struct sk_buff_head *frames)
1213 {
1214 struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
1215 struct sk_buff *skb;
1216 struct ieee80211_rx_status *status;
1217
1218 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1219
1220 if (skb_queue_empty(skb_list))
1221 goto no_frame;
1222
1223 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1224 __skb_queue_purge(skb_list);
1225 goto no_frame;
1226 }
1227
1228 /* release frames from the reorder ring buffer */
1229 tid_agg_rx->stored_mpdu_num--;
1230 while ((skb = __skb_dequeue(skb_list))) {
1231 status = IEEE80211_SKB_RXCB(skb);
1232 status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
1233 __skb_queue_tail(frames, skb);
1234 }
1235
1236 no_frame:
1237 if (tid_agg_rx->reorder_buf_filtered)
1238 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
1239 tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1240 }
1241
1242 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
1243 struct tid_ampdu_rx *tid_agg_rx,
1244 u16 head_seq_num,
1245 struct sk_buff_head *frames)
1246 {
1247 int index;
1248
1249 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1250
1251 while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
1252 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1253 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1254 frames);
1255 }
1256 }
1257
1258 /*
1259 * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
1260 * the skb was added to the buffer longer than this time ago, the earlier
1261 * frames that have not yet been received are assumed to be lost and the skb
1262 * can be released for processing. This may also release other skb's from the
1263 * reorder buffer if there are no additional gaps between the frames.
1264 *
1265 * Callers must hold tid_agg_rx->reorder_lock.
1266 */
1267 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
1268
1269 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
1270 struct tid_ampdu_rx *tid_agg_rx,
1271 struct sk_buff_head *frames)
1272 {
1273 int index, i, j;
1274
1275 lockdep_assert_held(&tid_agg_rx->reorder_lock);
1276
1277 /* release the buffer until next missing frame */
1278 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1279 if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
1280 tid_agg_rx->stored_mpdu_num) {
1281 /*
1282 * No buffers ready to be released, but check whether any
1283 * frames in the reorder buffer have timed out.
1284 */
1285 int skipped = 1;
1286 for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
1287 j = (j + 1) % tid_agg_rx->buf_size) {
1288 if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
1289 skipped++;
1290 continue;
1291 }
1292 if (skipped &&
1293 !time_after(jiffies, tid_agg_rx->reorder_time[j] +
1294 HT_RX_REORDER_BUF_TIMEOUT))
1295 goto set_release_timer;
1296
1297 /* don't leave incomplete A-MSDUs around */
1298 for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
1299 i = (i + 1) % tid_agg_rx->buf_size)
1300 __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
1301
1302 ht_dbg_ratelimited(sdata,
1303 "release an RX reorder frame due to timeout on earlier frames\n");
1304 ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
1305 frames);
1306
1307 /*
1308 * Increment the head seq# also for the skipped slots.
1309 */
1310 tid_agg_rx->head_seq_num =
1311 (tid_agg_rx->head_seq_num +
1312 skipped) & IEEE80211_SN_MASK;
1313 skipped = 0;
1314 }
1315 } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1316 ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
1317 frames);
1318 index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1319 }
1320
1321 if (tid_agg_rx->stored_mpdu_num) {
1322 j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
1323
1324 for (; j != (index - 1) % tid_agg_rx->buf_size;
1325 j = (j + 1) % tid_agg_rx->buf_size) {
1326 if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
1327 break;
1328 }
1329
1330 set_release_timer:
1331
1332 if (!tid_agg_rx->removed)
1333 mod_timer(&tid_agg_rx->reorder_timer,
1334 tid_agg_rx->reorder_time[j] + 1 +
1335 HT_RX_REORDER_BUF_TIMEOUT);
1336 } else {
1337 timer_delete(&tid_agg_rx->reorder_timer);
1338 }
1339 }
1340
1341 /*
1342 * As this function belongs to the RX path it must be under
1343 * rcu_read_lock protection. It returns false if the frame
1344 * can be processed immediately, true if it was consumed.
1345 */
1346 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
1347 struct tid_ampdu_rx *tid_agg_rx,
1348 struct sk_buff *skb,
1349 struct sk_buff_head *frames)
1350 {
1351 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1352 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1353 u16 mpdu_seq_num = ieee80211_get_sn(hdr);
1354 u16 head_seq_num, buf_size;
1355 int index;
1356 bool ret = true;
1357
1358 spin_lock(&tid_agg_rx->reorder_lock);
1359
1360 /*
1361 * Offloaded BA sessions have no known starting sequence number so pick
1362 * one from the first Rxed frame for this tid after BA was started.
1363 */
1364 if (unlikely(tid_agg_rx->auto_seq)) {
1365 tid_agg_rx->auto_seq = false;
1366 tid_agg_rx->ssn = mpdu_seq_num;
1367 tid_agg_rx->head_seq_num = mpdu_seq_num;
1368 }
1369
1370 buf_size = tid_agg_rx->buf_size;
1371 head_seq_num = tid_agg_rx->head_seq_num;
1372
1373 /*
1374 * If the current MPDU's SN is smaller than the SSN, it shouldn't
1375 * be reordered.
1376 */
1377 if (unlikely(!tid_agg_rx->started)) {
1378 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1379 ret = false;
1380 goto out;
1381 }
1382 tid_agg_rx->started = true;
1383 }
1384
1385 /* frame with out of date sequence number */
1386 if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
1387 dev_kfree_skb(skb);
1388 goto out;
1389 }
1390
1391 /*
1392 * If the frame's sequence number exceeds our buffering window
1393 * size, release some previous frames to make room for this one.
1394 */
1395 if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
1396 head_seq_num = ieee80211_sn_inc(
1397 ieee80211_sn_sub(mpdu_seq_num, buf_size));
1398 /* release stored frames up to new head to stack */
1399 ieee80211_release_reorder_frames(sdata, tid_agg_rx,
1400 head_seq_num, frames);
1401 }
1402
1403 /* Now the new frame is always in the range of the reordering buffer */
1404
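/* slot in the circular reorder buffer for this sequence number */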
1405 index = mpdu_seq_num % tid_agg_rx->buf_size;
1406
1407 /* check if we already stored this frame */
1408 if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
1409 dev_kfree_skb(skb);
1410 goto out;
1411 }
1412
1413 /*
1414 * If the current MPDU is in the right order and nothing else
1415 * is stored we can process it directly, no need to buffer it.
1416 * If it is first but there's something stored, we may be able
1417 * to release frames after this one.
1418 */
1419 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
1420 tid_agg_rx->stored_mpdu_num == 0) {
1421 if (!(status->flag & RX_FLAG_AMSDU_MORE))
1422 tid_agg_rx->head_seq_num =
1423 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
1424 ret = false;
1425 goto out;
1426 }
1427
1428 /* put the frame in the reordering buffer */
1429 __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
1430 if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1431 tid_agg_rx->reorder_time[index] = jiffies;
1432 tid_agg_rx->stored_mpdu_num++;
1433 ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
1434 }
1435
1436 out:
1437 spin_unlock(&tid_agg_rx->reorder_lock);
1438 return ret;
1439 }
1440
1441 /*
1442 * Reorder MPDUs from A-MPDUs, keeping them in a buffer. MPDUs that need
1443 * no reordering are appended to the frames queue for immediate processing.
1444 */
1445 static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
1446 struct sk_buff_head *frames)
1447 {
1448 struct sk_buff *skb = rx->skb;
1449 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1450 struct sta_info *sta = rx->sta;
1451 struct tid_ampdu_rx *tid_agg_rx;
1452 u16 sc;
1453 u8 tid, ack_policy;
1454
1455 if (!ieee80211_is_data_qos(hdr->frame_control) ||
1456 is_multicast_ether_addr(hdr->addr1))
1457 goto dont_reorder;
1458
1459 /*
1460 * filter the QoS data rx stream according to
1461 * STA/TID and check if this STA/TID is on aggregation
1462 */
1463
1464 if (!sta)
1465 goto dont_reorder;
1466
1467 ack_policy = *ieee80211_get_qos_ctl(hdr) &
1468 IEEE80211_QOS_CTL_ACK_POLICY_MASK;
1469 tid = ieee80211_get_tid(hdr);
1470
1471 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
1472 if (!tid_agg_rx) {
1473 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
1474 !test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
1475 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
1476 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
1477 WLAN_BACK_RECIPIENT,
1478 WLAN_REASON_QSTA_REQUIRE_SETUP);
1479 goto dont_reorder;
1480 }
1481
1482 /* qos null data frames are excluded */
1483 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
1484 goto dont_reorder;
1485
1486 /* not part of a BA session */
1487 if (ack_policy == IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
1488 goto dont_reorder;
1489
1490 /* new, potentially un-ordered, ampdu frame - process it */
1491
1492 /* reset session timer */
1493 if (tid_agg_rx->timeout)
1494 tid_agg_rx->last_rx = jiffies;
1495
1496 /* if this mpdu is fragmented - terminate rx aggregation session */
1497 sc = le16_to_cpu(hdr->seq_ctrl);
1498 if (sc & IEEE80211_SCTL_FRAG) {
1499 ieee80211_queue_skb_to_iface(rx->sdata, rx->link_id, NULL, skb);
1500 return;
1501 }
1502
1503 /*
1504 * No locking needed -- we will only ever process one
1505 * RX packet at a time, and thus own tid_agg_rx. All
1506 * other code manipulating it needs to (and does) make
1507 * sure that we cannot get to it any more before doing
1508 * anything with it.
1509 */
1510 if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
1511 frames))
1512 return;
1513
1514 dont_reorder:
1515 __skb_queue_tail(frames, skb);
1516 }
1517
1518 static ieee80211_rx_result debug_noinline
1519 ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
1520 {
1521 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1522 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1523
1524 if (status->flag & RX_FLAG_DUP_VALIDATED)
1525 return RX_CONTINUE;
1526
1527 /*
1528 * Drop duplicate 802.11 retransmissions
1529 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
1530 */
1531
1532 if (rx->skb->len < 24)
1533 return RX_CONTINUE;
1534
1535 if (ieee80211_is_ctl(hdr->frame_control) ||
1536 ieee80211_is_any_nullfunc(hdr->frame_control))
1537 return RX_CONTINUE;
1538
1539 if (!rx->sta)
1540 return RX_CONTINUE;
1541
1542 if (unlikely(is_multicast_ether_addr(hdr->addr1))) {
1543 struct ieee80211_sub_if_data *sdata = rx->sdata;
1544 u16 sn = ieee80211_get_sn(hdr);
1545
1546 if (!ieee80211_is_data_present(hdr->frame_control))
1547 return RX_CONTINUE;
1548
1549 if (!ieee80211_vif_is_mld(&sdata->vif) ||
1550 sdata->vif.type != NL80211_IFTYPE_STATION)
1551 return RX_CONTINUE;
1552
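/* mcast_seq_last == IEEE80211_SN_MODULO means no multicast SN has been recorded yet */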
1553 if (sdata->u.mgd.mcast_seq_last != IEEE80211_SN_MODULO &&
1554 ieee80211_sn_less_eq(sn, sdata->u.mgd.mcast_seq_last))
1555 return RX_DROP_U_DUP;
1556
1557 sdata->u.mgd.mcast_seq_last = sn;
1558 return RX_CONTINUE;
1559 }
1560
1561 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
1562 rx->sta->last_seq_ctrl[rx->seqno_idx] == hdr->seq_ctrl)) {
1563 I802_DEBUG_INC(rx->local->dot11FrameDuplicateCount);
1564 rx->link_sta->rx_stats.num_duplicates++;
1565 return RX_DROP_U_DUP;
1566 } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
1567 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
1568 }
1569
1570 return RX_CONTINUE;
1571 }
1572
1573 static ieee80211_rx_result debug_noinline
1574 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
1575 {
1576 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1577
1578 /* Drop disallowed frame classes based on STA auth/assoc state;
1579 * IEEE 802.11, Chap 5.5.
1580 *
1581 * mac80211 filters only based on association state, i.e. it drops
1582 * Class 3 frames from not associated stations. hostapd sends
1583 * deauth/disassoc frames when needed. In addition, hostapd is
1584 * responsible for filtering on both auth and assoc states.
1585 */
1586
1587 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1588 return ieee80211_rx_mesh_check(rx);
1589
1590 if (unlikely((ieee80211_is_data(hdr->frame_control) ||
1591 ieee80211_is_pspoll(hdr->frame_control)) &&
1592 rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
1593 rx->sdata->vif.type != NL80211_IFTYPE_OCB &&
1594 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
1595 /*
1596 * accept port control frames from the AP even when it's not
1597 * yet marked ASSOC to prevent a race where we don't set the
1598 * assoc bit quickly enough before it sends the first frame
1599 */
1600 if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
1601 ieee80211_is_data_present(hdr->frame_control)) {
1602 unsigned int hdrlen;
1603 __be16 ethertype;
1604
1605 hdrlen = ieee80211_hdrlen(hdr->frame_control);
1606
1607 if (rx->skb->len < hdrlen + 8)
1608 return RX_DROP;
1609
1610 skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
1611 if (ethertype == rx->sdata->control_port_protocol)
1612 return RX_CONTINUE;
1613 }
1614
1615 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
1616 cfg80211_rx_spurious_frame(rx->sdata->dev, hdr->addr2,
1617 rx->link_id, GFP_ATOMIC))
1618 return RX_DROP_U_SPURIOUS;
1619
1620 return RX_DROP;
1621 }
1622
1623 return RX_CONTINUE;
1624 }
1625
1626
1627 static ieee80211_rx_result debug_noinline
1628 ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
1629 {
1630 struct ieee80211_local *local;
1631 struct ieee80211_hdr *hdr;
1632 struct sk_buff *skb;
1633
1634 local = rx->local;
1635 skb = rx->skb;
1636 hdr = (struct ieee80211_hdr *) skb->data;
1637
1638 if (!local->pspolling)
1639 return RX_CONTINUE;
1640
1641 if (!ieee80211_has_fromds(hdr->frame_control))
1642 /* this is not from AP */
1643 return RX_CONTINUE;
1644
1645 if (!ieee80211_is_data(hdr->frame_control))
1646 return RX_CONTINUE;
1647
1648 if (!ieee80211_has_moredata(hdr->frame_control)) {
1649 /* AP has no more frames buffered for us */
1650 local->pspolling = false;
1651 return RX_CONTINUE;
1652 }
1653
1654 /* more data bit is set, let's request a new frame from the AP */
1655 ieee80211_send_pspoll(local, rx->sdata);
1656
1657 return RX_CONTINUE;
1658 }
1659
1660 static void sta_ps_start(struct sta_info *sta)
1661 {
1662 struct ieee80211_sub_if_data *sdata = sta->sdata;
1663 struct ieee80211_local *local = sdata->local;
1664 struct ps_data *ps;
1665 int tid;
1666
1667 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1668 sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1669 ps = &sdata->bss->ps;
1670 else
1671 return;
1672
1673 atomic_inc(&ps->num_sta_ps);
1674 set_sta_flag(sta, WLAN_STA_PS_STA);
1675 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
1676 drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
1677 ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
1678 sta->sta.addr, sta->sta.aid);
1679
1680 ieee80211_clear_fast_xmit(sta);
1681
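/* pull the station's TXQs out of the scheduler and note which TIDs still have frames buffered in them */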
1682 for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
1683 struct ieee80211_txq *txq = sta->sta.txq[tid];
1684 struct txq_info *txqi = to_txq_info(txq);
1685
1686 spin_lock(&local->active_txq_lock[txq->ac]);
1687 if (!list_empty(&txqi->schedule_order))
1688 list_del_init(&txqi->schedule_order);
1689 spin_unlock(&local->active_txq_lock[txq->ac]);
1690
1691 if (txq_has_queue(txq))
1692 set_bit(tid, &sta->txq_buffered_tids);
1693 else
1694 clear_bit(tid, &sta->txq_buffered_tids);
1695 }
1696 }
1697
1698 static void sta_ps_end(struct sta_info *sta)
1699 {
1700 ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
1701 sta->sta.addr, sta->sta.aid);
1702
1703 if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
1704 /*
1705 * Clear the flag only if the other one is still set
1706 * so that the TX path won't start TX'ing new frames
1707 * directly ... In the case that the driver flag isn't
1708 * set ieee80211_sta_ps_deliver_wakeup() will clear it.
1709 */
1710 clear_sta_flag(sta, WLAN_STA_PS_STA);
1711 ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
1712 sta->sta.addr, sta->sta.aid);
1713 return;
1714 }
1715
1716 set_sta_flag(sta, WLAN_STA_PS_DELIVER);
1717 clear_sta_flag(sta, WLAN_STA_PS_STA);
1718 ieee80211_sta_ps_deliver_wakeup(sta);
1719 }
1720
1721 int ieee80211_sta_ps_transition(struct ieee80211_sta *pubsta, bool start)
1722 {
1723 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1724 bool in_ps;
1725
1726 WARN_ON(!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS));
1727
1728 /* Don't let the same PS state be set twice */
1729 in_ps = test_sta_flag(sta, WLAN_STA_PS_STA);
1730 if ((start && in_ps) || (!start && !in_ps))
1731 return -EINVAL;
1732
1733 if (start)
1734 sta_ps_start(sta);
1735 else
1736 sta_ps_end(sta);
1737
1738 return 0;
1739 }
1740 EXPORT_SYMBOL(ieee80211_sta_ps_transition);
1741
1742 void ieee80211_sta_pspoll(struct ieee80211_sta *pubsta)
1743 {
1744 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1745
1746 if (test_sta_flag(sta, WLAN_STA_SP))
1747 return;
1748
1749 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1750 ieee80211_sta_ps_deliver_poll_response(sta);
1751 else
1752 set_sta_flag(sta, WLAN_STA_PSPOLL);
1753 }
1754 EXPORT_SYMBOL(ieee80211_sta_pspoll);
1755
1756 void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *pubsta, u8 tid)
1757 {
1758 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1759 int ac = ieee80211_ac_from_tid(tid);
1760
1761 /*
1762 * If this AC is not trigger-enabled do nothing unless the
1763 * driver is calling us after it already checked.
1764 *
1765 * NB: This could/should check a separate bitmap of trigger-
1766 * enabled queues, but for now we only implement uAPSD w/o
1767 * TSPEC changes to the ACs, so they're always the same.
1768 */
1769 if (!(sta->sta.uapsd_queues & ieee80211_ac_to_qos_mask[ac]) &&
1770 tid != IEEE80211_NUM_TIDS)
1771 return;
1772
1773 /* if we are in a service period, do nothing */
1774 if (test_sta_flag(sta, WLAN_STA_SP))
1775 return;
1776
1777 if (!test_sta_flag(sta, WLAN_STA_PS_DRIVER))
1778 ieee80211_sta_ps_deliver_uapsd(sta);
1779 else
1780 set_sta_flag(sta, WLAN_STA_UAPSD);
1781 }
1782 EXPORT_SYMBOL(ieee80211_sta_uapsd_trigger);
1783
1784 static ieee80211_rx_result debug_noinline
1785 ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
1786 {
1787 struct ieee80211_sub_if_data *sdata = rx->sdata;
1788 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
1789 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
1790
1791 if (!rx->sta)
1792 return RX_CONTINUE;
1793
1794 if (sdata->vif.type != NL80211_IFTYPE_AP &&
1795 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
1796 return RX_CONTINUE;
1797
1798 /*
1799 * The device handles station powersave, so don't do anything about
1800 	 * uAPSD and PS-Poll frames (the latter shouldn't even be passed up
1801 	 * to mac80211 by the device, since it handles them.)
1802 */
1803 if (ieee80211_hw_check(&sdata->local->hw, AP_LINK_PS))
1804 return RX_CONTINUE;
1805
1806 /*
1807 * Don't do anything if the station isn't already asleep. In
1808 	 * the uAPSD case, the station will probably be marked asleep;
1809 	 * in the PS-Poll case the station must be confused ...
1810 */
1811 if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
1812 return RX_CONTINUE;
1813
1814 if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
1815 ieee80211_sta_pspoll(&rx->sta->sta);
1816
1817 		/* Free the PS-Poll skb here instead of returning RX_DROP, which
1818 		 * would count as a dropped frame. */
1819 dev_kfree_skb(rx->skb);
1820
1821 return RX_QUEUED;
1822 } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
1823 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1824 ieee80211_has_pm(hdr->frame_control) &&
1825 (ieee80211_is_data_qos(hdr->frame_control) ||
1826 ieee80211_is_qos_nullfunc(hdr->frame_control))) {
1827 u8 tid = ieee80211_get_tid(hdr);
1828
1829 ieee80211_sta_uapsd_trigger(&rx->sta->sta, tid);
1830 }
1831
1832 return RX_CONTINUE;
1833 }
1834
1835 static ieee80211_rx_result debug_noinline
1836 ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
1837 {
1838 struct sta_info *sta = rx->sta;
1839 struct link_sta_info *link_sta = rx->link_sta;
1840 struct sk_buff *skb = rx->skb;
1841 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
1842 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1843 int i;
1844
1845 if (!sta || !link_sta)
1846 return RX_CONTINUE;
1847
1848 /*
1849 * Update last_rx only for IBSS packets which are for the current
1850 * BSSID and for station already AUTHORIZED to avoid keeping the
1851 * current IBSS network alive in cases where other STAs start
1852 * using different BSSID. This will also give the station another
1853 * chance to restart the authentication/authorization in case
1854 * something went wrong the first time.
1855 */
1856 if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
1857 u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
1858 NL80211_IFTYPE_ADHOC);
1859 if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
1860 test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1861 link_sta->rx_stats.last_rx = jiffies;
1862 if (ieee80211_is_data_present(hdr->frame_control) &&
1863 !is_multicast_ether_addr(hdr->addr1))
1864 link_sta->rx_stats.last_rate =
1865 sta_stats_encode_rate(status);
1866 }
1867 } else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
1868 link_sta->rx_stats.last_rx = jiffies;
1869 } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
1870 !is_multicast_ether_addr(hdr->addr1)) {
1871 /*
1872 		 * Mesh beacons will update last_rx if they are found to
1873 * match the current local configuration when processed.
1874 */
1875 link_sta->rx_stats.last_rx = jiffies;
1876 if (ieee80211_is_data_present(hdr->frame_control))
1877 link_sta->rx_stats.last_rate = sta_stats_encode_rate(status);
1878 }
1879
1880 link_sta->rx_stats.fragments++;
1881
1882 u64_stats_update_begin(&link_sta->rx_stats.syncp);
1883 link_sta->rx_stats.bytes += rx->skb->len;
1884 u64_stats_update_end(&link_sta->rx_stats.syncp);
1885
1886 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
1887 link_sta->rx_stats.last_signal = status->signal;
1888 ewma_signal_add(&link_sta->rx_stats_avg.signal,
1889 -status->signal);
1890 }
1891
1892 if (status->chains) {
1893 link_sta->rx_stats.chains = status->chains;
1894 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
1895 int signal = status->chain_signal[i];
1896
1897 if (!(status->chains & BIT(i)))
1898 continue;
1899
1900 link_sta->rx_stats.chain_signal_last[i] = signal;
1901 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
1902 -signal);
1903 }
1904 }
1905
1906 if (ieee80211_is_s1g_beacon(hdr->frame_control))
1907 return RX_CONTINUE;
1908
1909 /*
1910 * Change STA power saving mode only at the end of a frame
1911 * exchange sequence, and only for a data or management
1912 * frame as specified in IEEE 802.11-2016 11.2.3.2
1913 */
1914 if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
1915 !ieee80211_has_morefrags(hdr->frame_control) &&
1916 !is_multicast_ether_addr(hdr->addr1) &&
1917 (ieee80211_is_mgmt(hdr->frame_control) ||
1918 ieee80211_is_data(hdr->frame_control)) &&
1919 !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
1920 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1921 rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
1922 if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
1923 if (!ieee80211_has_pm(hdr->frame_control))
1924 sta_ps_end(sta);
1925 } else {
1926 if (ieee80211_has_pm(hdr->frame_control))
1927 sta_ps_start(sta);
1928 }
1929 }
1930
1931 /* mesh power save support */
1932 if (ieee80211_vif_is_mesh(&rx->sdata->vif))
1933 ieee80211_mps_rx_h_sta_process(sta, hdr);
1934
1935 /*
1936 * Drop (qos-)data::nullfunc frames silently, since they
1937 * are used only to control station power saving mode.
1938 */
1939 if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
1940 I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
1941
1942 /*
1943 * If we receive a 4-addr nullfunc frame from a STA
1944 		 * that was not moved to a 4-addr STA VLAN yet, send
1945 		 * the event to userspace; for older hostapd, drop
1946 		 * the frame to the monitor interface.
1947 */
1948 if (ieee80211_has_a4(hdr->frame_control) &&
1949 (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
1950 (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1951 !rx->sdata->u.vlan.sta))) {
1952 if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
1953 cfg80211_rx_unexpected_4addr_frame(
1954 rx->sdata->dev, sta->sta.addr,
1955 rx->link_id, GFP_ATOMIC);
1956 return RX_DROP_U_UNEXPECTED_4ADDR_FRAME;
1957 }
1958 /*
1959 		 * Update the counter and free the packet here to avoid
1960 		 * counting this as a dropped packet.
1961 */
1962 link_sta->rx_stats.packets++;
1963 dev_kfree_skb(rx->skb);
1964 return RX_QUEUED;
1965 }
1966
1967 return RX_CONTINUE;
1968 } /* ieee80211_rx_h_sta_process */
1969
1970 static struct ieee80211_key *
1971 ieee80211_rx_get_bigtk(struct ieee80211_rx_data *rx, int idx)
1972 {
1973 struct ieee80211_key *key = NULL;
1974 int idx2;
1975
1976 /* Make sure key gets set if either BIGTK key index is set so that
1977 * ieee80211_drop_unencrypted_mgmt() can properly drop both unprotected
1978 * Beacon frames and Beacon frames that claim to use another BIGTK key
1979 * index (i.e., a key that we do not have).
1980 */
1981
1982 if (idx < 0) {
1983 idx = NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS;
1984 idx2 = idx + 1;
1985 } else {
1986 if (idx == NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
1987 idx2 = idx + 1;
1988 else
1989 idx2 = idx - 1;
1990 }
1991
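	/* Look up the BIGTK: per-STA key first, then the link's, trying the
	 * requested index and then the other beacon protection key index.
	 */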
1992 if (rx->link_sta)
1993 key = rcu_dereference(rx->link_sta->gtk[idx]);
1994 if (!key)
1995 key = rcu_dereference(rx->link->gtk[idx]);
1996 if (!key && rx->link_sta)
1997 key = rcu_dereference(rx->link_sta->gtk[idx2]);
1998 if (!key)
1999 key = rcu_dereference(rx->link->gtk[idx2]);
2000
2001 return key;
2002 }
2003
2004 static ieee80211_rx_result debug_noinline
2005 ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
2006 {
2007 struct sk_buff *skb = rx->skb;
2008 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2010 int keyidx;
2011 ieee80211_rx_result result = RX_DROP_U_DECRYPT_FAIL;
2012 struct ieee80211_key *sta_ptk = NULL;
2013 struct ieee80211_key *ptk_idx = NULL;
2014 int mmie_keyidx = -1;
2015 __le16 fc;
2016
2017 if (ieee80211_is_ext(hdr->frame_control))
2018 return RX_CONTINUE;
2019
2020 /*
2021 * Key selection 101
2022 *
2023 * There are five types of keys:
2024 * - GTK (group keys)
2025 * - IGTK (group keys for management frames)
2026 * - BIGTK (group keys for Beacon frames)
2027 * - PTK (pairwise keys)
2028 * - STK (station-to-station pairwise keys)
2029 *
2030 * When selecting a key, we have to distinguish between multicast
2031 	 * (including broadcast) and unicast frames; the latter can only
2032 * use PTKs and STKs while the former always use GTKs, IGTKs, and
2033 * BIGTKs. Unless, of course, actual WEP keys ("pre-RSNA") are used,
2034 * then unicast frames can also use key indices like GTKs. Hence, if we
2035 * don't have a PTK/STK we check the key index for a WEP key.
2036 *
2037 * Note that in a regular BSS, multicast frames are sent by the
2038 * AP only, associated stations unicast the frame to the AP first
2039 * which then multicasts it on their behalf.
2040 *
2041 * There is also a slight problem in IBSS mode: GTKs are negotiated
2042 	 * with each station, which is something we don't currently handle.
2043 * The spec seems to expect that one negotiates the same key with
2044 * every station but there's no such requirement; VLANs could be
2045 * possible.
2046 */
2047
2048 /* start without a key */
2049 rx->key = NULL;
2050 fc = hdr->frame_control;
2051
2052 if (rx->sta) {
2053 int keyid = rx->sta->ptk_idx;
2054 sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
2055
2056 if (ieee80211_has_protected(fc) &&
2057 !(status->flag & RX_FLAG_IV_STRIPPED)) {
2058 keyid = ieee80211_get_keyid(rx->skb);
2059
2060 if (unlikely(keyid < 0))
2061 return RX_DROP_U_NO_KEY_ID;
2062
2063 ptk_idx = rcu_dereference(rx->sta->ptk[keyid]);
2064 }
2065 }
2066
2067 if (!ieee80211_has_protected(fc))
2068 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
2069
2070 if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
2071 rx->key = ptk_idx ? ptk_idx : sta_ptk;
2072 if ((status->flag & RX_FLAG_DECRYPTED) &&
2073 (status->flag & RX_FLAG_IV_STRIPPED))
2074 return RX_CONTINUE;
2075 /* Skip decryption if the frame is not protected. */
2076 if (!ieee80211_has_protected(fc))
2077 return RX_CONTINUE;
2078 } else if (mmie_keyidx >= 0 && ieee80211_is_beacon(fc)) {
2079 /* Broadcast/multicast robust management frame / BIP */
2080 if ((status->flag & RX_FLAG_DECRYPTED) &&
2081 (status->flag & RX_FLAG_IV_STRIPPED))
2082 return RX_CONTINUE;
2083
2084 if (mmie_keyidx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS ||
2085 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS +
2086 NUM_DEFAULT_BEACON_KEYS) {
2087 if (rx->sdata->dev)
2088 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2089 skb->data,
2090 skb->len);
2091 return RX_DROP_U_BAD_BCN_KEYIDX;
2092 }
2093
2094 rx->key = ieee80211_rx_get_bigtk(rx, mmie_keyidx);
2095 if (!rx->key)
2096 return RX_CONTINUE; /* Beacon protection not in use */
2097 } else if (mmie_keyidx >= 0) {
2098 /* Broadcast/multicast robust management frame / BIP */
2099 if ((status->flag & RX_FLAG_DECRYPTED) &&
2100 (status->flag & RX_FLAG_IV_STRIPPED))
2101 return RX_CONTINUE;
2102
2103 if (mmie_keyidx < NUM_DEFAULT_KEYS ||
2104 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
2105 return RX_DROP_U_BAD_MGMT_KEYIDX; /* unexpected BIP keyidx */
2106 if (rx->link_sta) {
2107 if (ieee80211_is_group_privacy_action(skb) &&
2108 test_sta_flag(rx->sta, WLAN_STA_MFP))
2109 return RX_DROP;
2110
2111 rx->key = rcu_dereference(rx->link_sta->gtk[mmie_keyidx]);
2112 }
2113 if (!rx->key)
2114 rx->key = rcu_dereference(rx->link->gtk[mmie_keyidx]);
2115 } else if (!ieee80211_has_protected(fc)) {
2116 /*
2117 * The frame was not protected, so skip decryption. However, we
2118 * need to set rx->key if there is a key that could have been
2119 * used so that the frame may be dropped if encryption would
2120 * have been expected.
2121 */
2122 struct ieee80211_key *key = NULL;
2123 int i;
2124
2125 if (ieee80211_is_beacon(fc)) {
2126 key = ieee80211_rx_get_bigtk(rx, -1);
2127 } else if (ieee80211_is_mgmt(fc) &&
2128 is_multicast_ether_addr(hdr->addr1)) {
2129 key = rcu_dereference(rx->link->default_mgmt_key);
2130 } else {
2131 if (rx->link_sta) {
2132 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2133 key = rcu_dereference(rx->link_sta->gtk[i]);
2134 if (key)
2135 break;
2136 }
2137 }
2138 if (!key) {
2139 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
2140 key = rcu_dereference(rx->link->gtk[i]);
2141 if (key)
2142 break;
2143 }
2144 }
2145 }
2146 if (key)
2147 rx->key = key;
2148 return RX_CONTINUE;
2149 } else {
2150 /*
2151 		 * The device doesn't give us the IV, so we won't be
2152 		 * able to look up the key. That's OK though; we
2153 		 * don't need to decrypt the frame, we just won't
2154 		 * be able to keep statistics accurate.
2155 		 * Except for key threshold notifications: should
2156 * we somehow allow the driver to tell us which key
2157 * the hardware used if this flag is set?
2158 */
2159 if ((status->flag & RX_FLAG_DECRYPTED) &&
2160 (status->flag & RX_FLAG_IV_STRIPPED))
2161 return RX_CONTINUE;
2162
2163 keyidx = ieee80211_get_keyid(rx->skb);
2164
2165 if (unlikely(keyidx < 0))
2166 return RX_DROP_U_NO_KEY_ID;
2167
2168 /* check per-station GTK first, if multicast packet */
2169 if (is_multicast_ether_addr(hdr->addr1) && rx->link_sta)
2170 rx->key = rcu_dereference(rx->link_sta->gtk[keyidx]);
2171
2172 /* if not found, try default key */
2173 if (!rx->key) {
2174 if (is_multicast_ether_addr(hdr->addr1))
2175 rx->key = rcu_dereference(rx->link->gtk[keyidx]);
2176 if (!rx->key)
2177 rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
2178
2179 /*
2180 * RSNA-protected unicast frames should always be
2181 * sent with pairwise or station-to-station keys,
2182 * but for WEP we allow using a key index as well.
2183 */
2184 if (rx->key &&
2185 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
2186 rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
2187 !is_multicast_ether_addr(hdr->addr1))
2188 rx->key = NULL;
2189 }
2190 }
2191
2192 if (rx->key) {
2193 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
2194 return RX_DROP;
2195
2196 /* TODO: add threshold stuff again */
2197 } else {
2198 return RX_DROP;
2199 }
2200
2201 switch (rx->key->conf.cipher) {
2202 case WLAN_CIPHER_SUITE_WEP40:
2203 case WLAN_CIPHER_SUITE_WEP104:
2204 result = ieee80211_crypto_wep_decrypt(rx);
2205 break;
2206 case WLAN_CIPHER_SUITE_TKIP:
2207 result = ieee80211_crypto_tkip_decrypt(rx);
2208 break;
2209 case WLAN_CIPHER_SUITE_CCMP:
2210 result = ieee80211_crypto_ccmp_decrypt(
2211 rx, IEEE80211_CCMP_MIC_LEN);
2212 break;
2213 case WLAN_CIPHER_SUITE_CCMP_256:
2214 result = ieee80211_crypto_ccmp_decrypt(
2215 rx, IEEE80211_CCMP_256_MIC_LEN);
2216 break;
2217 case WLAN_CIPHER_SUITE_AES_CMAC:
2218 result = ieee80211_crypto_aes_cmac_decrypt(
2219 rx, IEEE80211_CMAC_128_MIC_LEN);
2220 break;
2221 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
2222 result = ieee80211_crypto_aes_cmac_decrypt(
2223 rx, IEEE80211_CMAC_256_MIC_LEN);
2224 break;
2225 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2226 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2227 result = ieee80211_crypto_aes_gmac_decrypt(rx);
2228 break;
2229 case WLAN_CIPHER_SUITE_GCMP:
2230 case WLAN_CIPHER_SUITE_GCMP_256:
2231 result = ieee80211_crypto_gcmp_decrypt(rx);
2232 break;
2233 default:
2234 result = RX_DROP_U_BAD_CIPHER;
2235 }
2236
2237 /* the hdr variable is invalid after the decrypt handlers */
2238
2239 /* either the frame has been decrypted or will be dropped */
2240 status->flag |= RX_FLAG_DECRYPTED;
2241
2242 if (unlikely(ieee80211_is_beacon(fc) && RX_RES_IS_UNUSABLE(result) &&
2243 rx->sdata->dev))
2244 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2245 skb->data, skb->len);
2246
2247 return result;
2248 }
2249
2250 void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache)
2251 {
2252 int i;
2253
2254 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2255 skb_queue_head_init(&cache->entries[i].skb_list);
2256 }
2257
2258 void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache)
2259 {
2260 int i;
2261
2262 for (i = 0; i < ARRAY_SIZE(cache->entries); i++)
2263 __skb_queue_purge(&cache->entries[i].skb_list);
2264 }
2265
2266 static inline struct ieee80211_fragment_entry *
2267 ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache,
2268 unsigned int frag, unsigned int seq, int rx_queue,
2269 struct sk_buff **skb)
2270 {
2271 struct ieee80211_fragment_entry *entry;
2272
2273 entry = &cache->entries[cache->next++];
2274 if (cache->next >= IEEE80211_FRAGMENT_MAX)
2275 cache->next = 0;
2276
2277 __skb_queue_purge(&entry->skb_list);
2278
2279 __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
2280 *skb = NULL;
2281 entry->first_frag_time = jiffies;
2282 entry->seq = seq;
2283 entry->rx_queue = rx_queue;
2284 entry->last_frag = frag;
2285 entry->check_sequential_pn = false;
2286 entry->extra_len = 0;
2287
2288 return entry;
2289 }
2290
2291 static inline struct ieee80211_fragment_entry *
2292 ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache,
2293 unsigned int frag, unsigned int seq,
2294 int rx_queue, struct ieee80211_hdr *hdr)
2295 {
2296 struct ieee80211_fragment_entry *entry;
2297 int i, idx;
2298
2299 idx = cache->next;
2300 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
2301 struct ieee80211_hdr *f_hdr;
2302 struct sk_buff *f_skb;
2303
2304 idx--;
2305 if (idx < 0)
2306 idx = IEEE80211_FRAGMENT_MAX - 1;
2307
2308 entry = &cache->entries[idx];
2309 if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
2310 entry->rx_queue != rx_queue ||
2311 entry->last_frag + 1 != frag)
2312 continue;
2313
2314 f_skb = __skb_peek(&entry->skb_list);
2315 f_hdr = (struct ieee80211_hdr *) f_skb->data;
2316
2317 /*
2318 		 * Check that ftype and addresses are equal; otherwise check the next fragment
2319 */
2320 if (((hdr->frame_control ^ f_hdr->frame_control) &
2321 cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
2322 !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
2323 !ether_addr_equal(hdr->addr2, f_hdr->addr2))
2324 continue;
2325
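		/* expire reassembly entries that are more than 2 seconds old */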
2326 if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
2327 __skb_queue_purge(&entry->skb_list);
2328 continue;
2329 }
2330 return entry;
2331 }
2332
2333 return NULL;
2334 }
2335
2336 static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc)
2337 {
2338 return rx->key &&
2339 (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP ||
2340 rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 ||
2341 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP ||
2342 rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) &&
2343 ieee80211_has_protected(fc);
2344 }
2345
2346 static ieee80211_rx_result debug_noinline
2347 ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2348 {
2349 struct ieee80211_fragment_cache *cache = &rx->sdata->frags;
2350 struct ieee80211_hdr *hdr;
2351 u16 sc;
2352 __le16 fc;
2353 unsigned int frag, seq;
2354 struct ieee80211_fragment_entry *entry;
2355 struct sk_buff *skb;
2356 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2357
2358 hdr = (struct ieee80211_hdr *)rx->skb->data;
2359 fc = hdr->frame_control;
2360
2361 if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
2362 return RX_CONTINUE;
2363
2364 sc = le16_to_cpu(hdr->seq_ctrl);
2365 frag = sc & IEEE80211_SCTL_FRAG;
2366
2367 if (rx->sta)
2368 cache = &rx->sta->frags;
2369
2370 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2371 goto out;
2372
2373 if (is_multicast_ether_addr(hdr->addr1))
2374 return RX_DROP;
2375
2376 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2377
2378 if (skb_linearize(rx->skb))
2379 return RX_DROP_U_OOM;
2380
2381 /*
2382 	 * skb_linearize() might change skb->data, so previously
2383 	 * cached pointers (in this case, hdr) need to be
2384 	 * refreshed with the new data.
2385 */
2386 hdr = (struct ieee80211_hdr *)rx->skb->data;
2387 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
2388
2389 if (frag == 0) {
2390 /* This is the first fragment of a new frame. */
2391 entry = ieee80211_reassemble_add(cache, frag, seq,
2392 rx->seqno_idx, &(rx->skb));
2393 if (requires_sequential_pn(rx, fc)) {
2394 int queue = rx->security_idx;
2395
2396 /* Store CCMP/GCMP PN so that we can verify that the
2397 * next fragment has a sequential PN value.
2398 */
2399 entry->check_sequential_pn = true;
2400 entry->is_protected = true;
2401 entry->key_color = rx->key->color;
2402 memcpy(entry->last_pn,
2403 rx->key->u.ccmp.rx_pn[queue],
2404 IEEE80211_CCMP_PN_LEN);
2405 BUILD_BUG_ON(offsetof(struct ieee80211_key,
2406 u.ccmp.rx_pn) !=
2407 offsetof(struct ieee80211_key,
2408 u.gcmp.rx_pn));
2409 BUILD_BUG_ON(sizeof(rx->key->u.ccmp.rx_pn[queue]) !=
2410 sizeof(rx->key->u.gcmp.rx_pn[queue]));
2411 BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN !=
2412 IEEE80211_GCMP_PN_LEN);
2413 } else if (rx->key &&
2414 (ieee80211_has_protected(fc) ||
2415 (status->flag & RX_FLAG_DECRYPTED))) {
2416 entry->is_protected = true;
2417 entry->key_color = rx->key->color;
2418 }
2419 return RX_QUEUED;
2420 }
2421
2422 /* This is a fragment for a frame that should already be pending in
2423 	 * the fragment cache. Add this fragment to the end of the pending entry.
2424 */
2425 entry = ieee80211_reassemble_find(cache, frag, seq,
2426 rx->seqno_idx, hdr);
2427 if (!entry) {
2428 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2429 return RX_DROP;
2430 }
2431
2432 /* "The receiver shall discard MSDUs and MMPDUs whose constituent
2433 * MPDU PN values are not incrementing in steps of 1."
2434 * see IEEE P802.11-REVmc/D5.0, 12.5.3.4.4, item d (for CCMP)
2435 * and IEEE P802.11-REVmc/D5.0, 12.5.5.4.4, item d (for GCMP)
2436 */
2437 if (entry->check_sequential_pn) {
2438 int i;
2439 u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
2440
2441 if (!requires_sequential_pn(rx, fc))
2442 return RX_DROP_U_NONSEQ_PN;
2443
2444 /* Prevent mixed key and fragment cache attacks */
2445 if (entry->key_color != rx->key->color)
2446 return RX_DROP_U_BAD_KEY_COLOR;
2447
2448 memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
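		/* compute the expected PN: the previous fragment's PN
		 * incremented by one, carrying across bytes
		 */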
2449 for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
2450 pn[i]++;
2451 if (pn[i])
2452 break;
2453 }
2454
2455 rpn = rx->ccm_gcm.pn;
2456 if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
2457 return RX_DROP_U_REPLAY;
2458 memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
2459 } else if (entry->is_protected &&
2460 (!rx->key ||
2461 (!ieee80211_has_protected(fc) &&
2462 !(status->flag & RX_FLAG_DECRYPTED)) ||
2463 rx->key->color != entry->key_color)) {
2464 		/* Drop this as a mixed key or fragment cache attack, even
2465 		 * though for TKIP the Michael MIC should protect us, and
2466 		 * WEP is a lost cause anyway.
2467 */
2468 return RX_DROP_U_EXPECT_DEFRAG_PROT;
2469 } else if (entry->is_protected && rx->key &&
2470 entry->key_color != rx->key->color &&
2471 (status->flag & RX_FLAG_DECRYPTED)) {
2472 return RX_DROP_U_BAD_KEY_COLOR;
2473 }
2474
2475 skb_pull(rx->skb, ieee80211_hdrlen(fc));
2476 __skb_queue_tail(&entry->skb_list, rx->skb);
2477 entry->last_frag = frag;
2478 entry->extra_len += rx->skb->len;
2479 if (ieee80211_has_morefrags(fc)) {
2480 rx->skb = NULL;
2481 return RX_QUEUED;
2482 }
2483
2484 rx->skb = __skb_dequeue(&entry->skb_list);
2485 if (skb_tailroom(rx->skb) < entry->extra_len) {
2486 I802_DEBUG_INC(rx->local->rx_expand_skb_head_defrag);
2487 if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
2488 GFP_ATOMIC))) {
2489 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
2490 __skb_queue_purge(&entry->skb_list);
2491 return RX_DROP_U_OOM;
2492 }
2493 }
2494 while ((skb = __skb_dequeue(&entry->skb_list))) {
2495 skb_put_data(rx->skb, skb->data, skb->len);
2496 dev_kfree_skb(skb);
2497 }
2498
2499 out:
2500 ieee80211_led_rx(rx->local);
2501 if (rx->sta)
2502 rx->link_sta->rx_stats.packets++;
2503 return RX_CONTINUE;
2504 }
2505
2506 static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
2507 {
2508 if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
2509 return -EACCES;
2510
2511 return 0;
2512 }
2513
2514 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
2515 {
2516 struct sk_buff *skb = rx->skb;
2517 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2518
2519 /*
2520 * Pass through unencrypted frames if the hardware has
2521 * decrypted them already.
2522 */
2523 if (status->flag & RX_FLAG_DECRYPTED)
2524 return 0;
2525
2526 /* Drop unencrypted frames if key is set. */
2527 if (unlikely(!ieee80211_has_protected(fc) &&
2528 !ieee80211_is_any_nullfunc(fc) &&
2529 ieee80211_is_data(fc) && rx->key))
2530 return -EACCES;
2531
2532 return 0;
2533 }
2534
2535 VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result
2536 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
2537 {
2538 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
2539 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2540 __le16 fc = mgmt->frame_control;
2541
2542 /*
2543 * Pass through unencrypted frames if the hardware has
2544 * decrypted them already.
2545 */
2546 if (status->flag & RX_FLAG_DECRYPTED)
2547 return RX_CONTINUE;
2548
2549 /* drop unicast protected dual (that wasn't protected) */
2550 if (ieee80211_is_action(fc) &&
2551 mgmt->u.action.category == WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION)
2552 return RX_DROP_U_UNPROT_DUAL;
2553
2554 if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
2555 if (unlikely(!ieee80211_has_protected(fc) &&
2556 ieee80211_is_unicast_robust_mgmt_frame(rx->skb))) {
2557 if (ieee80211_is_deauth(fc) ||
2558 ieee80211_is_disassoc(fc)) {
2559 /*
2560 * Permit unprotected deauth/disassoc frames
2561 * during 4-way-HS (key is installed after HS).
2562 */
2563 if (!rx->key)
2564 return RX_CONTINUE;
2565
2566 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2567 rx->skb->data,
2568 rx->skb->len);
2569 }
2570 return RX_DROP_U_UNPROT_UCAST_MGMT;
2571 }
2572 /* BIP does not use Protected field, so need to check MMIE */
2573 if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
2574 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2575 if (ieee80211_is_deauth(fc) ||
2576 ieee80211_is_disassoc(fc))
2577 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2578 rx->skb->data,
2579 rx->skb->len);
2580 return RX_DROP_U_UNPROT_MCAST_MGMT;
2581 }
2582 if (unlikely(ieee80211_is_beacon(fc) && rx->key &&
2583 ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
2584 cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
2585 rx->skb->data,
2586 rx->skb->len);
2587 return RX_DROP_U_UNPROT_BEACON;
2588 }
2589 /*
2590 * When using MFP, Action frames are not allowed prior to
2591 * having configured keys.
2592 */
2593 if (unlikely(ieee80211_is_action(fc) && !rx->key &&
2594 ieee80211_is_robust_mgmt_frame(rx->skb)))
2595 return RX_DROP_U_UNPROT_ACTION;
2596
2597 		/* drop unicast public action frames when using MFP */
2598 if (is_unicast_ether_addr(mgmt->da) &&
2599 ieee80211_is_protected_dual_of_public_action(rx->skb))
2600 return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION;
2601 }
2602
2603 /*
2604 	 * Drop robust action frames before assoc regardless of MFP state;
2605 	 * after assoc we have also decided on MFP or not.
2606 */
2607 if (ieee80211_is_action(fc) &&
2608 ieee80211_is_robust_mgmt_frame(rx->skb) &&
2609 (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))
2610 return RX_DROP_U_UNPROT_ROBUST_ACTION;
2611
2612 return RX_CONTINUE;
2613 }
2614 EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_drop_unencrypted_mgmt);
2615
2616 static ieee80211_rx_result
2617 __ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
2618 {
2619 struct ieee80211_sub_if_data *sdata = rx->sdata;
2620 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
2621 bool check_port_control = false;
2622 struct ethhdr *ehdr;
2623 int ret;
2624
2625 *port_control = false;
2626 if (ieee80211_has_a4(hdr->frame_control) &&
2627 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
2628 return RX_DROP_U_UNEXPECTED_VLAN_4ADDR;
2629
2630 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
2631 !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
2632 if (!sdata->u.mgd.use_4addr)
2633 return RX_DROP_U_UNEXPECTED_STA_4ADDR;
2634 else if (!ether_addr_equal(hdr->addr1, sdata->vif.addr))
2635 check_port_control = true;
2636 }
2637
2638 if (is_multicast_ether_addr(hdr->addr1) &&
2639 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
2640 return RX_DROP_U_UNEXPECTED_VLAN_MCAST;
2641
2642 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
2643 if (ret < 0)
2644 return RX_DROP_U_INVALID_8023;
2645
2646 ehdr = (struct ethhdr *) rx->skb->data;
2647 if (ehdr->h_proto == rx->sdata->control_port_protocol)
2648 *port_control = true;
2649 else if (check_port_control)
2650 return RX_DROP_U_NOT_PORT_CONTROL;
2651
2652 return RX_CONTINUE;
2653 }
2654
2655 bool ieee80211_is_our_addr(struct ieee80211_sub_if_data *sdata,
2656 const u8 *addr, int *out_link_id)
2657 {
2658 unsigned int link_id;
2659
2660 /* non-MLO, or MLD address replaced by hardware */
2661 if (ether_addr_equal(sdata->vif.addr, addr))
2662 return true;
2663
2664 if (!ieee80211_vif_is_mld(&sdata->vif))
2665 return false;
2666
2667 for (link_id = 0; link_id < ARRAY_SIZE(sdata->vif.link_conf); link_id++) {
2668 struct ieee80211_bss_conf *conf;
2669
2670 conf = rcu_dereference(sdata->vif.link_conf[link_id]);
2671
2672 if (!conf)
2673 continue;
2674 if (ether_addr_equal(conf->addr, addr)) {
2675 if (out_link_id)
2676 *out_link_id = link_id;
2677 return true;
2678 }
2679 }
2680
2681 return false;
2682 }
2683
2684 /*
2685 * requires that rx->skb is a frame with ethernet header
2686 */
2687 static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
2688 {
2689 static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
2690 = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
2691 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2692
2693 /*
2694 * Allow EAPOL frames to us/the PAE group address regardless of
2695 * whether the frame was encrypted or not, and always disallow
2696 * all other destination addresses for them.
2697 */
2698 if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol))
2699 return ieee80211_is_our_addr(rx->sdata, ehdr->h_dest, NULL) ||
2700 ether_addr_equal(ehdr->h_dest, pae_group_addr);
2701
2702 if (ieee80211_802_1x_port_control(rx) ||
2703 ieee80211_drop_unencrypted(rx, fc))
2704 return false;
2705
2706 return true;
2707 }
2708
2709 static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
2710 struct ieee80211_rx_data *rx)
2711 {
2712 struct ieee80211_sub_if_data *sdata = rx->sdata;
2713 struct net_device *dev = sdata->dev;
2714
2715 if (unlikely((skb->protocol == sdata->control_port_protocol ||
2716 (skb->protocol == cpu_to_be16(ETH_P_PREAUTH) &&
2717 !sdata->control_port_no_preauth)) &&
2718 sdata->control_port_over_nl80211)) {
2719 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
2720 bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED);
2721
2722 cfg80211_rx_control_port(dev, skb, noencrypt, rx->link_id);
2723 dev_kfree_skb(skb);
2724 } else {
2725 struct ethhdr *ehdr = (void *)skb_mac_header(skb);
2726
2727 memset(skb->cb, 0, sizeof(skb->cb));
2728
2729 /*
2730 * 802.1X over 802.11 requires that the authenticator address
2731 * be used for EAPOL frames. However, 802.1X allows the use of
2732 * the PAE group address instead. If the interface is part of
2733 * a bridge and we pass the frame with the PAE group address,
2734 * then the bridge will forward it to the network (even if the
2735 * client was not associated yet), which isn't supposed to
2736 * happen.
2737 * To avoid that, rewrite the destination address to our own
2738 * address, so that the authenticator (e.g. hostapd) will see
2739 * the frame, but bridge won't forward it anywhere else. Note
2740 * that due to earlier filtering, the only other address can
2741 * be the PAE group address, unless the hardware allowed them
2742 * through in 802.3 offloaded mode.
2743 */
2744 if (unlikely(skb->protocol == sdata->control_port_protocol &&
2745 !ether_addr_equal(ehdr->h_dest, sdata->vif.addr)))
2746 ether_addr_copy(ehdr->h_dest, sdata->vif.addr);
2747
2748 /* deliver to local stack */
2749 if (rx->list)
2750 list_add_tail(&skb->list, rx->list);
2751 else
2752 netif_receive_skb(skb);
2753 }
2754 }
2755
2756 /*
2757 * requires that rx->skb is a frame with ethernet header
2758 */
2759 static void
2760 ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
2761 {
2762 struct ieee80211_sub_if_data *sdata = rx->sdata;
2763 struct net_device *dev = sdata->dev;
2764 struct sk_buff *skb, *xmit_skb;
2765 struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
2766 struct sta_info *dsta;
2767
2768 skb = rx->skb;
2769 xmit_skb = NULL;
2770
2771 dev_sw_netstats_rx_add(dev, skb->len);
2772
2773 if (rx->sta) {
2774 /* The seqno index has the same property as needed
2775 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
2776 * for non-QoS-data frames. Here we know it's a data
2777 * frame, so count MSDUs.
2778 */
2779 u64_stats_update_begin(&rx->link_sta->rx_stats.syncp);
2780 rx->link_sta->rx_stats.msdu[rx->seqno_idx]++;
2781 u64_stats_update_end(&rx->link_sta->rx_stats.syncp);
2782 }
2783
2784 if ((sdata->vif.type == NL80211_IFTYPE_AP ||
2785 sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
2786 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
2787 ehdr->h_proto != rx->sdata->control_port_protocol &&
2788 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
2789 if (is_multicast_ether_addr(ehdr->h_dest) &&
2790 ieee80211_vif_get_num_mcast_if(sdata) != 0) {
2791 /*
2792 * send multicast frames both to higher layers in
2793 * local net stack and back to the wireless medium
2794 */
2795 xmit_skb = skb_copy(skb, GFP_ATOMIC);
2796 if (!xmit_skb)
2797 net_info_ratelimited("%s: failed to clone multicast frame\n",
2798 dev->name);
2799 } else if (!is_multicast_ether_addr(ehdr->h_dest) &&
2800 !ether_addr_equal(ehdr->h_dest, ehdr->h_source)) {
2801 dsta = sta_info_get(sdata, ehdr->h_dest);
2802 if (dsta) {
2803 /*
2804 * The destination station is associated to
2805 * this AP (in this VLAN), so send the frame
2806 * directly to it and do not pass it to local
2807 * net stack.
2808 */
2809 xmit_skb = skb;
2810 skb = NULL;
2811 }
2812 }
2813 }
2814
2815 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2816 if (skb) {
2817 /* 'align' will only take the values 0 or 2 here since all
2818 * frames are required to be aligned to 2-byte boundaries
2819 * when being passed to mac80211; the code here works just
2820 * as well if that isn't true, but mac80211 assumes it can
2821 * access fields as 2-byte aligned (e.g. for ether_addr_equal)
2822 */
2823 int align;
2824
2825 align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
2826 if (align) {
2827 if (WARN_ON(skb_headroom(skb) < 3)) {
2828 dev_kfree_skb(skb);
2829 skb = NULL;
2830 } else {
2831 u8 *data = skb->data;
2832 size_t len = skb_headlen(skb);
2833 skb->data -= align;
2834 memmove(skb->data, data, len);
2835 skb_set_tail_pointer(skb, len);
2836 }
2837 }
2838 }
2839 #endif
2840
2841 if (skb) {
2842 skb->protocol = eth_type_trans(skb, dev);
2843 ieee80211_deliver_skb_to_local_stack(skb, rx);
2844 }
2845
2846 if (xmit_skb) {
2847 /*
2848 * Send to wireless media and increase priority by 256 to
2849 * keep the received priority instead of reclassifying
2850 * the frame (see cfg80211_classify8021d).
2851 */
2852 xmit_skb->priority += 256;
2853 xmit_skb->protocol = htons(ETH_P_802_3);
2854 skb_reset_network_header(xmit_skb);
2855 skb_reset_mac_header(xmit_skb);
2856 dev_queue_xmit(xmit_skb);
2857 }
2858 }
2859
2860 #ifdef CONFIG_MAC80211_MESH
2861 static bool
2862 ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
2863 struct sk_buff *skb, int hdrlen)
2864 {
2865 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2866 struct ieee80211_mesh_fast_tx_key key = {
2867 .type = MESH_FAST_TX_TYPE_FORWARDED
2868 };
2869 struct ieee80211_mesh_fast_tx *entry;
2870 struct ieee80211s_hdr *mesh_hdr;
2871 struct tid_ampdu_tx *tid_tx;
2872 struct sta_info *sta;
2873 struct ethhdr eth;
2874 u8 tid;
2875
2876 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
2877 if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
2878 ether_addr_copy(key.addr, mesh_hdr->eaddr1);
2879 else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
2880 ether_addr_copy(key.addr, skb->data);
2881 else
2882 return false;
2883
2884 entry = mesh_fast_tx_get(sdata, &key);
2885 if (!entry)
2886 return false;
2887
2888 sta = rcu_dereference(entry->mpath->next_hop);
2889 if (!sta)
2890 return false;
2891
2892 if (skb_linearize(skb))
2893 return false;
2894
2895 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
2896 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
2897 if (tid_tx) {
2898 if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state))
2899 return false;
2900
2901 if (tid_tx->timeout)
2902 tid_tx->last_tx = jiffies;
2903 }
2904
2905 ieee80211_aggr_check(sdata, sta, skb);
2906
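	/* take the protocol from the SNAP tunnel header if present,
	 * otherwise store the remaining payload length (802.3 style)
	 */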
2907 if (ieee80211_get_8023_tunnel_proto(skb->data + hdrlen,
2908 &skb->protocol))
2909 hdrlen += ETH_ALEN;
2910 else
2911 skb->protocol = htons(skb->len - hdrlen);
2912 skb_set_network_header(skb, hdrlen + 2);
2913
2914 skb->dev = sdata->dev;
2915 memcpy(ð, skb->data, ETH_HLEN - 2);
2916 skb_pull(skb, 2);
2917 __ieee80211_xmit_fast(sdata, sta, &entry->fast_tx, skb, tid_tx,
2918 eth.h_dest, eth.h_source);
2919 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
2920 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
2921
2922 return true;
2923 }
2924 #endif
2925
2926 static ieee80211_rx_result
2927 ieee80211_rx_mesh_data(struct ieee80211_sub_if_data *sdata, struct sta_info *sta,
2928 struct sk_buff *skb)
2929 {
2930 #ifdef CONFIG_MAC80211_MESH
2931 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2932 struct ieee80211_local *local = sdata->local;
2933 uint16_t fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
2934 struct ieee80211_hdr hdr = {
2935 .frame_control = cpu_to_le16(fc)
2936 };
2937 struct ieee80211_hdr *fwd_hdr;
2938 struct ieee80211s_hdr *mesh_hdr;
2939 struct ieee80211_tx_info *info;
2940 struct sk_buff *fwd_skb;
2941 struct ethhdr *eth;
2942 bool multicast;
2943 int tailroom = 0;
2944 int hdrlen, mesh_hdrlen;
2945 u8 *qos;
2946
2947 if (!ieee80211_vif_is_mesh(&sdata->vif))
2948 return RX_CONTINUE;
2949
2950 if (!pskb_may_pull(skb, sizeof(*eth) + 6))
2951 return RX_DROP;
2952
2953 mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(*eth));
2954 mesh_hdrlen = ieee80211_get_mesh_hdrlen(mesh_hdr);
2955
2956 if (!pskb_may_pull(skb, sizeof(*eth) + mesh_hdrlen))
2957 return RX_DROP;
2958
2959 eth = (struct ethhdr *)skb->data;
2960 multicast = is_multicast_ether_addr(eth->h_dest);
2961
2962 mesh_hdr = (struct ieee80211s_hdr *)(eth + 1);
2963 if (!mesh_hdr->ttl)
2964 return RX_DROP;
2965
2966 /* frame is in RMC, don't forward */
2967 if (is_multicast_ether_addr(eth->h_dest) &&
2968 mesh_rmc_check(sdata, eth->h_source, mesh_hdr))
2969 return RX_DROP;
2970
2971 /* forward packet */
2972 if (sdata->crypto_tx_tailroom_needed_cnt)
2973 tailroom = IEEE80211_ENCRYPT_TAILROOM;
2974
2975 if (mesh_hdr->flags & MESH_FLAGS_AE) {
2976 struct mesh_path *mppath;
2977 char *proxied_addr;
2978 bool update = false;
2979
2980 if (multicast)
2981 proxied_addr = mesh_hdr->eaddr1;
2982 else if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
2983 /* has_a4 already checked in ieee80211_rx_mesh_check */
2984 proxied_addr = mesh_hdr->eaddr2;
2985 else
2986 return RX_DROP;
2987
2988 rcu_read_lock();
2989 mppath = mpp_path_lookup(sdata, proxied_addr);
2990 if (!mppath) {
2991 mpp_path_add(sdata, proxied_addr, eth->h_source);
2992 } else {
2993 spin_lock_bh(&mppath->state_lock);
2994 if (!ether_addr_equal(mppath->mpp, eth->h_source)) {
2995 memcpy(mppath->mpp, eth->h_source, ETH_ALEN);
2996 update = true;
2997 }
2998 mppath->exp_time = jiffies;
2999 spin_unlock_bh(&mppath->state_lock);
3000 }
3001
3002 /* flush fast xmit cache if the address path changed */
3003 if (update)
3004 mesh_fast_tx_flush_addr(sdata, proxied_addr);
3005
3006 rcu_read_unlock();
3007 }
3008
3009 /* Frame has reached destination. Don't forward */
3010 if (ether_addr_equal(sdata->vif.addr, eth->h_dest))
3011 goto rx_accept;
3012
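	/* decrement the TTL; if it reaches zero, accept multicast locally
	 * but don't forward it, and drop unicast
	 */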
3013 if (!--mesh_hdr->ttl) {
3014 if (multicast)
3015 goto rx_accept;
3016
3017 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
3018 return RX_DROP;
3019 }
3020
3021 if (!ifmsh->mshcfg.dot11MeshForwarding) {
3022 if (is_multicast_ether_addr(eth->h_dest))
3023 goto rx_accept;
3024
3025 return RX_DROP;
3026 }
3027
3028 skb_set_queue_mapping(skb, ieee802_1d_to_ac[skb->priority]);
3029
3030 if (!multicast &&
3031 ieee80211_rx_mesh_fast_forward(sdata, skb, mesh_hdrlen))
3032 return RX_QUEUED;
3033
3034 ieee80211_fill_mesh_addresses(&hdr, &hdr.frame_control,
3035 eth->h_dest, eth->h_source);
3036 hdrlen = ieee80211_hdrlen(hdr.frame_control);
3037 if (multicast) {
3038 int extra_head = sizeof(struct ieee80211_hdr) - sizeof(*eth);
3039
3040 fwd_skb = skb_copy_expand(skb, local->tx_headroom + extra_head +
3041 IEEE80211_ENCRYPT_HEADROOM,
3042 tailroom, GFP_ATOMIC);
3043 if (!fwd_skb)
3044 goto rx_accept;
3045 } else {
3046 fwd_skb = skb;
3047 skb = NULL;
3048
3049 if (skb_cow_head(fwd_skb, hdrlen - sizeof(struct ethhdr)))
3050 return RX_DROP_U_OOM;
3051
3052 if (skb_linearize(fwd_skb))
3053 return RX_DROP_U_OOM;
3054 }
3055
3056 fwd_hdr = skb_push(fwd_skb, hdrlen - sizeof(struct ethhdr));
3057 memcpy(fwd_hdr, &hdr, hdrlen - 2);
3058 qos = ieee80211_get_qos_ctl(fwd_hdr);
3059 qos[0] = qos[1] = 0;
3060
3061 skb_reset_mac_header(fwd_skb);
3062 hdrlen += mesh_hdrlen;
3063 if (ieee80211_get_8023_tunnel_proto(fwd_skb->data + hdrlen,
3064 &fwd_skb->protocol))
3065 hdrlen += ETH_ALEN;
3066 else
3067 fwd_skb->protocol = htons(fwd_skb->len - hdrlen);
3068 skb_set_network_header(fwd_skb, hdrlen + 2);
3069
3070 info = IEEE80211_SKB_CB(fwd_skb);
3071 memset(info, 0, sizeof(*info));
3072 info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
3073 info->control.vif = &sdata->vif;
3074 info->control.jiffies = jiffies;
3075 fwd_skb->dev = sdata->dev;
3076 if (multicast) {
3077 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
3078 memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
3079 /* update power mode indication when forwarding */
3080 ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
3081 } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
3082 /* mesh power mode flags updated in mesh_nexthop_lookup */
3083 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
3084 } else {
3085 /* unable to resolve next hop */
3086 if (sta)
3087 mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
3088 hdr.addr3, 0,
3089 WLAN_REASON_MESH_PATH_NOFORWARD,
3090 sta->sta.addr);
3091 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
3092 kfree_skb(fwd_skb);
3093 goto rx_accept;
3094 }
3095
3096 IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
3097 ieee80211_set_qos_hdr(sdata, fwd_skb);
3098 ieee80211_add_pending_skb(local, fwd_skb);
3099
3100 rx_accept:
3101 if (!skb)
3102 return RX_QUEUED;
3103
3104 ieee80211_strip_8023_mesh_hdr(skb);
3105 #endif
3106
3107 return RX_CONTINUE;
3108 }
3109
3110 static ieee80211_rx_result debug_noinline
3111 __ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx, u8 data_offset)
3112 {
3113 struct net_device *dev = rx->sdata->dev;
3114 struct sk_buff *skb = rx->skb;
3115 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3116 __le16 fc = hdr->frame_control;
3117 struct sk_buff_head frame_list;
3118 struct ethhdr ethhdr;
3119 const u8 *check_da = ethhdr.h_dest, *check_sa = ethhdr.h_source;
3120
3121 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
3122 check_da = NULL;
3123 check_sa = NULL;
3124 } else switch (rx->sdata->vif.type) {
3125 case NL80211_IFTYPE_AP:
3126 case NL80211_IFTYPE_AP_VLAN:
3127 check_da = NULL;
3128 break;
3129 case NL80211_IFTYPE_STATION:
3130 if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
3131 check_sa = NULL;
3132 break;
3133 case NL80211_IFTYPE_MESH_POINT:
3134 check_sa = NULL;
3135 check_da = NULL;
3136 break;
3137 default:
3138 break;
3139 }
3140
3141 skb->dev = dev;
3142 __skb_queue_head_init(&frame_list);
3143
3144 if (ieee80211_data_to_8023_exthdr(skb, ðhdr,
3145 rx->sdata->vif.addr,
3146 rx->sdata->vif.type,
3147 data_offset, true))
3148 return RX_DROP_U_BAD_AMSDU;
3149
3150 if (rx->sta->amsdu_mesh_control < 0) {
3151 s8 valid = -1;
3152 int i;
3153
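		/* First A-MSDU from this STA: probe which mesh control
		 * handling yields a valid A-MSDU and cache it; keep -1 if
		 * none or more than one variant validates (ambiguous).
		 */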
3154 for (i = 0; i <= 2; i++) {
3155 if (!ieee80211_is_valid_amsdu(skb, i))
3156 continue;
3157
3158 if (valid >= 0) {
3159 /* ambiguous */
3160 valid = -1;
3161 break;
3162 }
3163
3164 valid = i;
3165 }
3166
3167 rx->sta->amsdu_mesh_control = valid;
3168 }
3169
3170 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
3171 rx->sdata->vif.type,
3172 rx->local->hw.extra_tx_headroom,
3173 check_da, check_sa,
3174 rx->sta->amsdu_mesh_control);
3175
3176 while (!skb_queue_empty(&frame_list)) {
3177 rx->skb = __skb_dequeue(&frame_list);
3178
3179 switch (ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb)) {
3180 case RX_QUEUED:
3181 break;
3182 case RX_CONTINUE:
3183 if (ieee80211_frame_allowed(rx, fc)) {
3184 ieee80211_deliver_skb(rx);
3185 break;
3186 }
3187 fallthrough;
3188 default:
3189 dev_kfree_skb(rx->skb);
3190 }
3191 }
3192
3193 return RX_QUEUED;
3194 }
3195
3196 static ieee80211_rx_result debug_noinline
3197 ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
3198 {
3199 struct sk_buff *skb = rx->skb;
3200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
3201 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3202 __le16 fc = hdr->frame_control;
3203
3204 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
3205 return RX_CONTINUE;
3206
3207 if (unlikely(!ieee80211_is_data(fc)))
3208 return RX_CONTINUE;
3209
3210 if (unlikely(!ieee80211_is_data_present(fc)))
3211 return RX_DROP;
3212
3213 if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
3214 switch (rx->sdata->vif.type) {
3215 case NL80211_IFTYPE_AP_VLAN:
3216 if (!rx->sdata->u.vlan.sta)
3217 return RX_DROP_U_BAD_4ADDR;
3218 break;
3219 case NL80211_IFTYPE_STATION:
3220 if (!rx->sdata->u.mgd.use_4addr)
3221 return RX_DROP_U_BAD_4ADDR;
3222 break;
3223 case NL80211_IFTYPE_MESH_POINT:
3224 break;
3225 default:
3226 return RX_DROP_U_BAD_4ADDR;
3227 }
3228 }
3229
3230 if (is_multicast_ether_addr(hdr->addr1) || !rx->sta)
3231 return RX_DROP_U_BAD_AMSDU;
3232
3233 if (rx->key) {
3234 /*
3235 * We should not receive A-MSDUs on pre-HT connections,
3236 * and HT connections cannot use old ciphers. Thus drop
3237 * them, as in those cases we couldn't even have SPP
3238 * A-MSDUs or such.
3239 */
3240 switch (rx->key->conf.cipher) {
3241 case WLAN_CIPHER_SUITE_WEP40:
3242 case WLAN_CIPHER_SUITE_WEP104:
3243 case WLAN_CIPHER_SUITE_TKIP:
3244 return RX_DROP_U_BAD_AMSDU_CIPHER;
3245 default:
3246 break;
3247 }
3248 }
3249
3250 return __ieee80211_rx_h_amsdu(rx, 0);
3251 }
3252
3253 static ieee80211_rx_result debug_noinline
3254 ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
3255 {
3256 struct ieee80211_sub_if_data *sdata = rx->sdata;
3257 struct ieee80211_local *local = rx->local;
3258 struct net_device *dev = sdata->dev;
3259 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
3260 __le16 fc = hdr->frame_control;
3261 ieee80211_rx_result res;
3262 bool port_control;
3263
3264 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
3265 return RX_CONTINUE;
3266
3267 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
3268 return RX_DROP;
3269
3270 /* Send unexpected-4addr-frame event to hostapd */
3271 if (ieee80211_has_a4(hdr->frame_control) &&
3272 sdata->vif.type == NL80211_IFTYPE_AP) {
3273 if (rx->sta &&
3274 !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
3275 cfg80211_rx_unexpected_4addr_frame(
3276 rx->sdata->dev, rx->sta->sta.addr, rx->link_id,
3277 GFP_ATOMIC);
3278 return RX_DROP;
3279 }
3280
3281 res = __ieee80211_data_to_8023(rx, &port_control);
3282 if (unlikely(res != RX_CONTINUE))
3283 return res;
3284
3285 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
3286 if (res != RX_CONTINUE)
3287 return res;
3288
3289 if (!ieee80211_frame_allowed(rx, fc))
3290 return RX_DROP;
3291
3292 /* directly handle TDLS channel switch requests/responses */
3293 if (unlikely(((struct ethhdr *)rx->skb->data)->h_proto ==
3294 cpu_to_be16(ETH_P_TDLS))) {
3295 struct ieee80211_tdls_data *tf = (void *)rx->skb->data;
3296
3297 if (pskb_may_pull(rx->skb,
3298 offsetof(struct ieee80211_tdls_data, u)) &&
3299 tf->payload_type == WLAN_TDLS_SNAP_RFTYPE &&
3300 tf->category == WLAN_CATEGORY_TDLS &&
3301 (tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ||
3302 tf->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE)) {
3303 rx->skb->protocol = cpu_to_be16(ETH_P_TDLS);
3304 __ieee80211_queue_skb_to_iface(sdata, rx->link_id,
3305 rx->sta, rx->skb);
3306 return RX_QUEUED;
3307 }
3308 }
3309
3310 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
3311 unlikely(port_control) && sdata->bss) {
3312 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
3313 u.ap);
3314 dev = sdata->dev;
3315 rx->sdata = sdata;
3316 }
3317
3318 rx->skb->dev = dev;
3319
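	/* with host-managed dynamic powersave, postpone sleeping again when
	 * unicast data arrives while not scanning or off-channel
	 */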
3320 if (!ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS) &&
3321 local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
3322 !is_multicast_ether_addr(
3323 ((struct ethhdr *)rx->skb->data)->h_dest) &&
3324 (!local->scanning &&
3325 !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)))
3326 mod_timer(&local->dynamic_ps_timer, jiffies +
3327 msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
3328
3329 ieee80211_deliver_skb(rx);
3330
3331 return RX_QUEUED;
3332 }
3333
3334 static ieee80211_rx_result debug_noinline
3335 ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
3336 {
3337 struct sk_buff *skb = rx->skb;
3338 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
3339 struct tid_ampdu_rx *tid_agg_rx;
3340 u16 start_seq_num;
3341 u16 tid;
3342
3343 if (likely(!ieee80211_is_ctl(bar->frame_control)))
3344 return RX_CONTINUE;
3345
3346 if (ieee80211_is_back_req(bar->frame_control)) {
3347 struct {
3348 __le16 control, start_seq_num;
3349 } __packed bar_data;
3350 struct ieee80211_event event = {
3351 .type = BAR_RX_EVENT,
3352 };
3353
3354 if (!rx->sta)
3355 return RX_DROP;
3356
3357 if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
3358 &bar_data, sizeof(bar_data)))
3359 return RX_DROP;
3360
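		/* the TID is carried in the top four bits of the BAR control field */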
3361 tid = le16_to_cpu(bar_data.control) >> 12;
3362
3363 if (!test_bit(tid, rx->sta->ampdu_mlme.agg_session_valid) &&
3364 !test_and_set_bit(tid, rx->sta->ampdu_mlme.unexpected_agg))
3365 ieee80211_send_delba(rx->sdata, rx->sta->sta.addr, tid,
3366 WLAN_BACK_RECIPIENT,
3367 WLAN_REASON_QSTA_REQUIRE_SETUP);
3368
3369 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
3370 if (!tid_agg_rx)
3371 return RX_DROP;
3372
3373 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
3374 event.u.ba.tid = tid;
3375 event.u.ba.ssn = start_seq_num;
3376 event.u.ba.sta = &rx->sta->sta;
3377
3378 /* reset session timer */
3379 if (tid_agg_rx->timeout)
3380 mod_timer(&tid_agg_rx->session_timer,
3381 TU_TO_EXP_TIME(tid_agg_rx->timeout));
3382
3383 spin_lock(&tid_agg_rx->reorder_lock);
3384 /* release stored frames up to start of BAR */
3385 ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
3386 start_seq_num, frames);
3387 spin_unlock(&tid_agg_rx->reorder_lock);
3388
3389 drv_event_callback(rx->local, rx->sdata, &event);
3390
3391 kfree_skb(skb);
3392 return RX_QUEUED;
3393 }
3394
3395 return RX_DROP;
3396 }
3397
3398 static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
3399 struct ieee80211_mgmt *mgmt,
3400 size_t len)
3401 {
3402 struct ieee80211_local *local = sdata->local;
3403 struct sk_buff *skb;
3404 struct ieee80211_mgmt *resp;
3405
3406 if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
3407 		/* Not addressed to our own unicast address */
3408 return;
3409 }
3410
3411 if (!ether_addr_equal(mgmt->sa, sdata->vif.cfg.ap_addr) ||
3412 !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) {
3413 /* Not from the current AP or not associated yet. */
3414 return;
3415 }
3416
3417 if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
3418 /* Too short SA Query request frame */
3419 return;
3420 }
3421
3422 skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
3423 if (skb == NULL)
3424 return;
3425
3426 skb_reserve(skb, local->hw.extra_tx_headroom);
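	/* build the SA Query Response: 24-byte management header followed
	 * by category, action code and the echoed transaction ID
	 */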
3427 resp = skb_put_zero(skb, 24);
3428 memcpy(resp->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
3429 memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
3430 memcpy(resp->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
3431 resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3432 IEEE80211_STYPE_ACTION);
3433 skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
3434 resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
3435 resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
3436 memcpy(resp->u.action.u.sa_query.trans_id,
3437 mgmt->u.action.u.sa_query.trans_id,
3438 WLAN_SA_QUERY_TR_ID_LEN);
3439
3440 ieee80211_tx_skb(sdata, skb);
3441 }
3442
3443 static void
3444 ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)
3445 {
3446 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
3447 struct ieee80211_bss_conf *bss_conf;
3448 const struct element *ie;
3449 size_t baselen;
3450
3451 if (!wiphy_ext_feature_isset(rx->local->hw.wiphy,
3452 NL80211_EXT_FEATURE_BSS_COLOR))
3453 return;
3454
3455 if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION))
3456 return;
3457
3458 bss_conf = rx->link->conf;
3459 if (bss_conf->csa_active || bss_conf->color_change_active ||
3460 !bss_conf->he_bss_color.enabled)
3461 return;
3462
3463 baselen = mgmt->u.beacon.variable - rx->skb->data;
3464 if (baselen > rx->skb->len)
3465 return;
3466
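	/* find the HE Operation element and compare its advertised BSS
	 * color against our own
	 */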
3467 ie = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION,
3468 mgmt->u.beacon.variable,
3469 rx->skb->len - baselen);
3470 if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) &&
3471 ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) {
3472 const struct ieee80211_he_operation *he_oper;
3473 u8 color;
3474
3475 he_oper = (void *)(ie->data + 1);
3476 if (le32_get_bits(he_oper->he_oper_params,
3477 IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED))
3478 return;
3479
3480 color = le32_get_bits(he_oper->he_oper_params,
3481 IEEE80211_HE_OPERATION_BSS_COLOR_MASK);
3482 if (color == bss_conf->he_bss_color.color)
3483 ieee80211_obss_color_collision_notify(&rx->sdata->vif,
3484 BIT_ULL(color),
3485 bss_conf->link_id);
3486 }
3487 }
3488
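/*
 * Basic sanity checks for management frames: enforce minimum lengths,
 * report OBSS beacons received in AP mode (including the software
 * color collision check above), and finally apply the policy for
 * dropping unencrypted management frames.
 */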
3489 static ieee80211_rx_result debug_noinline
3490 ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
3491 {
3492 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3493 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3494
3495 if (ieee80211_is_s1g_beacon(mgmt->frame_control))
3496 return RX_CONTINUE;
3497
3498 /*
3499 * From here on, look only at management frames.
3500 * Data and control frames are already handled,
3501 * and unknown (reserved) frames are useless.
3502 */
3503 if (rx->skb->len < 24)
3504 return RX_DROP;
3505
3506 if (!ieee80211_is_mgmt(mgmt->frame_control))
3507 return RX_DROP;
3508
3509 /* drop too small action frames */
3510 if (ieee80211_is_action(mgmt->frame_control) &&
3511 rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
3512 return RX_DROP_U_RUNT_ACTION;
3513
3514 if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
3515 ieee80211_is_beacon(mgmt->frame_control) &&
3516 !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
3517 int sig = 0;
3518
3519 /* sw bss color collision detection */
3520 ieee80211_rx_check_bss_color_collision(rx);
3521
3522 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3523 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3524 sig = status->signal;
3525
3526 cfg80211_report_obss_beacon_khz(rx->local->hw.wiphy,
3527 rx->skb->data, rx->skb->len,
3528 ieee80211_rx_status_to_khz(status),
3529 sig);
3530 rx->flags |= IEEE80211_RX_BEACON_REPORTED;
3531 }
3532
3533 return ieee80211_drop_unencrypted_mgmt(rx);
3534 }
3535
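/*
 * Validate S1G TWT Setup/Teardown action frames received as a TWT
 * responder AP; returns true if the frame is well-formed and should
 * be queued to the interface for further processing.
 */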
3536 static bool
3537 ieee80211_process_rx_twt_action(struct ieee80211_rx_data *rx)
3538 {
3539 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)rx->skb->data;
3540 struct ieee80211_sub_if_data *sdata = rx->sdata;
3541
3542 /* TWT actions are only supported in AP for the moment */
3543 if (sdata->vif.type != NL80211_IFTYPE_AP)
3544 return false;
3545
3546 if (!rx->local->ops->add_twt_setup)
3547 return false;
3548
3549 if (!sdata->vif.bss_conf.twt_responder)
3550 return false;
3551
3552 if (!rx->sta)
3553 return false;
3554
3555 switch (mgmt->u.action.u.s1g.action_code) {
3556 case WLAN_S1G_TWT_SETUP: {
3557 struct ieee80211_twt_setup *twt;
3558
3559 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3560 1 + /* action code */
3561 sizeof(struct ieee80211_twt_setup) +
3562 2 /* TWT req_type agrt */)
3563 break;
3564
3565 twt = (void *)mgmt->u.action.u.s1g.variable;
3566 if (twt->element_id != WLAN_EID_S1G_TWT)
3567 break;
3568
3569 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE +
3570 4 + /* action code + token + tlv */
3571 twt->length)
3572 break;
3573
3574 return true; /* queue the frame */
3575 }
3576 case WLAN_S1G_TWT_TEARDOWN:
3577 if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE + 2)
3578 break;
3579
3580 return true; /* queue the frame */
3581 default:
3582 break;
3583 }
3584
3585 return false;
3586 }
3587
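/*
 * Per-category action frame handling: validate lengths and interface
 * types, handle a few frames in place (e.g. HT SMPS, measurement
 * requests), queue the rest to the interface work, and flag malformed
 * frames so they skip the userspace delivery step.
 */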
3588 static ieee80211_rx_result debug_noinline
3589 ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
3590 {
3591 struct ieee80211_local *local = rx->local;
3592 struct ieee80211_sub_if_data *sdata = rx->sdata;
3593 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3594 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3595 int len = rx->skb->len;
3596
3597 if (!ieee80211_is_action(mgmt->frame_control))
3598 return RX_CONTINUE;
3599
3600 if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
3601 mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
3602 mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
3603 return RX_DROP_U_ACTION_UNKNOWN_SRC;
3604
3605 switch (mgmt->u.action.category) {
3606 case WLAN_CATEGORY_HT:
3607 /* reject HT action frames from stations supporting
3608 * neither HT nor HE
3609 */
3610 if (!rx->link_sta->pub->ht_cap.ht_supported &&
3611 !rx->link_sta->pub->he_cap.has_he)
3612 goto invalid;
3613
3614 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3615 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3616 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3617 sdata->vif.type != NL80211_IFTYPE_AP &&
3618 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3619 break;
3620
3621 /* verify action & smps_control/chanwidth are present */
3622 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3623 goto invalid;
3624
3625 switch (mgmt->u.action.u.ht_smps.action) {
3626 case WLAN_HT_ACTION_SMPS: {
3627 struct ieee80211_supported_band *sband;
3628 enum ieee80211_smps_mode smps_mode;
3629 struct sta_opmode_info sta_opmode = {};
3630
3631 if (sdata->vif.type != NL80211_IFTYPE_AP &&
3632 sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
3633 goto handled;
3634
3635 /* convert to HT capability */
3636 switch (mgmt->u.action.u.ht_smps.smps_control) {
3637 case WLAN_HT_SMPS_CONTROL_DISABLED:
3638 smps_mode = IEEE80211_SMPS_OFF;
3639 break;
3640 case WLAN_HT_SMPS_CONTROL_STATIC:
3641 smps_mode = IEEE80211_SMPS_STATIC;
3642 break;
3643 case WLAN_HT_SMPS_CONTROL_DYNAMIC:
3644 smps_mode = IEEE80211_SMPS_DYNAMIC;
3645 break;
3646 default:
3647 goto invalid;
3648 }
3649
3650 /* if no change do nothing */
3651 if (rx->link_sta->pub->smps_mode == smps_mode)
3652 goto handled;
3653 rx->link_sta->pub->smps_mode = smps_mode;
3654 sta_opmode.smps_mode =
3655 ieee80211_smps_mode_to_smps_mode(smps_mode);
3656 sta_opmode.changed = STA_OPMODE_SMPS_MODE_CHANGED;
3657
3658 sband = rx->local->hw.wiphy->bands[status->band];
3659
3660 rate_control_rate_update(local, sband, rx->link_sta,
3661 IEEE80211_RC_SMPS_CHANGED);
3662 cfg80211_sta_opmode_change_notify(sdata->dev,
3663 rx->sta->addr,
3664 &sta_opmode,
3665 GFP_ATOMIC);
3666 goto handled;
3667 }
3668 case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
3669 u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
3670
3671 if (chanwidth != IEEE80211_HT_CHANWIDTH_20MHZ &&
3672 chanwidth != IEEE80211_HT_CHANWIDTH_ANY)
3673 goto invalid;
3674
3675 /* If it doesn't support 40 MHz it can't change ... */
3676 if (!(rx->link_sta->pub->ht_cap.cap &
3677 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
3678 goto handled;
3679
3680 goto queue;
3681 }
3682 default:
3683 goto invalid;
3684 }
3685
3686 break;
3687 case WLAN_CATEGORY_PUBLIC:
3688 case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION:
3689 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3690 goto invalid;
3691 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3692 break;
3693 if (!rx->sta)
3694 break;
3695 if (!ether_addr_equal(mgmt->bssid, sdata->deflink.u.mgd.bssid))
3696 break;
3697 if (mgmt->u.action.u.ext_chan_switch.action_code !=
3698 WLAN_PUB_ACTION_EXT_CHANSW_ANN)
3699 break;
3700 if (len < offsetof(struct ieee80211_mgmt,
3701 u.action.u.ext_chan_switch.variable))
3702 goto invalid;
3703 goto queue;
3704 case WLAN_CATEGORY_VHT:
3705 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3706 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3707 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3708 sdata->vif.type != NL80211_IFTYPE_AP &&
3709 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3710 break;
3711
3712 /* verify action code is present */
3713 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3714 goto invalid;
3715
3716 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
3717 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
3718 /* verify opmode is present */
3719 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
3720 goto invalid;
3721 goto queue;
3722 }
3723 case WLAN_VHT_ACTION_GROUPID_MGMT: {
3724 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
3725 goto invalid;
3726 goto queue;
3727 }
3728 default:
3729 break;
3730 }
3731 break;
3732 case WLAN_CATEGORY_BACK:
3733 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3734 sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
3735 sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
3736 sdata->vif.type != NL80211_IFTYPE_AP &&
3737 sdata->vif.type != NL80211_IFTYPE_ADHOC)
3738 break;
3739
3740 /* verify action_code is present */
3741 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3742 break;
3743
3744 switch (mgmt->u.action.u.addba_req.action_code) {
3745 case WLAN_ACTION_ADDBA_REQ:
3746 if (len < (IEEE80211_MIN_ACTION_SIZE +
3747 sizeof(mgmt->u.action.u.addba_req)))
3748 goto invalid;
3749 break;
3750 case WLAN_ACTION_ADDBA_RESP:
3751 if (len < (IEEE80211_MIN_ACTION_SIZE +
3752 sizeof(mgmt->u.action.u.addba_resp)))
3753 goto invalid;
3754 break;
3755 case WLAN_ACTION_DELBA:
3756 if (len < (IEEE80211_MIN_ACTION_SIZE +
3757 sizeof(mgmt->u.action.u.delba)))
3758 goto invalid;
3759 break;
3760 default:
3761 goto invalid;
3762 }
3763
3764 goto queue;
3765 case WLAN_CATEGORY_SPECTRUM_MGMT:
3766 /* verify action_code is present */
3767 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
3768 break;
3769
3770 switch (mgmt->u.action.u.measurement.action_code) {
3771 case WLAN_ACTION_SPCT_MSR_REQ:
3772 if (status->band != NL80211_BAND_5GHZ)
3773 break;
3774
3775 if (len < (IEEE80211_MIN_ACTION_SIZE +
3776 sizeof(mgmt->u.action.u.measurement)))
3777 break;
3778
3779 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3780 break;
3781
3782 ieee80211_process_measurement_req(sdata, mgmt, len);
3783 goto handled;
3784 case WLAN_ACTION_SPCT_CHL_SWITCH: {
3785 u8 *bssid;
3786 if (len < (IEEE80211_MIN_ACTION_SIZE +
3787 sizeof(mgmt->u.action.u.chan_switch)))
3788 break;
3789
3790 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
3791 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
3792 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
3793 break;
3794
3795 if (sdata->vif.type == NL80211_IFTYPE_STATION)
3796 bssid = sdata->deflink.u.mgd.bssid;
3797 else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
3798 bssid = sdata->u.ibss.bssid;
3799 else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
3800 bssid = mgmt->sa;
3801 else
3802 break;
3803
3804 if (!ether_addr_equal(mgmt->bssid, bssid))
3805 break;
3806
3807 goto queue;
3808 }
3809 }
3810 break;
3811 case WLAN_CATEGORY_SELF_PROTECTED:
3812 if (len < (IEEE80211_MIN_ACTION_SIZE +
3813 sizeof(mgmt->u.action.u.self_prot.action_code)))
3814 break;
3815
3816 switch (mgmt->u.action.u.self_prot.action_code) {
3817 case WLAN_SP_MESH_PEERING_OPEN:
3818 case WLAN_SP_MESH_PEERING_CLOSE:
3819 case WLAN_SP_MESH_PEERING_CONFIRM:
3820 if (!ieee80211_vif_is_mesh(&sdata->vif))
3821 goto invalid;
3822 if (sdata->u.mesh.user_mpm)
3823 /* userspace handles this frame */
3824 break;
3825 goto queue;
3826 case WLAN_SP_MGK_INFORM:
3827 case WLAN_SP_MGK_ACK:
3828 if (!ieee80211_vif_is_mesh(&sdata->vif))
3829 goto invalid;
3830 break;
3831 }
3832 break;
3833 case WLAN_CATEGORY_MESH_ACTION:
3834 if (len < (IEEE80211_MIN_ACTION_SIZE +
3835 sizeof(mgmt->u.action.u.mesh_action.action_code)))
3836 break;
3837
3838 if (!ieee80211_vif_is_mesh(&sdata->vif))
3839 break;
3840 if (mesh_action_is_path_sel(mgmt) &&
3841 !mesh_path_sel_is_hwmp(sdata))
3842 break;
3843 goto queue;
3844 case WLAN_CATEGORY_S1G:
3845 if (len < offsetofend(typeof(*mgmt),
3846 u.action.u.s1g.action_code))
3847 break;
3848
3849 switch (mgmt->u.action.u.s1g.action_code) {
3850 case WLAN_S1G_TWT_SETUP:
3851 case WLAN_S1G_TWT_TEARDOWN:
3852 if (ieee80211_process_rx_twt_action(rx))
3853 goto queue;
3854 break;
3855 default:
3856 break;
3857 }
3858 break;
3859 case WLAN_CATEGORY_PROTECTED_EHT:
3860 if (len < offsetofend(typeof(*mgmt),
3861 u.action.u.ttlm_req.action_code))
3862 break;
3863
3864 switch (mgmt->u.action.u.ttlm_req.action_code) {
3865 case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
3866 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3867 break;
3868
3869 if (len < offsetofend(typeof(*mgmt),
3870 u.action.u.ttlm_req))
3871 goto invalid;
3872 goto queue;
3873 case WLAN_PROTECTED_EHT_ACTION_TTLM_RES:
3874 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3875 break;
3876
3877 if (len < offsetofend(typeof(*mgmt),
3878 u.action.u.ttlm_res))
3879 goto invalid;
3880 goto queue;
3881 case WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN:
3882 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3883 break;
3884
3885 if (len < offsetofend(typeof(*mgmt),
3886 u.action.u.ttlm_tear_down))
3887 goto invalid;
3888 goto queue;
3889 case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
3890 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3891 break;
3892
3893 /* The reconfiguration response action frame must
3894 * contain at least one 'Status Duple' entry (3 octets)
3895 */
3896 if (len <
3897 offsetofend(typeof(*mgmt),
3898 u.action.u.ml_reconf_resp) + 3)
3899 goto invalid;
3900 goto queue;
3901 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP:
3902 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3903 break;
3904
3905 if (len < offsetofend(typeof(*mgmt),
3906 u.action.u.epcs) +
3907 IEEE80211_EPCS_ENA_RESP_BODY_LEN)
3908 goto invalid;
3909 goto queue;
3910 case WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN:
3911 if (sdata->vif.type != NL80211_IFTYPE_STATION)
3912 break;
3913
3914 if (len < offsetofend(typeof(*mgmt),
3915 u.action.u.epcs))
3916 goto invalid;
3917 goto queue;
3918 default:
3919 break;
3920 }
3921 break;
3922 }
3923
3924 return RX_CONTINUE;
3925
3926 invalid:
3927 status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
3928 /* will return in the next handlers */
3929 return RX_CONTINUE;
3930
3931 handled:
3932 if (rx->sta)
3933 rx->link_sta->rx_stats.packets++;
3934 dev_kfree_skb(rx->skb);
3935 return RX_QUEUED;
3936
3937 queue:
3938 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
3939 return RX_QUEUED;
3940 }
3941
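/*
 * Offer management frames the kernel did not consume to userspace via
 * cfg80211; RX_QUEUED if a registered listener took the frame,
 * RX_CONTINUE otherwise.
 */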
3942 static ieee80211_rx_result debug_noinline
3943 ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
3944 {
3945 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
3946 struct cfg80211_rx_info info = {
3947 .freq = ieee80211_rx_status_to_khz(status),
3948 .buf = rx->skb->data,
3949 .len = rx->skb->len,
3950 .link_id = rx->link_id,
3951 .have_link_id = rx->link_id >= 0,
3952 };
3953
3954 /* skip known-bad action frames and return them in the next handler */
3955 if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
3956 return RX_CONTINUE;
3957
3958 /*
3959 * Getting here means the kernel doesn't know how to handle
3960 * it, but maybe userspace does ... include returned frames
3961 * so userspace can register for them and learn whether frames
3962 * it transmitted were processed or returned.
3963 */
3964
3965 if (ieee80211_hw_check(&rx->local->hw, SIGNAL_DBM) &&
3966 !(status->flag & RX_FLAG_NO_SIGNAL_VAL))
3967 info.sig_dbm = status->signal;
3968
3969 if (ieee80211_is_timing_measurement(rx->skb) ||
3970 ieee80211_is_ftm(rx->skb)) {
3971 info.rx_tstamp = ktime_to_ns(skb_hwtstamps(rx->skb)->hwtstamp);
3972 info.ack_tstamp = ktime_to_ns(status->ack_tx_hwtstamp);
3973 }
3974
3975 if (cfg80211_rx_mgmt_ext(&rx->sdata->wdev, &info)) {
3976 if (rx->sta)
3977 rx->link_sta->rx_stats.packets++;
3978 dev_kfree_skb(rx->skb);
3979 return RX_QUEUED;
3980 }
3981
3982 return RX_CONTINUE;
3983 }
3984
3985 static ieee80211_rx_result debug_noinline
3986 ieee80211_rx_h_action_post_userspace(struct ieee80211_rx_data *rx)
3987 {
3988 struct ieee80211_sub_if_data *sdata = rx->sdata;
3989 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
3990 int len = rx->skb->len;
3991
3992 if (!ieee80211_is_action(mgmt->frame_control))
3993 return RX_CONTINUE;
3994
3995 switch (mgmt->u.action.category) {
3996 case WLAN_CATEGORY_SA_QUERY:
3997 if (len < (IEEE80211_MIN_ACTION_SIZE +
3998 sizeof(mgmt->u.action.u.sa_query)))
3999 break;
4000
4001 switch (mgmt->u.action.u.sa_query.action) {
4002 case WLAN_ACTION_SA_QUERY_REQUEST:
4003 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4004 break;
4005 ieee80211_process_sa_query_req(sdata, mgmt, len);
4006 goto handled;
4007 }
4008 break;
4009 }
4010
4011 return RX_CONTINUE;
4012
4013 handled:
4014 if (rx->sta)
4015 rx->link_sta->rx_stats.packets++;
4016 dev_kfree_skb(rx->skb);
4017 return RX_QUEUED;
4018 }
4019
4020 static ieee80211_rx_result debug_noinline
4021 ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
4022 {
4023 struct ieee80211_local *local = rx->local;
4024 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
4025 struct sk_buff *nskb;
4026 struct ieee80211_sub_if_data *sdata = rx->sdata;
4027 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4028
4029 if (!ieee80211_is_action(mgmt->frame_control))
4030 return RX_CONTINUE;
4031
4032 /*
4033 * For AP mode, hostapd is responsible for handling any action
4034 * frames that we didn't handle, including returning unknown
4035 * ones. For all other modes we will return them to the sender,
4036 * setting the 0x80 bit in the action category, as required by
4037 * 802.11-2012 9.24.4.
4038 * Newer versions of hostapd use the management frame registration
4039 * mechanisms, and the old cooked monitor interface is no longer supported.
4040 */
4041 if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
4042 (sdata->vif.type == NL80211_IFTYPE_AP ||
4043 sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
4044 return RX_DROP;
4045
4046 if (is_multicast_ether_addr(mgmt->da))
4047 return RX_DROP;
4048
4049 /* do not return rejected action frames */
4050 if (mgmt->u.action.category & 0x80)
4051 return RX_DROP_U_REJECTED_ACTION_RESPONSE;
4052
4053 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
4054 GFP_ATOMIC);
4055 if (nskb) {
4056 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
4057
4058 nmgmt->u.action.category |= 0x80;
4059 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
4060 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
4061
4062 memset(nskb->cb, 0, sizeof(nskb->cb));
4063
4064 if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
4065 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
4066
4067 info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
4068 IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
4069 IEEE80211_TX_CTL_NO_CCK_RATE;
4070 if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
4071 info->hw_queue =
4072 local->hw.offchannel_tx_hw_queue;
4073 }
4074
4075 __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, -1,
4076 status->band);
4077 }
4078
4079 return RX_DROP_U_UNKNOWN_ACTION_REJECTED;
4080 }
4081
4082 static ieee80211_rx_result debug_noinline
4083 ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
4084 {
4085 struct ieee80211_sub_if_data *sdata = rx->sdata;
4086 struct ieee80211_hdr *hdr = (void *)rx->skb->data;
4087
4088 if (!ieee80211_is_ext(hdr->frame_control))
4089 return RX_CONTINUE;
4090
4091 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4092 return RX_DROP;
4093
4094 /* for now only beacons are ext, so queue them */
4095 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
4096
4097 return RX_QUEUED;
4098 }
4099
4100 static ieee80211_rx_result debug_noinline
4101 ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
4102 {
4103 struct ieee80211_sub_if_data *sdata = rx->sdata;
4104 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
4105 __le16 stype;
4106
4107 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
4108
4109 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
4110 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
4111 sdata->vif.type != NL80211_IFTYPE_OCB &&
4112 sdata->vif.type != NL80211_IFTYPE_STATION)
4113 return RX_DROP;
4114
4115 switch (stype) {
4116 case cpu_to_le16(IEEE80211_STYPE_AUTH):
4117 case cpu_to_le16(IEEE80211_STYPE_BEACON):
4118 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
4119 /* process for all: mesh, mlme, ibss */
4120 break;
4121 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
4122 if (is_multicast_ether_addr(mgmt->da) &&
4123 !is_broadcast_ether_addr(mgmt->da))
4124 return RX_DROP;
4125
4126 /* process only for station/IBSS */
4127 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
4128 sdata->vif.type != NL80211_IFTYPE_ADHOC)
4129 return RX_DROP;
4130 break;
4131 case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
4132 case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
4133 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
4134 if (is_multicast_ether_addr(mgmt->da) &&
4135 !is_broadcast_ether_addr(mgmt->da))
4136 return RX_DROP;
4137
4138 /* process only for station */
4139 if (sdata->vif.type != NL80211_IFTYPE_STATION)
4140 return RX_DROP;
4141 break;
4142 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
4143 /* process only for ibss and mesh */
4144 if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
4145 sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
4146 return RX_DROP;
4147 break;
4148 default:
4149 return RX_DROP;
4150 }
4151
4152 ieee80211_queue_skb_to_iface(sdata, rx->link_id, rx->sta, rx->skb);
4153
4154 return RX_QUEUED;
4155 }
4156
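/*
 * Account the final RX handler result: count queued vs. dropped frames
 * and free the skb with the result encoded as the drop reason (see
 * kfree_skb_reason()).
 */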
4157 static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
4158 ieee80211_rx_result res)
4159 {
4160 if (res == RX_QUEUED) {
4161 I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
4162 return;
4163 }
4164
4165 if (res != RX_CONTINUE) {
4166 I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
4167 if (rx->sta)
4168 rx->link_sta->rx_stats.dropped++;
4169 }
4170
4171 kfree_skb_reason(rx->skb, (__force u32)res);
4172 }
4173
4174 static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
4175 struct sk_buff_head *frames)
4176 {
4177 ieee80211_rx_result res = RX_DROP;
4178 struct sk_buff *skb;
4179
4180 #define CALL_RXH(rxh) \
4181 do { \
4182 res = rxh(rx); \
4183 if (res != RX_CONTINUE) \
4184 goto rxh_next; \
4185 } while (0)
4186
4187 /* Lock here to avoid hitting all of the data used in the RX
4188 * path (e.g. key data, station data, ...) concurrently when
4189 * a frame is released from the reorder buffer due to timeout
4190 * from the timer, potentially concurrently with RX from the
4191 * driver.
4192 */
4193 spin_lock_bh(&rx->local->rx_path_lock);
4194
4195 while ((skb = __skb_dequeue(frames))) {
4196 /*
4197 * All the other fields are valid across frames
4198 * that belong to an A-MPDU since they are on the
4199 * same TID from the same station.
4200 */
4201 rx->skb = skb;
4202
4203 if (WARN_ON_ONCE(!rx->link))
4204 goto rxh_next;
4205
4206 CALL_RXH(ieee80211_rx_h_check_more_data);
4207 CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll);
4208 CALL_RXH(ieee80211_rx_h_sta_process);
4209 CALL_RXH(ieee80211_rx_h_decrypt);
4210 CALL_RXH(ieee80211_rx_h_defragment);
4211 CALL_RXH(ieee80211_rx_h_michael_mic_verify);
4212 /* must be after MMIC verify so header is counted in MPDU mic */
4213 CALL_RXH(ieee80211_rx_h_amsdu);
4214 CALL_RXH(ieee80211_rx_h_data);
4215
4216 /* special treatment -- needs the queue */
4217 res = ieee80211_rx_h_ctrl(rx, frames);
4218 if (res != RX_CONTINUE)
4219 goto rxh_next;
4220
4221 CALL_RXH(ieee80211_rx_h_mgmt_check);
4222 CALL_RXH(ieee80211_rx_h_action);
4223 CALL_RXH(ieee80211_rx_h_userspace_mgmt);
4224 CALL_RXH(ieee80211_rx_h_action_post_userspace);
4225 CALL_RXH(ieee80211_rx_h_action_return);
4226 CALL_RXH(ieee80211_rx_h_ext);
4227 CALL_RXH(ieee80211_rx_h_mgmt);
4228
4229 rxh_next:
4230 ieee80211_rx_handlers_result(rx, res);
4231
4232 #undef CALL_RXH
4233 }
4234
4235 spin_unlock_bh(&rx->local->rx_path_lock);
4236 }
4237
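/*
 * Run the early checks (duplicate detection, basic validation), feed
 * the frame through the RX reorder buffer, and then invoke the
 * per-frame handlers on whatever the reorder code released.
 */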
4238 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
4239 {
4240 struct sk_buff_head reorder_release;
4241 ieee80211_rx_result res = RX_DROP;
4242
4243 __skb_queue_head_init(&reorder_release);
4244
4245 #define CALL_RXH(rxh) \
4246 do { \
4247 res = rxh(rx); \
4248 if (res != RX_CONTINUE) \
4249 goto rxh_next; \
4250 } while (0)
4251
4252 CALL_RXH(ieee80211_rx_h_check_dup);
4253 CALL_RXH(ieee80211_rx_h_check);
4254
4255 ieee80211_rx_reorder_ampdu(rx, &reorder_release);
4256
4257 ieee80211_rx_handlers(rx, &reorder_release);
4258 return;
4259
4260 rxh_next:
4261 ieee80211_rx_handlers_result(rx, res);
4262
4263 #undef CALL_RXH
4264 }
4265
4266 static bool
4267 ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
4268 {
4269 return !!(sta->valid_links & BIT(link_id));
4270 }
4271
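/*
 * Bind the RX data to a specific link: resolve the sdata link and, if
 * a station is known, the corresponding link_sta. Fails if the station
 * does not have this link.
 */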
4272 static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
4273 u8 link_id)
4274 {
4275 rx->link_id = link_id;
4276 rx->link = rcu_dereference(rx->sdata->link[link_id]);
4277
4278 if (!rx->sta)
4279 return rx->link;
4280
4281 if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
4282 return false;
4283
4284 rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
4285
4286 return rx->link && rx->link_sta;
4287 }
4288
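/*
 * Bind the RX data to a station (which may be NULL) and link; a
 * negative link_id selects the default link, taking MLD associations
 * into account.
 */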
4289 static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
4290 struct sta_info *sta, int link_id)
4291 {
4292 rx->link_id = link_id;
4293 rx->sta = sta;
4294
4295 if (sta) {
4296 rx->local = sta->sdata->local;
4297 if (!rx->sdata)
4298 rx->sdata = sta->sdata;
4299 rx->link_sta = &sta->deflink;
4300 } else {
4301 rx->link_sta = NULL;
4302 }
4303
4304 if (link_id < 0) {
4305 if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
4306 sta && !sta->sta.valid_links)
4307 rx->link =
4308 rcu_dereference(rx->sdata->link[sta->deflink.link_id]);
4309 else
4310 rx->link = &rx->sdata->deflink;
4311 } else if (!ieee80211_rx_data_set_link(rx, link_id)) {
4312 return false;
4313 }
4314
4315 return true;
4316 }
4317
4318 /*
4319 * This function makes calls into the RX path, therefore
4320 * it has to be invoked under RCU read lock.
4321 */
4322 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
4323 {
4324 struct sk_buff_head frames;
4325 struct ieee80211_rx_data rx = {
4326 /* This is OK -- must be QoS data frame */
4327 .security_idx = tid,
4328 .seqno_idx = tid,
4329 };
4330 struct tid_ampdu_rx *tid_agg_rx;
4331 int link_id = -1;
4332
4333 /* FIXME: statistics won't be right with this */
4334 if (sta->sta.valid_links)
4335 link_id = ffs(sta->sta.valid_links) - 1;
4336
4337 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
4338 return;
4339
4340 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4341 if (!tid_agg_rx)
4342 return;
4343
4344 __skb_queue_head_init(&frames);
4345
4346 spin_lock(&tid_agg_rx->reorder_lock);
4347 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4348 spin_unlock(&tid_agg_rx->reorder_lock);
4349
4350 if (!skb_queue_empty(&frames)) {
4351 struct ieee80211_event event = {
4352 .type = BA_FRAME_TIMEOUT,
4353 .u.ba.tid = tid,
4354 .u.ba.sta = &sta->sta,
4355 };
4356 drv_event_callback(rx.local, rx.sdata, &event);
4357 }
4358
4359 ieee80211_rx_handlers(&rx, &frames);
4360 }
4361
4362 void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
4363 u16 ssn, u64 filtered,
4364 u16 received_mpdus)
4365 {
4366 struct ieee80211_local *local;
4367 struct sta_info *sta;
4368 struct tid_ampdu_rx *tid_agg_rx;
4369 struct sk_buff_head frames;
4370 struct ieee80211_rx_data rx = {
4371 /* This is OK -- must be QoS data frame */
4372 .security_idx = tid,
4373 .seqno_idx = tid,
4374 };
4375 int i, diff;
4376
4377 if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
4378 return;
4379
4380 __skb_queue_head_init(&frames);
4381
4382 sta = container_of(pubsta, struct sta_info, sta);
4383
4384 local = sta->sdata->local;
4385 WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64,
4386 "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n",
4387 local->hw.max_rx_aggregation_subframes);
4388
4389 if (!ieee80211_rx_data_set_sta(&rx, sta, -1))
4390 return;
4391
4392 rcu_read_lock();
4393 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
4394 if (!tid_agg_rx)
4395 goto out;
4396
4397 spin_lock_bh(&tid_agg_rx->reorder_lock);
4398
4399 if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
4400 int release;
4401
4402 /* release all frames in the reorder buffer */
4403 release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
4404 IEEE80211_SN_MODULO;
4405 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
4406 release, &frames);
4407 /* update ssn to match received ssn */
4408 tid_agg_rx->head_seq_num = ssn;
4409 } else {
4410 ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
4411 &frames);
4412 }
4413
4414 /* Handle the case where the received SSN is behind the MAC's SSN;
4415 * it can be up to tid_agg_rx->buf_size behind and still be valid. */
4416 diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
4417 if (diff >= tid_agg_rx->buf_size) {
4418 tid_agg_rx->reorder_buf_filtered = 0;
4419 goto release;
4420 }
4421 filtered = filtered >> diff;
4422 ssn += diff;
4423
4424 /* update bitmap */
4425 for (i = 0; i < tid_agg_rx->buf_size; i++) {
4426 int index = (ssn + i) % tid_agg_rx->buf_size;
4427
4428 tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
4429 if (filtered & BIT_ULL(i))
4430 tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
4431 }
4432
4433 /* now process also frames that the filter marking released */
4434 ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
4435
4436 release:
4437 spin_unlock_bh(&tid_agg_rx->reorder_lock);
4438
4439 ieee80211_rx_handlers(&rx, &frames);
4440
4441 out:
4442 rcu_read_unlock();
4443 }
4444 EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
4445
4446 /* main receive path */
4447
4448 static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr)
4449 {
4450 return ether_addr_equal(raddr, addr) ||
4451 is_broadcast_ether_addr(raddr);
4452 }
4453
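/*
 * Interface-type specific acceptance filter: decide whether this
 * interface should process the received frame at all, based on the
 * frame's addressing (BSSID, A1/A2) and type.
 */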
4454 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
4455 {
4456 struct ieee80211_sub_if_data *sdata = rx->sdata;
4457 struct sk_buff *skb = rx->skb;
4458 struct ieee80211_hdr *hdr = (void *)skb->data;
4459 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4460 u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
4461 bool multicast = is_multicast_ether_addr(hdr->addr1) ||
4462 ieee80211_is_s1g_beacon(hdr->frame_control);
4463
4464 switch (sdata->vif.type) {
4465 case NL80211_IFTYPE_STATION:
4466 if (!bssid && !sdata->u.mgd.use_4addr)
4467 return false;
4468 if (ieee80211_is_first_frag(hdr->seq_ctrl) &&
4469 ieee80211_is_robust_mgmt_frame(skb) && !rx->sta)
4470 return false;
4471 if (multicast)
4472 return true;
4473 return ieee80211_is_our_addr(sdata, hdr->addr1, &rx->link_id);
4474 case NL80211_IFTYPE_ADHOC:
4475 if (!bssid)
4476 return false;
4477 if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
4478 ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
4479 !is_valid_ether_addr(hdr->addr2))
4480 return false;
4481 if (ieee80211_is_beacon(hdr->frame_control))
4482 return true;
4483 if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid))
4484 return false;
4485 if (!multicast &&
4486 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
4487 return false;
4488 if (!rx->sta) {
4489 int rate_idx;
4490 if (status->encoding != RX_ENC_LEGACY)
4491 rate_idx = 0; /* TODO: HT/VHT rates */
4492 else
4493 rate_idx = status->rate_idx;
4494 ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
4495 BIT(rate_idx));
4496 }
4497 return true;
4498 case NL80211_IFTYPE_OCB:
4499 if (!bssid)
4500 return false;
4501 if (!ieee80211_is_data_present(hdr->frame_control))
4502 return false;
4503 if (!is_broadcast_ether_addr(bssid))
4504 return false;
4505 if (!multicast &&
4506 !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
4507 return false;
4508 /* reject invalid/our STA address */
4509 if (!is_valid_ether_addr(hdr->addr2) ||
4510 ether_addr_equal(sdata->dev->dev_addr, hdr->addr2))
4511 return false;
4512 if (!rx->sta) {
4513 int rate_idx;
4514 if (status->encoding != RX_ENC_LEGACY)
4515 rate_idx = 0; /* TODO: HT rates */
4516 else
4517 rate_idx = status->rate_idx;
4518 ieee80211_ocb_rx_no_sta(sdata, bssid, hdr->addr2,
4519 BIT(rate_idx));
4520 }
4521 return true;
4522 case NL80211_IFTYPE_MESH_POINT:
4523 if (ether_addr_equal(sdata->vif.addr, hdr->addr2))
4524 return false;
4525 if (multicast)
4526 return true;
4527 return ether_addr_equal(sdata->vif.addr, hdr->addr1);
4528 case NL80211_IFTYPE_AP_VLAN:
4529 case NL80211_IFTYPE_AP:
4530 if (!bssid)
4531 return ieee80211_is_our_addr(sdata, hdr->addr1,
4532 &rx->link_id);
4533
4534 if (!is_broadcast_ether_addr(bssid) &&
4535 !ieee80211_is_our_addr(sdata, bssid, NULL)) {
4536 /*
4537 * Accept public action frames even when the
4538 * BSSID doesn't match, this is used for P2P
4539 * and location updates. Note that mac80211
4540 * itself never looks at these frames.
4541 */
4542 if (!multicast &&
4543 !ieee80211_is_our_addr(sdata, hdr->addr1,
4544 &rx->link_id))
4545 return false;
4546 if (ieee80211_is_public_action(hdr, skb->len))
4547 return true;
4548 return ieee80211_is_beacon(hdr->frame_control);
4549 }
4550
4551 if (!ieee80211_has_tods(hdr->frame_control)) {
4552 /* ignore data frames to TDLS-peers */
4553 if (ieee80211_is_data(hdr->frame_control))
4554 return false;
4555 /* ignore action frames to TDLS-peers */
4556 if (ieee80211_is_action(hdr->frame_control) &&
4557 !is_broadcast_ether_addr(bssid) &&
4558 !ether_addr_equal(bssid, hdr->addr1))
4559 return false;
4560 }
4561
4562 /*
4563 * 802.11-2016 Table 9-26 says that for data frames, A1 must be
4564 * the BSSID - we've checked that already but may have accepted
4565 * the wildcard (ff:ff:ff:ff:ff:ff).
4566 *
4567 * It also says:
4568 * The BSSID of the Data frame is determined as follows:
4569 * a) If the STA is contained within an AP or is associated
4570 * with an AP, the BSSID is the address currently in use
4571 * by the STA contained in the AP.
4572 *
4573 * So we should not accept data frames with an address that's
4574 * multicast.
4575 *
4576 * Accepting it also opens a security problem because stations
4577 * could encrypt it with the GTK and inject traffic that way.
4578 */
4579 if (ieee80211_is_data(hdr->frame_control) && multicast)
4580 return false;
4581
4582 return true;
4583 case NL80211_IFTYPE_P2P_DEVICE:
4584 return ieee80211_is_public_action(hdr, skb->len) ||
4585 ieee80211_is_probe_req(hdr->frame_control) ||
4586 ieee80211_is_probe_resp(hdr->frame_control) ||
4587 ieee80211_is_beacon(hdr->frame_control) ||
4588 (ieee80211_is_auth(hdr->frame_control) &&
4589 ether_addr_equal(sdata->vif.addr, hdr->addr1));
4590 case NL80211_IFTYPE_NAN:
4591 /* Accept only frames that are addressed to the NAN cluster
4592 * (based on the Cluster ID). From these frames, accept only
4593 * action frames or authentication frames that are addressed to
4594 * the local NAN interface.
4595 */
4596 return memcmp(sdata->wdev.u.nan.cluster_id,
4597 hdr->addr3, ETH_ALEN) == 0 &&
4598 (ieee80211_is_public_action(hdr, skb->len) ||
4599 (ieee80211_is_auth(hdr->frame_control) &&
4600 ether_addr_equal(sdata->vif.addr, hdr->addr1)));
4601 default:
4602 break;
4603 }
4604
4605 WARN_ON_ONCE(1);
4606 return false;
4607 }
4608
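/*
 * (Re)evaluate whether the fast-rx path can be used for this station
 * and, if so, install a fast_rx cache entry describing the expected
 * header layout and key; also toggles RX decapsulation offload when
 * the interface supports it.
 */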
4609 void ieee80211_check_fast_rx(struct sta_info *sta)
4610 {
4611 struct ieee80211_sub_if_data *sdata = sta->sdata;
4612 struct ieee80211_local *local = sdata->local;
4613 struct ieee80211_key *key;
4614 struct ieee80211_fast_rx fastrx = {
4615 .dev = sdata->dev,
4616 .vif_type = sdata->vif.type,
4617 .control_port_protocol = sdata->control_port_protocol,
4618 }, *old, *new = NULL;
4619 u32 offload_flags;
4620 bool set_offload = false;
4621 bool assign = false;
4622 bool offload;
4623
4624 /* use sparse to check that we don't return without updating */
4625 __acquire(check_fast_rx);
4626
4627 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != sizeof(rfc1042_header));
4628 BUILD_BUG_ON(sizeof(fastrx.rfc1042_hdr) != ETH_ALEN);
4629 ether_addr_copy(fastrx.rfc1042_hdr, rfc1042_header);
4630 ether_addr_copy(fastrx.vif_addr, sdata->vif.addr);
4631
4632 fastrx.uses_rss = ieee80211_hw_check(&local->hw, USES_RSS);
4633
4634 /* fast-rx doesn't do reordering */
4635 if (ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) &&
4636 !ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER))
4637 goto clear;
4638
4639 switch (sdata->vif.type) {
4640 case NL80211_IFTYPE_STATION:
4641 if (sta->sta.tdls) {
4642 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4643 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4644 fastrx.expected_ds_bits = 0;
4645 } else {
4646 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
4647 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
4648 fastrx.expected_ds_bits =
4649 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4650 }
4651
4652 if (sdata->u.mgd.use_4addr && !sta->sta.tdls) {
4653 fastrx.expected_ds_bits |=
4654 cpu_to_le16(IEEE80211_FCTL_TODS);
4655 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4656 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4657 }
4658
4659 if (!sdata->u.mgd.powersave)
4660 break;
4661
4662 /* software powersave is a huge mess, avoid all of it */
4663 if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
4664 goto clear;
4665 if (ieee80211_hw_check(&local->hw, SUPPORTS_PS) &&
4666 !ieee80211_hw_check(&local->hw, SUPPORTS_DYNAMIC_PS))
4667 goto clear;
4668 break;
4669 case NL80211_IFTYPE_AP_VLAN:
4670 case NL80211_IFTYPE_AP:
4671 /* parallel-rx requires this, at least with calls to
4672 * ieee80211_sta_ps_transition()
4673 */
4674 if (!ieee80211_hw_check(&local->hw, AP_LINK_PS))
4675 goto clear;
4676 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4677 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
4678 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_TODS);
4679
4680 fastrx.internal_forward =
4681 !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
4682 (sdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
4683 !sdata->u.vlan.sta);
4684
4685 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4686 sdata->u.vlan.sta) {
4687 fastrx.expected_ds_bits |=
4688 cpu_to_le16(IEEE80211_FCTL_FROMDS);
4689 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4690 fastrx.internal_forward = 0;
4691 }
4692
4693 break;
4694 case NL80211_IFTYPE_MESH_POINT:
4695 fastrx.expected_ds_bits = cpu_to_le16(IEEE80211_FCTL_FROMDS |
4696 IEEE80211_FCTL_TODS);
4697 fastrx.da_offs = offsetof(struct ieee80211_hdr, addr3);
4698 fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
4699 break;
4700 default:
4701 goto clear;
4702 }
4703
4704 if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
4705 goto clear;
4706
4707 rcu_read_lock();
4708 key = rcu_dereference(sta->ptk[sta->ptk_idx]);
4709 if (!key)
4710 key = rcu_dereference(sdata->default_unicast_key);
4711 if (key) {
4712 switch (key->conf.cipher) {
4713 case WLAN_CIPHER_SUITE_TKIP:
4714 /* we don't want to deal with MMIC in fast-rx */
4715 goto clear_rcu;
4716 case WLAN_CIPHER_SUITE_CCMP:
4717 case WLAN_CIPHER_SUITE_CCMP_256:
4718 case WLAN_CIPHER_SUITE_GCMP:
4719 case WLAN_CIPHER_SUITE_GCMP_256:
4720 break;
4721 default:
4722 /* We also don't want to deal with
4723 * WEP or cipher scheme.
4724 */
4725 goto clear_rcu;
4726 }
4727
4728 fastrx.key = true;
4729 fastrx.icv_len = key->conf.icv_len;
4730 }
4731
4732 assign = true;
4733 clear_rcu:
4734 rcu_read_unlock();
4735 clear:
4736 __release(check_fast_rx);
4737
4738 if (assign)
4739 new = kmemdup(&fastrx, sizeof(fastrx), GFP_KERNEL);
4740
4741 offload_flags = get_bss_sdata(sdata)->vif.offload_flags;
4742 offload = offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED;
4743
4744 if (assign && offload)
4745 set_offload = !test_and_set_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4746 else
4747 set_offload = test_and_clear_sta_flag(sta, WLAN_STA_DECAP_OFFLOAD);
4748
4749 if (set_offload)
4750 drv_sta_set_decap_offload(local, sdata, &sta->sta, assign);
4751
4752 spin_lock_bh(&sta->lock);
4753 old = rcu_dereference_protected(sta->fast_rx, true);
4754 rcu_assign_pointer(sta->fast_rx, new);
4755 spin_unlock_bh(&sta->lock);
4756
4757 if (old)
4758 kfree_rcu(old, rcu_head);
4759 }
4760
4761 void ieee80211_clear_fast_rx(struct sta_info *sta)
4762 {
4763 struct ieee80211_fast_rx *old;
4764
4765 spin_lock_bh(&sta->lock);
4766 old = rcu_dereference_protected(sta->fast_rx, true);
4767 RCU_INIT_POINTER(sta->fast_rx, NULL);
4768 spin_unlock_bh(&sta->lock);
4769
4770 if (old)
4771 kfree_rcu(old, rcu_head);
4772 }
4773
4774 void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4775 {
4776 struct ieee80211_local *local = sdata->local;
4777 struct sta_info *sta;
4778
4779 lockdep_assert_wiphy(local->hw.wiphy);
4780
4781 list_for_each_entry(sta, &local->sta_list, list) {
4782 if (sdata != sta->sdata &&
4783 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
4784 continue;
4785 ieee80211_check_fast_rx(sta);
4786 }
4787 }
4788
4789 void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
4790 {
4791 struct ieee80211_local *local = sdata->local;
4792
4793 lockdep_assert_wiphy(local->hw.wiphy);
4794
4795 __ieee80211_check_fast_rx_iface(sdata);
4796 }
4797
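/*
 * Deliver an already 802.3-formatted frame: update per-link RX
 * statistics, optionally bridge it to another station on the same AP
 * interface, and hand it to the local network stack.
 */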
4798 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
4799 struct ieee80211_fast_rx *fast_rx,
4800 int orig_len)
4801 {
4802 struct ieee80211_sta_rx_stats *stats;
4803 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
4804 struct sta_info *sta = rx->sta;
4805 struct link_sta_info *link_sta;
4806 struct sk_buff *skb = rx->skb;
4807 void *sa = skb->data + ETH_ALEN;
4808 void *da = skb->data;
4809
4810 if (rx->link_id >= 0) {
4811 link_sta = rcu_dereference(sta->link[rx->link_id]);
4812 if (WARN_ON_ONCE(!link_sta)) {
4813 dev_kfree_skb(rx->skb);
4814 return;
4815 }
4816 } else {
4817 link_sta = &sta->deflink;
4818 }
4819
4820 stats = &link_sta->rx_stats;
4821 if (fast_rx->uses_rss)
4822 stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
4823
4824 /* statistics part of ieee80211_rx_h_sta_process() */
4825 if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
4826 stats->last_signal = status->signal;
4827 if (!fast_rx->uses_rss)
4828 ewma_signal_add(&link_sta->rx_stats_avg.signal,
4829 -status->signal);
4830 }
4831
4832 if (status->chains) {
4833 int i;
4834
4835 stats->chains = status->chains;
4836 for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
4837 int signal = status->chain_signal[i];
4838
4839 if (!(status->chains & BIT(i)))
4840 continue;
4841
4842 stats->chain_signal_last[i] = signal;
4843 if (!fast_rx->uses_rss)
4844 ewma_signal_add(&link_sta->rx_stats_avg.chain_signal[i],
4845 -signal);
4846 }
4847 }
4848 /* end of statistics */
4849
4850 stats->last_rx = jiffies;
4851 stats->last_rate = sta_stats_encode_rate(status);
4852
4853 stats->fragments++;
4854 stats->packets++;
4855
4856 skb->dev = fast_rx->dev;
4857
4858 dev_sw_netstats_rx_add(fast_rx->dev, skb->len);
4859
4860 /* The seqno index has the same property as needed
4861 * for the rx_msdu field, i.e. it is IEEE80211_NUM_TIDS
4862 * for non-QoS-data frames. Here we know it's a data
4863 * frame, so count MSDUs.
4864 */
4865 u64_stats_update_begin(&stats->syncp);
4866 stats->msdu[rx->seqno_idx]++;
4867 stats->bytes += orig_len;
4868 u64_stats_update_end(&stats->syncp);
4869
4870 if (fast_rx->internal_forward) {
4871 struct sk_buff *xmit_skb = NULL;
4872 if (is_multicast_ether_addr(da)) {
4873 xmit_skb = skb_copy(skb, GFP_ATOMIC);
4874 } else if (!ether_addr_equal(da, sa) &&
4875 sta_info_get(rx->sdata, da)) {
4876 xmit_skb = skb;
4877 skb = NULL;
4878 }
4879
4880 if (xmit_skb) {
4881 /*
4882 * Send to wireless media and increase priority by 256
4883 * to keep the received priority instead of
4884 * reclassifying the frame (see cfg80211_classify8021d).
4885 */
4886 xmit_skb->priority += 256;
4887 xmit_skb->protocol = htons(ETH_P_802_3);
4888 skb_reset_network_header(xmit_skb);
4889 skb_reset_mac_header(xmit_skb);
4890 dev_queue_xmit(xmit_skb);
4891 }
4892
4893 if (!skb)
4894 return;
4895 }
4896
4897 /* deliver to local stack */
4898 skb->protocol = eth_type_trans(skb, fast_rx->dev);
4899 ieee80211_deliver_skb_to_local_stack(skb, rx);
4900 }
4901
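/*
 * The fast-rx data path: verify the preconditions recorded in the
 * fast_rx cache entry (addressing, crypto flags, SNAP header), strip
 * the IV/ICV and SNAP, convert the 802.11 header to an Ethernet header
 * and deliver the frame; returns false to fall back to the regular
 * (slow) RX path.
 */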
4902 static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
4903 struct ieee80211_fast_rx *fast_rx)
4904 {
4905 struct sk_buff *skb = rx->skb;
4906 struct ieee80211_hdr *hdr = (void *)skb->data;
4907 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
4908 ieee80211_rx_result res;
4909 int orig_len = skb->len;
4910 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
4911 int snap_offs = hdrlen;
4912 struct {
4913 u8 snap[sizeof(rfc1042_header)];
4914 __be16 proto;
4915 } *payload __aligned(2);
4916 struct {
4917 u8 da[ETH_ALEN];
4918 u8 sa[ETH_ALEN];
4919 } addrs __aligned(2);
4920 struct ieee80211_sta_rx_stats *stats;
4921
4922 /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
4923 * to a common data structure; drivers can implement that per queue
4924 * but we don't have that information in mac80211
4925 */
4926 if (!(status->flag & RX_FLAG_DUP_VALIDATED))
4927 return false;
4928
4929 #define FAST_RX_CRYPT_FLAGS (RX_FLAG_PN_VALIDATED | RX_FLAG_DECRYPTED)
4930
4931 /* If using encryption, we also need to have:
4932 * - PN_VALIDATED: similar, but the implementation is tricky
4933 * - DECRYPTED: necessary for PN_VALIDATED
4934 */
4935 if (fast_rx->key &&
4936 (status->flag & FAST_RX_CRYPT_FLAGS) != FAST_RX_CRYPT_FLAGS)
4937 return false;
4938
4939 if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
4940 return false;
4941
4942 if (unlikely(ieee80211_is_frag(hdr)))
4943 return false;
4944
4945 /* Since our interface address cannot be multicast, this
4946 * implicitly also rejects multicast frames without the
4947 * explicit check.
4948 *
4949 * We shouldn't get any *data* frames not addressed to us
4950 * (AP mode will accept multicast *management* frames), but
4951 * punting here will make it go through the full checks in
4952 * ieee80211_accept_frame().
4953 */
4954 if (!ether_addr_equal(fast_rx->vif_addr, hdr->addr1))
4955 return false;
4956
4957 if ((hdr->frame_control & cpu_to_le16(IEEE80211_FCTL_FROMDS |
4958 IEEE80211_FCTL_TODS)) !=
4959 fast_rx->expected_ds_bits)
4960 return false;
4961
4962 /* assign the key to drop unencrypted frames (later)
4963 * and strip the IV/MIC if necessary
4964 */
4965 if (fast_rx->key && !(status->flag & RX_FLAG_IV_STRIPPED)) {
4966 /* GCMP header length is the same */
4967 snap_offs += IEEE80211_CCMP_HDR_LEN;
4968 }
4969
4970 if (!ieee80211_vif_is_mesh(&rx->sdata->vif) &&
4971 !(status->rx_flags & IEEE80211_RX_AMSDU)) {
4972 if (!pskb_may_pull(skb, snap_offs + sizeof(*payload)))
4973 return false;
4974
4975 payload = (void *)(skb->data + snap_offs);
4976
4977 if (!ether_addr_equal(payload->snap, fast_rx->rfc1042_hdr))
4978 return false;
4979
4980 /* Don't handle these here since they require special code.
4981 * Accept AARP and IPX even though they should come with a
4982 * bridge-tunnel header - but if we get them this way then
4983 * there's little point in discarding them.
4984 */
4985 if (unlikely(payload->proto == cpu_to_be16(ETH_P_TDLS) ||
4986 payload->proto == fast_rx->control_port_protocol))
4987 return false;
4988 }
4989
4990 /* after this point, don't punt to the slowpath! */
4991
4992 if (fast_rx->uses_rss)
4993 stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
4994 else
4995 stats = &rx->link_sta->rx_stats;
4996
4997 if (rx->key && !(status->flag & RX_FLAG_MIC_STRIPPED) &&
4998 pskb_trim(skb, skb->len - fast_rx->icv_len))
4999 goto drop;
5000
5001 if (rx->key && !ieee80211_has_protected(hdr->frame_control))
5002 goto drop;
5003
5004 if (status->rx_flags & IEEE80211_RX_AMSDU) {
5005 if (__ieee80211_rx_h_amsdu(rx, snap_offs - hdrlen) !=
5006 RX_QUEUED)
5007 goto drop;
5008
5009 return true;
5010 }
5011
5012 /* do the header conversion - first grab the addresses */
5013 ether_addr_copy(addrs.da, skb->data + fast_rx->da_offs);
5014 ether_addr_copy(addrs.sa, skb->data + fast_rx->sa_offs);
5015 if (ieee80211_vif_is_mesh(&rx->sdata->vif)) {
5016 skb_pull(skb, snap_offs - 2);
5017 put_unaligned_be16(skb->len - 2, skb->data);
5018 } else {
5019 skb_postpull_rcsum(skb, skb->data + snap_offs,
5020 sizeof(rfc1042_header) + 2);
5021
5022 /* remove the SNAP but leave the ethertype */
5023 skb_pull(skb, snap_offs + sizeof(rfc1042_header));
5024 }
5025 /* push the addresses in front */
5026 memcpy(skb_push(skb, sizeof(addrs)), &addrs, sizeof(addrs));
5027
5028 res = ieee80211_rx_mesh_data(rx->sdata, rx->sta, rx->skb);
5029 switch (res) {
5030 case RX_QUEUED:
5031 stats->last_rx = jiffies;
5032 stats->last_rate = sta_stats_encode_rate(status);
5033 return true;
5034 case RX_CONTINUE:
5035 break;
5036 default:
5037 goto drop;
5038 }
5039
5040 ieee80211_rx_8023(rx, fast_rx, orig_len);
5041
5042 return true;
5043 drop:
5044 dev_kfree_skb(skb);
5045
5046 stats->dropped++;
5047 return true;
5048 }
5049
5050 /*
5051 * This function returns whether the SKB was destined
5052 * for RX processing, which, if consume is true, is
5053 * equivalent to whether or not the skb was
5054 * consumed.
5055 */
5056 static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
5057 struct sk_buff *skb, bool consume)
5058 {
5059 struct ieee80211_local *local = rx->local;
5060 struct ieee80211_sub_if_data *sdata = rx->sdata;
5061 struct ieee80211_hdr *hdr = (void *)skb->data;
5062 struct link_sta_info *link_sta = rx->link_sta;
5063 struct ieee80211_link_data *link = rx->link;
5064
5065 rx->skb = skb;
5066
5067 /* See if we can do fast-rx; if we have to copy we already lost,
5068 * so punt in that case. We should never have to deliver a data
5069 * frame to multiple interfaces anyway.
5070 *
5071 * We skip the ieee80211_accept_frame() call and do the necessary
5072 * checking inside ieee80211_invoke_fast_rx().
5073 */
5074 if (consume && rx->sta) {
5075 struct ieee80211_fast_rx *fast_rx;
5076
5077 fast_rx = rcu_dereference(rx->sta->fast_rx);
5078 if (fast_rx && ieee80211_invoke_fast_rx(rx, fast_rx))
5079 return true;
5080 }
5081
5082 if (!ieee80211_accept_frame(rx))
5083 return false;
5084
5085 if (!consume) {
5086 struct skb_shared_hwtstamps *shwt;
5087
5088 rx->skb = skb_copy(skb, GFP_ATOMIC);
5089 if (!rx->skb) {
5090 if (net_ratelimit())
5091 wiphy_debug(local->hw.wiphy,
5092 "failed to copy skb for %s\n",
5093 sdata->name);
5094 return true;
5095 }
5096
5097 /* skb_copy() does not copy the hw timestamps, so copy it
5098 * explicitly
5099 */
5100 shwt = skb_hwtstamps(rx->skb);
5101 shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
5102
5103 /* Update the hdr pointer to the new skb for translation below */
5104 hdr = (struct ieee80211_hdr *)rx->skb->data;
5105 }
5106
5107 if (unlikely(rx->sta && rx->sta->sta.mlo) &&
5108 is_unicast_ether_addr(hdr->addr1) &&
5109 !ieee80211_is_probe_resp(hdr->frame_control) &&
5110 !ieee80211_is_beacon(hdr->frame_control)) {
5111 /* translate to MLD addresses */
5112 if (ether_addr_equal(link->conf->addr, hdr->addr1))
5113 ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
5114 if (ether_addr_equal(link_sta->addr, hdr->addr2))
5115 ether_addr_copy(hdr->addr2, rx->sta->addr);
5116 /* translate A3 only if it's the BSSID */
5117 if (!ieee80211_has_tods(hdr->frame_control) &&
5118 !ieee80211_has_fromds(hdr->frame_control)) {
5119 if (ether_addr_equal(link_sta->addr, hdr->addr3))
5120 ether_addr_copy(hdr->addr3, rx->sta->addr);
5121 else if (ether_addr_equal(link->conf->addr, hdr->addr3))
5122 ether_addr_copy(hdr->addr3, rx->sdata->vif.addr);
5123 }
5124 /* not needed for A4 since it can only carry the SA */
5125 }
5126
5127 ieee80211_invoke_rx_handlers(rx);
5128 return true;
5129 }
5130
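/*
 * RX entry point for frames the hardware already decapsulated to
 * 802.3: only statistics and delivery via the station's fast_rx state
 * are performed here.
 */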
5131 static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
5132 struct ieee80211_sta *pubsta,
5133 struct sk_buff *skb,
5134 struct list_head *list)
5135 {
5136 struct ieee80211_local *local = hw_to_local(hw);
5137 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5138 struct ieee80211_fast_rx *fast_rx;
5139 struct ieee80211_rx_data rx;
5140 struct sta_info *sta;
5141 int link_id = -1;
5142
5143 memset(&rx, 0, sizeof(rx));
5144 rx.skb = skb;
5145 rx.local = local;
5146 rx.list = list;
5147 rx.link_id = -1;
5148
5149 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
5150
5151 /* drop frame if too short for header */
5152 if (skb->len < sizeof(struct ethhdr))
5153 goto drop;
5154
5155 if (!pubsta)
5156 goto drop;
5157
5158 if (status->link_valid)
5159 link_id = status->link_id;
5160
5161 /*
5162 * TODO: Should the frame be dropped if the right link_id is not
5163 * available? Or is it fine in the current form to proceed with
5164 * the frame processing, because with the frame being in 802.3
5165 * format, link_id is used only for statistics and updating the
5166 * stats on the deflink is fine?
5167 */
5168 sta = container_of(pubsta, struct sta_info, sta);
5169 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
5170 goto drop;
5171
5172 fast_rx = rcu_dereference(rx.sta->fast_rx);
5173 if (!fast_rx)
5174 goto drop;
5175
5176 ieee80211_rx_8023(&rx, fast_rx, skb->len);
5177 return;
5178
5179 drop:
5180 dev_kfree_skb(skb);
5181 }
5182
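/*
 * Resolve the transmitting station (and, for MLO, the link) for this
 * interface and run the frame through the RX handlers.
 */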
5183 static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
5184 struct sk_buff *skb, bool consume)
5185 {
5186 struct link_sta_info *link_sta;
5187 struct ieee80211_hdr *hdr = (void *)skb->data;
5188 struct sta_info *sta;
5189 int link_id = -1;
5190
5191 /*
5192 * Look up the link station first, in case it has a
5193 * link address that is identical to the MLD address;
5194 * that way we'll have the link information if
5195 * needed.
5196 */
5197 link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
5198 if (link_sta) {
5199 sta = link_sta->sta;
5200 link_id = link_sta->link_id;
5201 } else {
5202 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5203
5204 sta = sta_info_get_bss(rx->sdata, hdr->addr2);
5205 if (status->link_valid) {
5206 link_id = status->link_id;
5207 } else if (ieee80211_vif_is_mld(&rx->sdata->vif) &&
5208 status->freq) {
5209 struct ieee80211_link_data *link;
5210 struct ieee80211_chanctx_conf *conf;
5211
5212 for_each_link_data_rcu(rx->sdata, link) {
5213 conf = rcu_dereference(link->conf->chanctx_conf);
5214 if (!conf || !conf->def.chan)
5215 continue;
5216
5217 if (status->freq == conf->def.chan->center_freq) {
5218 link_id = link->link_id;
5219 break;
5220 }
5221 }
5222 }
5223 }
5224
5225 if (!ieee80211_rx_data_set_sta(rx, sta, link_id))
5226 return false;
5227
5228 return ieee80211_prepare_and_rx_handle(rx, skb, consume);
5229 }
5230
5231 /*
5232 * This is the actual Rx frame handler. As it belongs to the Rx path, it
5233 * must be called with rcu_read_lock protection.
5234 */
5235 static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
5236 struct ieee80211_sta *pubsta,
5237 struct sk_buff *skb,
5238 struct list_head *list)
5239 {
5240 struct ieee80211_local *local = hw_to_local(hw);
5241 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5242 struct ieee80211_sub_if_data *sdata;
5243 struct ieee80211_hdr *hdr;
5244 __le16 fc;
5245 struct ieee80211_rx_data rx;
5246 struct ieee80211_sub_if_data *prev;
5247 struct rhlist_head *tmp;
5248 int err = 0;
5249
5250 fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
5251 memset(&rx, 0, sizeof(rx));
5252 rx.skb = skb;
5253 rx.local = local;
5254 rx.list = list;
5255 rx.link_id = -1;
5256
5257 if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
5258 I802_DEBUG_INC(local->dot11ReceivedFragmentCount);
5259
5260 if (ieee80211_is_mgmt(fc)) {
5261 /* drop frame if too short for header */
5262 if (skb->len < ieee80211_hdrlen(fc))
5263 err = -ENOBUFS;
5264 else
5265 err = skb_linearize(skb);
5266 } else {
5267 err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
5268 }
5269
5270 if (err) {
5271 dev_kfree_skb(skb);
5272 return;
5273 }
5274
5275 hdr = (struct ieee80211_hdr *)skb->data;
5276 ieee80211_parse_qos(&rx);
5277 ieee80211_verify_alignment(&rx);
5278
5279 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
5280 ieee80211_is_beacon(hdr->frame_control) ||
5281 ieee80211_is_s1g_beacon(hdr->frame_control)))
5282 ieee80211_scan_rx(local, skb);
5283
5284 if (ieee80211_is_data(fc)) {
5285 struct sta_info *sta, *prev_sta;
5286 int link_id = -1;
5287
5288 if (status->link_valid)
5289 link_id = status->link_id;
5290
5291 if (pubsta) {
5292 sta = container_of(pubsta, struct sta_info, sta);
5293 if (!ieee80211_rx_data_set_sta(&rx, sta, link_id))
5294 goto out;
5295
5296 /*
5297 * In an MLO connection, fetch the link_id using addr2
5298 * when the driver does not pass a link_id in the status.
5299 * When the address translation has already been performed
5300 * by the driver/hw, a valid link_id must be passed in the
5301 * status.
5302 */
5303
5304 if (!status->link_valid && pubsta->mlo) {
5305 struct link_sta_info *link_sta;
5306
5307 link_sta = link_sta_info_get_bss(rx.sdata,
5308 hdr->addr2);
5309 if (!link_sta)
5310 goto out;
5311
5312 ieee80211_rx_data_set_link(&rx, link_sta->link_id);
5313 }
5314
5315 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5316 return;
5317 goto out;
5318 }
5319
5320 prev_sta = NULL;
5321
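/*
 * No station was provided by the driver: walk all stations whose
 * address matches the transmitter (there may be one per interface)
 * and let each process a copy of the frame; the last match, handled
 * after the loop, gets to consume the original skb.
 */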
5322 for_each_sta_info(local, hdr->addr2, sta, tmp) {
5323 if (!prev_sta) {
5324 prev_sta = sta;
5325 continue;
5326 }
5327
5328 rx.sdata = prev_sta->sdata;
5329 if (!status->link_valid && prev_sta->sta.mlo) {
5330 struct link_sta_info *link_sta;
5331
5332 link_sta = link_sta_info_get_bss(rx.sdata,
5333 hdr->addr2);
5334 if (!link_sta)
5335 continue;
5336
5337 link_id = link_sta->link_id;
5338 }
5339
5340 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
5341 goto out;
5342
5343 ieee80211_prepare_and_rx_handle(&rx, skb, false);
5344
5345 prev_sta = sta;
5346 }
5347
5348 if (prev_sta) {
5349 rx.sdata = prev_sta->sdata;
5350 if (!status->link_valid && prev_sta->sta.mlo) {
5351 struct link_sta_info *link_sta;
5352
5353 link_sta = link_sta_info_get_bss(rx.sdata,
5354 hdr->addr2);
5355 if (!link_sta)
5356 goto out;
5357
5358 link_id = link_sta->link_id;
5359 }
5360
5361 if (!ieee80211_rx_data_set_sta(&rx, prev_sta, link_id))
5362 goto out;
5363
5364 if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
5365 return;
5366 goto out;
5367 }
5368 }
5369
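/*
 * Either this wasn't a data frame or no matching station was found:
 * offer the frame to every running interface (except monitor and
 * AP_VLAN ones), again copying it for all but the last candidate.
 */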
5370 prev = NULL;
5371
5372 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
5373 if (!ieee80211_sdata_running(sdata))
5374 continue;
5375
5376 if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
5377 sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
5378 continue;
5379
5380 /*
5381 * The frame is destined for this interface, but if it's
5382 * not also for the previous one we handle that after
5383 * the loop to avoid copying the SKB once too often
5384 */
5385
5386 if (!prev) {
5387 prev = sdata;
5388 continue;
5389 }
5390
5391 rx.sdata = prev;
5392 ieee80211_rx_for_interface(&rx, skb, false);
5393
5394 prev = sdata;
5395 }
5396
5397 if (prev) {
5398 rx.sdata = prev;
5399
5400 if (ieee80211_rx_for_interface(&rx, skb, true))
5401 return;
5402 }
5403
5404 out:
5405 dev_kfree_skb(skb);
5406 }
5407
5408 /*
5409 * This is the receive path handler. It is called by a low-level driver when an
5410 * 802.11 MPDU is received from the hardware.
5411 */
5412 void ieee80211_rx_list(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
5413 struct sk_buff *skb, struct list_head *list)
5414 {
5415 struct ieee80211_local *local = hw_to_local(hw);
5416 struct ieee80211_rate *rate = NULL;
5417 struct ieee80211_supported_band *sband;
5418 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
5419 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
5420
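/* drivers must call this with BHs disabled (softirq context) */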
5421 WARN_ON_ONCE(softirq_count() == 0);
5422
5423 if (WARN_ON(status->band >= NUM_NL80211_BANDS))
5424 goto drop;
5425
5426 sband = local->hw.wiphy->bands[status->band];
5427 if (WARN_ON(!sband))
5428 goto drop;
5429
5430 /*
5431 * If we're suspending, it is possible although not too likely
5432 * that we'd be receiving frames after having already partially
5433 * quiesced the stack. We can't process such frames then since
5434 * that might, for example, cause stations to be added or other
5435 * driver callbacks to be invoked.
5436 */
5437 if (unlikely(local->quiescing || local->suspended))
5438 goto drop;
5439
5440 /* We might be during a HW reconfig, prevent Rx for the same reason */
5441 if (unlikely(local->in_reconfig))
5442 goto drop;
5443
5444 /*
5445 * The same happens when we're not even started,
5446 * but that's worth a warning.
5447 */
5448 if (WARN_ON(!local->started))
5449 goto drop;
5450
5451 if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC) &&
5452 !(status->flag & RX_FLAG_NO_PSDU &&
5453 status->zero_length_psdu_type ==
5454 IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED))) {
5455 /*
5456 * Validate the rate, unless there was a PLCP error which may
5457 * have an invalid rate or the PSDU was not captured and may be
5458 * missing rate information.
5459 */
5460
5461 switch (status->encoding) {
5462 case RX_ENC_HT:
5463 /*
5464 * rate_idx is MCS index, which can be [0-76]
5465 * as documented on:
5466 *
5467 * https://wireless.wiki.kernel.org/en/developers/Documentation/ieee80211/802.11n
5468 *
5469 * Anything else would be some sort of driver or
5470 * hardware error. The driver should catch hardware
5471 * errors.
5472 */
5473 if (WARN(status->rate_idx > 76,
5474 "Rate marked as an HT rate but passed "
5475 "status->rate_idx is not "
5476 "an MCS index [0-76]: %d (0x%02x)\n",
5477 status->rate_idx,
5478 status->rate_idx))
5479 goto drop;
5480 break;
5481 case RX_ENC_VHT:
5482 if (WARN_ONCE(status->rate_idx > 11 ||
5483 !status->nss ||
5484 status->nss > 8,
5485 "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
5486 status->rate_idx, status->nss))
5487 goto drop;
5488 break;
5489 case RX_ENC_HE:
5490 if (WARN_ONCE(status->rate_idx > 11 ||
5491 !status->nss ||
5492 status->nss > 8,
5493 "Rate marked as an HE rate but data is invalid: MCS: %d, NSS: %d\n",
5494 status->rate_idx, status->nss))
5495 goto drop;
5496 break;
5497 case RX_ENC_EHT:
5498 if (WARN_ONCE(status->rate_idx > 15 ||
5499 !status->nss ||
5500 status->nss > 8 ||
5501 status->eht.gi > NL80211_RATE_INFO_EHT_GI_3_2,
5502 "Rate marked as an EHT rate but data is invalid: MCS:%d, NSS:%d, GI:%d\n",
5503 status->rate_idx, status->nss, status->eht.gi))
5504 goto drop;
5505 break;
5506 default:
5507 WARN_ON_ONCE(1);
5508 fallthrough;
5509 case RX_ENC_LEGACY:
5510 if (WARN_ON(status->rate_idx >= sband->n_bitrates))
5511 goto drop;
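/* only used for the monitor/radiotap path below */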
5512 rate = &sband->bitrates[status->rate_idx];
5513 }
5514 }
5515
5516 if (WARN_ON_ONCE(status->link_id >= IEEE80211_LINK_UNSPECIFIED))
5517 goto drop;
5518
5519 status->rx_flags = 0;
5520
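/* attribute KCOV remote coverage of the RX path to this skb's handle */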
5521 kcov_remote_start_common(skb_get_kcov_handle(skb));
5522
5523 /*
5524 * Frames with a failed FCS/PLCP checksum are not returned;
5525 * all other frames are returned without the radiotap header
5526 * if one was previously present.
5527 * Also, frames shorter than 16 bytes are dropped.
5528 */
5529 if (!(status->flag & RX_FLAG_8023))
5530 skb = ieee80211_rx_monitor(local, skb, rate);
5531 if (skb) {
5532 if ((status->flag & RX_FLAG_8023) ||
5533 ieee80211_is_data_present(hdr->frame_control))
5534 ieee80211_tpt_led_trig_rx(local, skb->len);
5535
5536 if (status->flag & RX_FLAG_8023)
5537 __ieee80211_rx_handle_8023(hw, pubsta, skb, list);
5538 else
5539 __ieee80211_rx_handle_packet(hw, pubsta, skb, list);
5540 }
5541
5542 kcov_remote_stop();
5543 return;
5544 drop:
5545 kfree_skb(skb);
5546 }
5547 EXPORT_SYMBOL(ieee80211_rx_list);
5548
5549 void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
5550 struct sk_buff *skb, struct napi_struct *napi)
5551 {
5552 struct sk_buff *tmp;
5553 LIST_HEAD(list);
5554
5555
5556 /*
5557 * key references and virtual interfaces are protected using RCU
5558 * and this requires that we are in a read-side RCU section during
5559 * receive processing
5560 */
5561 rcu_read_lock();
5562 ieee80211_rx_list(hw, pubsta, skb, &list);
5563 rcu_read_unlock();
5564
5565 if (!napi) {
5566 netif_receive_skb_list(&list);
5567 return;
5568 }
5569
5570 list_for_each_entry_safe(skb, tmp, &list, list) {
5571 skb_list_del_init(skb);
5572 napi_gro_receive(napi, skb);
5573 }
5574 }
5575 EXPORT_SYMBOL(ieee80211_rx_napi);
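
/*
 * Illustrative sketch only (not part of mac80211): a driver's NAPI
 * poll routine might feed frames up in roughly this way.  The names
 * my_hw and my_dequeue_rx_skb, and the ring handling, are hypothetical
 * placeholders; the rx status in skb->cb must already have been filled
 * in by the driver before the call.
 *
 *	static int my_driver_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_hw *mhw = container_of(napi, struct my_hw, napi);
 *		int done = 0;
 *
 *		while (done < budget) {
 *			struct sk_buff *skb = my_dequeue_rx_skb(mhw);
 *
 *			if (!skb)
 *				break;
 *			ieee80211_rx_napi(mhw->hw, NULL, skb, napi);
 *			done++;
 *		}
 *
 *		if (done < budget)
 *			napi_complete_done(napi, done);
 *		return done;
 *	}
 */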
5576
5577 /* This is a version of the rx handler that can be called from hard irq
5578 * context. Post the skb on the queue and schedule the tasklet. */
5579 void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
5580 {
5581 struct ieee80211_local *local = hw_to_local(hw);
5582
5583 BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
5584
5585 skb->pkt_type = IEEE80211_RX_MSG;
5586 skb_queue_tail(&local->skb_queue, skb);
5587 tasklet_schedule(&local->tasklet);
5588 }
5589 EXPORT_SYMBOL(ieee80211_rx_irqsafe);
5590
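
/*
 * Illustrative sketch only (hypothetical driver code): from a hard
 * interrupt handler, frames can be handed off with ieee80211_rx_irqsafe(),
 * which only queues the skb and schedules mac80211's RX tasklet; the
 * driver must still fill in the rx status in skb->cb beforehand.
 *
 *	static irqreturn_t my_driver_isr(int irq, void *dev_id)
 *	{
 *		struct my_hw *mhw = dev_id;
 *		struct sk_buff *skb = my_dequeue_rx_skb(mhw);
 *
 *		if (skb)
 *			ieee80211_rx_irqsafe(mhw->hw, skb);
 *		return IRQ_HANDLED;
 *	}
 */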