// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * KUnit tests for the RX reorder buffer functionality
 *
 * Copyright (C) 2024 Intel Corporation
 */
#include <kunit/test.h>
#include <kunit/static_stub.h>
#include <kunit/skbuff.h>

#include "utils.h"
#include "mld.h"
#include "sta.h"
#include "agg.h"
#include "rx.h"

#define FC_QOS_DATA (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)
#define BA_WINDOW_SIZE 64
#define QUEUE 0

static const struct reorder_buffer_case {
	const char *desc;
	struct {
		/* ieee80211_hdr fields */
		u16 fc;
		u8 tid;
		bool multicast;
		/* iwl_rx_mpdu_desc fields */
		u16 nssn;
		/* used also for setting hdr::seq_ctrl */
		u16 sn;
		u8 baid;
		bool amsdu;
		bool last_subframe;
		bool old_sn;
		bool dup;
	} rx_pkt;
	struct {
		bool valid;
		u16 head_sn;
		u8 baid;
		u16 num_entries;
		/* The test prepares the reorder buffer with fake skbs based
		 * on the sequence numbers provided in the @entries array.
		 */
		struct {
			u16 sn;
			/* Set add_subframes > 0 to simulate an A-MSDU by
			 * queueing additional @add_subframes skbs in the
			 * appropriate reorder buffer entry (based on the @sn)
			 */
			u8 add_subframes;
		} entries[BA_WINDOW_SIZE];
	} reorder_buf_state;
	struct {
		enum iwl_mld_reorder_result reorder_res;
		u16 head_sn;
		u16 num_stored;
		u16 skb_release_order[BA_WINDOW_SIZE];
		u16 skb_release_order_count;
	} expected;
} reorder_buffer_cases[] = {
	{
		.desc = "RX packet with invalid BAID",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.baid = IWL_RX_REORDER_DATA_INVALID_BAID,
		},
		.reorder_buf_state = {
			.valid = true,
		},
		.expected = {
			/* Invalid BAID should not be buffered. The frame is
			 * passed to the network stack immediately.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
			.num_stored = 0,
		},
	},
	{
		.desc = "RX Multicast packet",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.multicast = true,
		},
		.reorder_buf_state = {
			.valid = true,
		},
		.expected = {
			/* Multicast packets are not buffered. The packet is
			 * passed to the network stack immediately.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
			.num_stored = 0,
		},
	},
	{
		.desc = "RX non-QoS data",
		.rx_pkt = {
			.fc = IEEE80211_FTYPE_DATA,
		},
		.reorder_buf_state = {
			.valid = true,
		},
		.expected = {
			/* non-QoS data frames do not require reordering.
			 * The packet is passed to the network stack
			 * immediately.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
		},
	},
	{
		.desc = "RX old SN packet, reorder buffer is not yet valid",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.old_sn = true,
		},
		.reorder_buf_state = {
			.valid = false,
		},
		.expected = {
			/* The buffer is invalid and the RX packet has an old
			 * SN. The packet is passed to the network stack
			 * immediately.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
		},
	},
	{
		.desc = "RX old SN packet, reorder buffer valid",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.old_sn = true,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
		},
		.expected = {
			/* The buffer is valid and the RX packet has an old SN.
			 * The packet should be dropped.
			 */
			.reorder_res = IWL_MLD_DROP_SKB,
			.num_stored = 0,
			.head_sn = 100,
		},
	},
	{
		.desc = "RX duplicate packet",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.dup = true,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
		},
		.expected = {
			/* Duplicate packets should be dropped */
			.reorder_res = IWL_MLD_DROP_SKB,
			.num_stored = 0,
			.head_sn = 100,
		},
	},
	{
		.desc = "RX In-order packet, sn < nssn",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 100,
			.nssn = 101,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
		},
		.expected = {
			/* 1. Reorder buffer is empty.
			 * 2. RX packet SN is in order and less than NSSN.
			 * Packet is released to the network stack immediately
			 * and buffer->head_sn is updated to NSSN.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
			.num_stored = 0,
			.head_sn = 101,
		},
	},
	{
		.desc = "RX In-order packet, sn == head_sn",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 101,
			.nssn = 100,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 101,
		},
		.expected = {
			/* 1. Reorder buffer is empty.
			 * 2. RX packet SN is equal to buffer->head_sn.
			 * Packet is released to the network stack immediately
			 * and buffer->head_sn is incremented.
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
			.num_stored = 0,
			.head_sn = 102,
		},
	},
	{
		.desc = "RX In-order packet, IEEE80211_MAX_SN wrap around",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = IEEE80211_MAX_SN,
			.nssn = IEEE80211_MAX_SN - 1,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = IEEE80211_MAX_SN,
		},
		.expected = {
			/* 1. Reorder buffer is empty.
			 * 2. RX SN == buffer->head_sn == IEEE80211_MAX_SN
			 * Packet is released to the network stack immediately
			 * and buffer->head_sn is incremented correctly (wraps
			 * around to 0).
			 */
			.reorder_res = IWL_MLD_PASS_SKB,
			.num_stored = 0,
			.head_sn = 0,
		},
	},
	{
		.desc = "RX Out-of-order packet, pending packet in buffer",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 100,
			.nssn = 102,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 1,
			.entries[0].sn = 101,
		},
		.expected = {
			/* 1. Reorder buffer contains one packet with SN=101.
			 * 2. RX packet SN = buffer->head_sn.
			 * Both packets are released (in order) to the network
			 * stack as there are no gaps.
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 0,
			.head_sn = 102,
			.skb_release_order = {100, 101},
			.skb_release_order_count = 2,
		},
	},
	{
		.desc = "RX Out-of-order packet, pending packet in buffer (wrap around)",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 0,
			.nssn = 1,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = IEEE80211_MAX_SN - 1,
			.num_entries = 1,
			.entries[0].sn = IEEE80211_MAX_SN,
		},
		.expected = {
			/* 1. Reorder buffer contains one packet with
			 * SN=IEEE80211_MAX_SN.
			 * 2. RX Packet SN = 0 (after wrap around)
			 * Both packets are released (in order) to the network
			 * stack as there are no gaps.
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 0,
			.head_sn = 1,
			.skb_release_order = { 4095, 0 },
			.skb_release_order_count = 2,
		},
	},
	{
		.desc = "RX Out-of-order packet, filling 1/2 holes in buffer, release RX packet",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 100,
			.nssn = 101,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 1,
			.entries[0].sn = 102,
		},
		.expected = {
			/* 1. Reorder buffer contains one packet with SN=102.
			 * 2. There are 2 holes at SN={100, 101}.
			 * Only the RX packet (SN=100) is released, there is
			 * still a hole at 101.
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 1,
			.head_sn = 101,
			.skb_release_order = {100},
			.skb_release_order_count = 1,
		},
	},
	{
		.desc = "RX Out-of-order packet, filling 1/2 holes, release 2 packets",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 102,
			.nssn = 103,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 3,
			.entries[0].sn = 101,
			.entries[1].sn = 104,
			.entries[2].sn = 105,
		},
		.expected = {
			/* 1. Reorder buffer contains three packets.
			 * 2. RX packet fills one of two holes (at SN=102).
			 * Two packets are released (until the next hole at
			 * SN=103).
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 2,
			.head_sn = 103,
			.skb_release_order = {101, 102},
			.skb_release_order_count = 2,
		},
	},
	{
		.desc = "RX Out-of-order packet, filling 1/1 holes, no packets released",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 102,
			.nssn = 100,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 3,
			.entries[0].sn = 101,
			.entries[1].sn = 103,
			.entries[2].sn = 104,
		},
		.expected = {
			/* 1. Reorder buffer contains three packets:
			 * SN={101, 103, 104}.
			 * 2. RX packet fills a hole (SN=102), but NSSN is
			 * smaller than buffered frames.
			 * No packets can be released yet and buffer->head_sn
			 * is not updated.
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 4,
			.head_sn = 100,
		},
	},
	{
		.desc = "RX In-order A-MSDU, last subframe",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 100,
			.nssn = 101,
			.amsdu = true,
			.last_subframe = true,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 1,
			.entries[0] = {
				.sn = 100,
				.add_subframes = 1,
			},
		},
		.expected = {
			/* 1. Reorder buffer contains a 2-subframe A-MSDU
			 * at SN=100.
			 * 2. RX packet is the last SN=100 A-MSDU subframe.
			 * All packets are released in order (3 x SN=100).
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 0,
			.head_sn = 101,
			.skb_release_order = {100, 100, 100},
			.skb_release_order_count = 3,
		},
	},
	{
		.desc = "RX In-order A-MSDU, not the last subframe",
		.rx_pkt = {
			.fc = FC_QOS_DATA,
			.sn = 100,
			.nssn = 101,
			.amsdu = true,
			.last_subframe = false,
		},
		.reorder_buf_state = {
			.valid = true,
			.head_sn = 100,
			.num_entries = 1,
			.entries[0] = {
				.sn = 100,
				.add_subframes = 1,
			},
		},
		.expected = {
			/* 1. Reorder buffer contains a 2-subframe A-MSDU
			 * at SN=100.
			 * 2. RX packet is an additional SN=100 A-MSDU
			 * subframe, but not the last one.
			 * No packets are released and head_sn is not updated.
			 */
			.reorder_res = IWL_MLD_BUFFERED_SKB,
			.num_stored = 3,
			.head_sn = 100,
		},
	},
};
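
/* Generate the parameterized test data from reorder_buffer_cases[], using
 * the @desc field as the KUnit parameter description.
 */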
KUNIT_ARRAY_PARAM_DESC(test_reorder_buffer, reorder_buffer_cases, desc);

static struct sk_buff_head g_released_skbs;
static u16 g_num_released_skbs;

/* Add released skbs from the reorder buffer to a global list. This allows us
 * to verify the correct release order of packets after they pass through the
 * simulated reorder logic.
 */
static void
fake_iwl_mld_pass_packet_to_mac80211(struct iwl_mld *mld,
				     struct napi_struct *napi,
				     struct sk_buff *skb, int queue,
				     struct ieee80211_sta *sta)
{
	__skb_queue_tail(&g_released_skbs, skb);
	g_num_released_skbs++;
}

static u32
fake_iwl_mld_fw_sta_id_mask(struct iwl_mld *mld, struct ieee80211_sta *sta)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u8 link_id;
	u32 sta_mask = 0;

	/* This is the expectation in the real function */
	lockdep_assert_wiphy(mld->wiphy);

	/* We can't use for_each_sta_active_link */
	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);
	return sta_mask;
}

static struct iwl_rx_mpdu_desc *setup_mpdu_desc(void)
{
	struct kunit *test = kunit_get_current_test();
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_rx_mpdu_desc *mpdu_desc;

	KUNIT_ALLOC_AND_ASSERT(test, mpdu_desc);

	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_BAID_MASK,
				       param->rx_pkt.baid));
	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_SN_MASK,
				       param->rx_pkt.sn));
	mpdu_desc->reorder_data |=
		cpu_to_le32(FIELD_PREP(IWL_RX_MPDU_REORDER_NSSN_MASK,
				       param->rx_pkt.nssn));
	if (param->rx_pkt.old_sn)
		mpdu_desc->reorder_data |=
			cpu_to_le32(IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (param->rx_pkt.dup)
		mpdu_desc->status |= cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE);

	if (param->rx_pkt.amsdu) {
		mpdu_desc->mac_flags2 |= IWL_RX_MPDU_MFLG2_AMSDU;
		if (param->rx_pkt.last_subframe)
			mpdu_desc->amsdu_info |=
				IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	}

	return mpdu_desc;
}

static struct sk_buff *alloc_and_setup_skb(u16 fc, u16 seq_ctrl, u8 tid,
					   bool mcast)
{
	struct kunit *test = kunit_get_current_test();
	struct ieee80211_hdr hdr = {
		.frame_control = cpu_to_le16(fc),
		.seq_ctrl = cpu_to_le16(seq_ctrl),
	};
	struct sk_buff *skb;

	skb = kunit_zalloc_skb(test, 128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, skb);

	if (ieee80211_is_data_qos(hdr.frame_control)) {
		u8 *qc = ieee80211_get_qos_ctl(&hdr);

		qc[0] = tid & IEEE80211_QOS_CTL_TID_MASK;
	}

	if (mcast)
		hdr.addr1[0] = 0x1;

	skb_set_mac_header(skb, skb->len);
	skb_put_data(skb, &hdr, ieee80211_hdrlen(hdr.frame_control));

	return skb;
}
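
/* Prepare the reorder buffer according to the current test case: mark it
 * valid/invalid, set head_sn, and pre-fill the entries with fake skbs
 * (including extra A-MSDU subframes when requested).
 */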
static struct iwl_mld_reorder_buffer *
setup_reorder_buffer(struct iwl_mld_baid_data *baid_data)
{
	struct kunit *test = kunit_get_current_test();
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_mld_reorder_buffer *buffer = baid_data->reorder_buf;
	struct iwl_mld_reorder_buf_entry *entries = baid_data->entries;
	struct sk_buff *fake_skb;

	buffer->valid = param->reorder_buf_state.valid;
	buffer->head_sn = param->reorder_buf_state.head_sn;
	buffer->queue = QUEUE;

	for (int i = 0; i < baid_data->buf_size; i++)
		__skb_queue_head_init(&entries[i].frames);

	for (int i = 0; i < param->reorder_buf_state.num_entries; i++) {
		u16 sn = param->reorder_buf_state.entries[i].sn;
		int index = sn % baid_data->buf_size;
		u8 add_subframes =
			param->reorder_buf_state.entries[i].add_subframes;
		/* create 1 skb per entry + additional skbs per num of
		 * requested subframes
		 */
		u8 num_skbs = 1 + add_subframes;

		for (int j = 0; j < num_skbs; j++) {
			fake_skb = alloc_and_setup_skb(FC_QOS_DATA, sn, 0,
						       false);
			__skb_queue_tail(&entries[index].frames, fake_skb);
			buffer->num_stored++;
		}
	}

	return buffer;
}

static struct iwl_mld_reorder_buffer *setup_ba_data(struct ieee80211_sta *sta)
{
	struct kunit *test = kunit_get_current_test();
	struct iwl_mld *mld = test->priv;
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_mld_baid_data *baid_data = NULL;
	struct iwl_mld_reorder_buffer *buffer;
	u32 reorder_buf_size = BA_WINDOW_SIZE * sizeof(baid_data->entries[0]);
	u8 baid = param->reorder_buf_state.baid;

	/* Assuming only 1 RXQ */
	KUNIT_ALLOC_AND_ASSERT_SIZE(test, baid_data,
				    sizeof(*baid_data) + reorder_buf_size);

	baid_data->baid = baid;
	baid_data->tid = param->rx_pkt.tid;
	baid_data->buf_size = BA_WINDOW_SIZE;

	wiphy_lock(mld->wiphy);
	baid_data->sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	wiphy_unlock(mld->wiphy);

	baid_data->entries_per_queue = BA_WINDOW_SIZE;

	buffer = setup_reorder_buffer(baid_data);

	KUNIT_EXPECT_NULL(test, rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return buffer;
}
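
/* Run a single RX MPDU through iwl_mld_reorder() and verify the reorder
 * result, the resulting buffer state and the exact skb release order.
 */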
static void test_reorder_buffer(struct kunit *test)
{
	struct iwl_mld *mld = test->priv;
	const struct reorder_buffer_case *param =
		(const void *)(test->param_value);
	struct iwl_rx_mpdu_desc *mpdu_desc;
	struct ieee80211_vif *vif;
	struct ieee80211_sta *sta;
	struct sk_buff *skb;
	struct iwl_mld_reorder_buffer *buffer;
	enum iwl_mld_reorder_result reorder_res;
	u16 skb_release_order_count = param->expected.skb_release_order_count;
	u16 skb_idx = 0;

	/* Init globals and activate stubs */
	__skb_queue_head_init(&g_released_skbs);
	g_num_released_skbs = 0;
	kunit_activate_static_stub(test, iwl_mld_fw_sta_id_mask,
				   fake_iwl_mld_fw_sta_id_mask);
	kunit_activate_static_stub(test, iwl_mld_pass_packet_to_mac80211,
				   fake_iwl_mld_pass_packet_to_mac80211);

	vif = iwlmld_kunit_add_vif(false, NL80211_IFTYPE_STATION);
	sta = iwlmld_kunit_setup_sta(vif, IEEE80211_STA_AUTHORIZED, -1);

	/* Prepare skb, mpdu_desc, BA data and the reorder buffer */
	skb = alloc_and_setup_skb(param->rx_pkt.fc, param->rx_pkt.sn,
				  param->rx_pkt.tid, param->rx_pkt.multicast);
	buffer = setup_ba_data(sta);
	mpdu_desc = setup_mpdu_desc();

	rcu_read_lock();
	reorder_res = iwl_mld_reorder(mld, NULL, QUEUE, sta, skb, mpdu_desc);
	rcu_read_unlock();

	KUNIT_ASSERT_EQ(test, reorder_res, param->expected.reorder_res);
	KUNIT_ASSERT_EQ(test, buffer->num_stored, param->expected.num_stored);
	KUNIT_ASSERT_EQ(test, buffer->head_sn, param->expected.head_sn);

	/* Verify skbs release order */
	KUNIT_ASSERT_EQ(test, skb_release_order_count, g_num_released_skbs);
	while ((skb = __skb_dequeue(&g_released_skbs))) {
		struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);

		KUNIT_ASSERT_EQ(test, le16_to_cpu(hdr->seq_ctrl),
				param->expected.skb_release_order[skb_idx]);
		skb_idx++;
	}
	KUNIT_ASSERT_EQ(test, skb_idx, skb_release_order_count);
}

static struct kunit_case reorder_buffer_test_cases[] = {
	KUNIT_CASE_PARAM(test_reorder_buffer, test_reorder_buffer_gen_params),
	{},
};

static struct kunit_suite reorder_buffer = {
	.name = "iwlmld-reorder-buffer",
	.test_cases = reorder_buffer_test_cases,
	.init = iwlmld_kunit_test_init,
};

kunit_test_suite(reorder_buffer);