1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <net/mac80211.h> 8 #include <linux/etherdevice.h> 9 #include "mac.h" 10 #include "core.h" 11 #include "debug.h" 12 #include "wmi.h" 13 #include "hw.h" 14 #include "dp_tx.h" 15 #include "dp_rx.h" 16 #include "peer.h" 17 18 #define CHAN2G(_channel, _freq, _flags) { \ 19 .band = NL80211_BAND_2GHZ, \ 20 .hw_value = (_channel), \ 21 .center_freq = (_freq), \ 22 .flags = (_flags), \ 23 .max_antenna_gain = 0, \ 24 .max_power = 30, \ 25 } 26 27 #define CHAN5G(_channel, _freq, _flags) { \ 28 .band = NL80211_BAND_5GHZ, \ 29 .hw_value = (_channel), \ 30 .center_freq = (_freq), \ 31 .flags = (_flags), \ 32 .max_antenna_gain = 0, \ 33 .max_power = 30, \ 34 } 35 36 #define CHAN6G(_channel, _freq, _flags) { \ 37 .band = NL80211_BAND_6GHZ, \ 38 .hw_value = (_channel), \ 39 .center_freq = (_freq), \ 40 .flags = (_flags), \ 41 .max_antenna_gain = 0, \ 42 .max_power = 30, \ 43 } 44 45 static const struct ieee80211_channel ath12k_2ghz_channels[] = { 46 CHAN2G(1, 2412, 0), 47 CHAN2G(2, 2417, 0), 48 CHAN2G(3, 2422, 0), 49 CHAN2G(4, 2427, 0), 50 CHAN2G(5, 2432, 0), 51 CHAN2G(6, 2437, 0), 52 CHAN2G(7, 2442, 0), 53 CHAN2G(8, 2447, 0), 54 CHAN2G(9, 2452, 0), 55 CHAN2G(10, 2457, 0), 56 CHAN2G(11, 2462, 0), 57 CHAN2G(12, 2467, 0), 58 CHAN2G(13, 2472, 0), 59 CHAN2G(14, 2484, 0), 60 }; 61 62 static const struct ieee80211_channel ath12k_5ghz_channels[] = { 63 CHAN5G(36, 5180, 0), 64 CHAN5G(40, 5200, 0), 65 CHAN5G(44, 5220, 0), 66 CHAN5G(48, 5240, 0), 67 CHAN5G(52, 5260, 0), 68 CHAN5G(56, 5280, 0), 69 CHAN5G(60, 5300, 0), 70 CHAN5G(64, 5320, 0), 71 CHAN5G(100, 5500, 0), 72 CHAN5G(104, 5520, 0), 73 CHAN5G(108, 5540, 0), 74 CHAN5G(112, 5560, 0), 75 CHAN5G(116, 5580, 0), 76 CHAN5G(120, 5600, 0), 77 CHAN5G(124, 5620, 0), 78 CHAN5G(128, 5640, 0), 79 CHAN5G(132, 5660, 0), 80 CHAN5G(136, 5680, 0), 81 CHAN5G(140, 5700, 0), 82 CHAN5G(144, 5720, 0), 83 CHAN5G(149, 5745, 0), 84 CHAN5G(153, 5765, 0), 85 CHAN5G(157, 5785, 0), 86 CHAN5G(161, 5805, 0), 87 CHAN5G(165, 5825, 0), 88 CHAN5G(169, 5845, 0), 89 CHAN5G(173, 5865, 0), 90 }; 91 92 static const struct ieee80211_channel ath12k_6ghz_channels[] = { 93 CHAN6G(1, 5955, 0), 94 CHAN6G(5, 5975, 0), 95 CHAN6G(9, 5995, 0), 96 CHAN6G(13, 6015, 0), 97 CHAN6G(17, 6035, 0), 98 CHAN6G(21, 6055, 0), 99 CHAN6G(25, 6075, 0), 100 CHAN6G(29, 6095, 0), 101 CHAN6G(33, 6115, 0), 102 CHAN6G(37, 6135, 0), 103 CHAN6G(41, 6155, 0), 104 CHAN6G(45, 6175, 0), 105 CHAN6G(49, 6195, 0), 106 CHAN6G(53, 6215, 0), 107 CHAN6G(57, 6235, 0), 108 CHAN6G(61, 6255, 0), 109 CHAN6G(65, 6275, 0), 110 CHAN6G(69, 6295, 0), 111 CHAN6G(73, 6315, 0), 112 CHAN6G(77, 6335, 0), 113 CHAN6G(81, 6355, 0), 114 CHAN6G(85, 6375, 0), 115 CHAN6G(89, 6395, 0), 116 CHAN6G(93, 6415, 0), 117 CHAN6G(97, 6435, 0), 118 CHAN6G(101, 6455, 0), 119 CHAN6G(105, 6475, 0), 120 CHAN6G(109, 6495, 0), 121 CHAN6G(113, 6515, 0), 122 CHAN6G(117, 6535, 0), 123 CHAN6G(121, 6555, 0), 124 CHAN6G(125, 6575, 0), 125 CHAN6G(129, 6595, 0), 126 CHAN6G(133, 6615, 0), 127 CHAN6G(137, 6635, 0), 128 CHAN6G(141, 6655, 0), 129 CHAN6G(145, 6675, 0), 130 CHAN6G(149, 6695, 0), 131 CHAN6G(153, 6715, 0), 132 CHAN6G(157, 6735, 0), 133 CHAN6G(161, 6755, 0), 134 CHAN6G(165, 6775, 0), 135 CHAN6G(169, 6795, 0), 136 CHAN6G(173, 6815, 0), 137 CHAN6G(177, 6835, 0), 138 CHAN6G(181, 6855, 0), 139 CHAN6G(185, 6875, 0), 140 CHAN6G(189, 6895, 0), 141 CHAN6G(193, 6915, 
0), 142 CHAN6G(197, 6935, 0), 143 CHAN6G(201, 6955, 0), 144 CHAN6G(205, 6975, 0), 145 CHAN6G(209, 6995, 0), 146 CHAN6G(213, 7015, 0), 147 CHAN6G(217, 7035, 0), 148 CHAN6G(221, 7055, 0), 149 CHAN6G(225, 7075, 0), 150 CHAN6G(229, 7095, 0), 151 CHAN6G(233, 7115, 0), 152 }; 153 154 static struct ieee80211_rate ath12k_legacy_rates[] = { 155 { .bitrate = 10, 156 .hw_value = ATH12K_HW_RATE_CCK_LP_1M }, 157 { .bitrate = 20, 158 .hw_value = ATH12K_HW_RATE_CCK_LP_2M, 159 .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M, 160 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 161 { .bitrate = 55, 162 .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M, 163 .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M, 164 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 165 { .bitrate = 110, 166 .hw_value = ATH12K_HW_RATE_CCK_LP_11M, 167 .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M, 168 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 169 170 { .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M }, 171 { .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M }, 172 { .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M }, 173 { .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M }, 174 { .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M }, 175 { .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M }, 176 { .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M }, 177 { .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M }, 178 }; 179 180 static const int 181 ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = { 182 [NL80211_BAND_2GHZ] = { 183 [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, 184 [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, 185 [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20_2G, 186 [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20_2G, 187 [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40_2G, 188 [NL80211_CHAN_WIDTH_80] = MODE_UNKNOWN, 189 [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN, 190 [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN, 191 [NL80211_CHAN_WIDTH_320] = MODE_UNKNOWN, 192 }, 193 [NL80211_BAND_5GHZ] = { 194 [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, 195 [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, 196 [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20, 197 [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20, 198 [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40, 199 [NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80, 200 [NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160, 201 [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80, 202 [NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320, 203 }, 204 [NL80211_BAND_6GHZ] = { 205 [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN, 206 [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN, 207 [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20, 208 [NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20, 209 [NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40, 210 [NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80, 211 [NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160, 212 [NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80, 213 [NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320, 214 }, 215 216 }; 217 218 const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = { 219 .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START | 220 HTT_RX_FILTER_TLV_FLAGS_PPDU_END | 221 HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE, 222 .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0, 223 .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1, 224 .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2, 225 .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 | 226 HTT_RX_FP_CTRL_FILTER_FLASG3 227 }; 228 229 #define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4 230 #define ath12k_g_rates ath12k_legacy_rates 231 #define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates)) 232 #define ath12k_a_rates 
(ath12k_legacy_rates + 4) 233 #define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4) 234 235 #define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */ 236 237 static const u32 ath12k_smps_map[] = { 238 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 239 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 240 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 241 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 242 }; 243 244 static int ath12k_start_vdev_delay(struct ath12k *ar, 245 struct ath12k_vif *arvif); 246 247 static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode) 248 { 249 switch (mode) { 250 case MODE_11A: 251 return "11a"; 252 case MODE_11G: 253 return "11g"; 254 case MODE_11B: 255 return "11b"; 256 case MODE_11GONLY: 257 return "11gonly"; 258 case MODE_11NA_HT20: 259 return "11na-ht20"; 260 case MODE_11NG_HT20: 261 return "11ng-ht20"; 262 case MODE_11NA_HT40: 263 return "11na-ht40"; 264 case MODE_11NG_HT40: 265 return "11ng-ht40"; 266 case MODE_11AC_VHT20: 267 return "11ac-vht20"; 268 case MODE_11AC_VHT40: 269 return "11ac-vht40"; 270 case MODE_11AC_VHT80: 271 return "11ac-vht80"; 272 case MODE_11AC_VHT160: 273 return "11ac-vht160"; 274 case MODE_11AC_VHT80_80: 275 return "11ac-vht80+80"; 276 case MODE_11AC_VHT20_2G: 277 return "11ac-vht20-2g"; 278 case MODE_11AC_VHT40_2G: 279 return "11ac-vht40-2g"; 280 case MODE_11AC_VHT80_2G: 281 return "11ac-vht80-2g"; 282 case MODE_11AX_HE20: 283 return "11ax-he20"; 284 case MODE_11AX_HE40: 285 return "11ax-he40"; 286 case MODE_11AX_HE80: 287 return "11ax-he80"; 288 case MODE_11AX_HE80_80: 289 return "11ax-he80+80"; 290 case MODE_11AX_HE160: 291 return "11ax-he160"; 292 case MODE_11AX_HE20_2G: 293 return "11ax-he20-2g"; 294 case MODE_11AX_HE40_2G: 295 return "11ax-he40-2g"; 296 case MODE_11AX_HE80_2G: 297 return "11ax-he80-2g"; 298 case MODE_11BE_EHT20: 299 return "11be-eht20"; 300 case MODE_11BE_EHT40: 301 return "11be-eht40"; 302 case MODE_11BE_EHT80: 303 return "11be-eht80"; 304 case MODE_11BE_EHT80_80: 305 return "11be-eht80+80"; 306 case MODE_11BE_EHT160: 307 return "11be-eht160"; 308 case MODE_11BE_EHT160_160: 309 return "11be-eht160+160"; 310 case MODE_11BE_EHT320: 311 return "11be-eht320"; 312 case MODE_11BE_EHT20_2G: 313 return "11be-eht20-2g"; 314 case MODE_11BE_EHT40_2G: 315 return "11be-eht40-2g"; 316 case MODE_UNKNOWN: 317 /* skip */ 318 break; 319 320 /* no default handler to allow compiler to check that the 321 * enum is fully handled 322 */ 323 } 324 325 return "<unknown>"; 326 } 327 328 enum rate_info_bw 329 ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw) 330 { 331 u8 ret = RATE_INFO_BW_20; 332 333 switch (bw) { 334 case ATH12K_BW_20: 335 ret = RATE_INFO_BW_20; 336 break; 337 case ATH12K_BW_40: 338 ret = RATE_INFO_BW_40; 339 break; 340 case ATH12K_BW_80: 341 ret = RATE_INFO_BW_80; 342 break; 343 case ATH12K_BW_160: 344 ret = RATE_INFO_BW_160; 345 break; 346 case ATH12K_BW_320: 347 ret = RATE_INFO_BW_320; 348 break; 349 } 350 351 return ret; 352 } 353 354 enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw) 355 { 356 switch (bw) { 357 case RATE_INFO_BW_20: 358 return ATH12K_BW_20; 359 case RATE_INFO_BW_40: 360 return ATH12K_BW_40; 361 case RATE_INFO_BW_80: 362 return ATH12K_BW_80; 363 case RATE_INFO_BW_160: 364 return ATH12K_BW_160; 365 case RATE_INFO_BW_320: 366 return ATH12K_BW_320; 367 default: 368 return ATH12K_BW_20; 369 } 370 } 371 372 int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx, 373 u16 *rate) 374 { 375 /* As 
default, it is OFDM rates */ 376 int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX; 377 int max_rates_idx = ath12k_g_rates_size; 378 379 if (preamble == WMI_RATE_PREAMBLE_CCK) { 380 hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK; 381 i = 0; 382 max_rates_idx = ATH12K_MAC_FIRST_OFDM_RATE_IDX; 383 } 384 385 while (i < max_rates_idx) { 386 if (hw_rc == ath12k_legacy_rates[i].hw_value) { 387 *rateidx = i; 388 *rate = ath12k_legacy_rates[i].bitrate; 389 return 0; 390 } 391 i++; 392 } 393 394 return -EINVAL; 395 } 396 397 u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 398 u32 bitrate) 399 { 400 int i; 401 402 for (i = 0; i < sband->n_bitrates; i++) 403 if (sband->bitrates[i].bitrate == bitrate) 404 return i; 405 406 return 0; 407 } 408 409 static u32 410 ath12k_mac_max_ht_nss(const u8 *ht_mcs_mask) 411 { 412 int nss; 413 414 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 415 if (ht_mcs_mask[nss]) 416 return nss + 1; 417 418 return 1; 419 } 420 421 static u32 422 ath12k_mac_max_vht_nss(const u16 *vht_mcs_mask) 423 { 424 int nss; 425 426 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 427 if (vht_mcs_mask[nss]) 428 return nss + 1; 429 430 return 1; 431 } 432 433 static u8 ath12k_parse_mpdudensity(u8 mpdudensity) 434 { 435 /* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing": 436 * 0 for no restriction 437 * 1 for 1/4 us 438 * 2 for 1/2 us 439 * 3 for 1 us 440 * 4 for 2 us 441 * 5 for 4 us 442 * 6 for 8 us 443 * 7 for 16 us 444 */ 445 switch (mpdudensity) { 446 case 0: 447 return 0; 448 case 1: 449 case 2: 450 case 3: 451 /* Our lower layer calculations limit our precision to 452 * 1 microsecond 453 */ 454 return 1; 455 case 4: 456 return 2; 457 case 5: 458 return 4; 459 case 6: 460 return 8; 461 case 7: 462 return 16; 463 default: 464 return 0; 465 } 466 } 467 468 static int ath12k_mac_vif_chan(struct ieee80211_vif *vif, 469 struct cfg80211_chan_def *def) 470 { 471 struct ieee80211_chanctx_conf *conf; 472 473 rcu_read_lock(); 474 conf = rcu_dereference(vif->bss_conf.chanctx_conf); 475 if (!conf) { 476 rcu_read_unlock(); 477 return -ENOENT; 478 } 479 480 *def = conf->def; 481 rcu_read_unlock(); 482 483 return 0; 484 } 485 486 static bool ath12k_mac_bitrate_is_cck(int bitrate) 487 { 488 switch (bitrate) { 489 case 10: 490 case 20: 491 case 55: 492 case 110: 493 return true; 494 } 495 496 return false; 497 } 498 499 u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 500 u8 hw_rate, bool cck) 501 { 502 const struct ieee80211_rate *rate; 503 int i; 504 505 for (i = 0; i < sband->n_bitrates; i++) { 506 rate = &sband->bitrates[i]; 507 508 if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck) 509 continue; 510 511 if (rate->hw_value == hw_rate) 512 return i; 513 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 514 rate->hw_value_short == hw_rate) 515 return i; 516 } 517 518 return 0; 519 } 520 521 static u8 ath12k_mac_bitrate_to_rate(int bitrate) 522 { 523 return DIV_ROUND_UP(bitrate, 5) | 524 (ath12k_mac_bitrate_is_cck(bitrate) ? 
BIT(7) : 0); 525 } 526 527 static void ath12k_get_arvif_iter(void *data, u8 *mac, 528 struct ieee80211_vif *vif) 529 { 530 struct ath12k_vif_iter *arvif_iter = data; 531 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 532 533 if (arvif->vdev_id == arvif_iter->vdev_id) 534 arvif_iter->arvif = arvif; 535 } 536 537 struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id) 538 { 539 struct ath12k_vif_iter arvif_iter = {}; 540 u32 flags; 541 542 arvif_iter.vdev_id = vdev_id; 543 544 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 545 ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar), 546 flags, 547 ath12k_get_arvif_iter, 548 &arvif_iter); 549 if (!arvif_iter.arvif) { 550 ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id); 551 return NULL; 552 } 553 554 return arvif_iter.arvif; 555 } 556 557 struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab, 558 u32 vdev_id) 559 { 560 int i; 561 struct ath12k_pdev *pdev; 562 struct ath12k_vif *arvif; 563 564 for (i = 0; i < ab->num_radios; i++) { 565 pdev = rcu_dereference(ab->pdevs_active[i]); 566 if (pdev && pdev->ar) { 567 arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id); 568 if (arvif) 569 return arvif; 570 } 571 } 572 573 return NULL; 574 } 575 576 struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id) 577 { 578 int i; 579 struct ath12k_pdev *pdev; 580 581 for (i = 0; i < ab->num_radios; i++) { 582 pdev = rcu_dereference(ab->pdevs_active[i]); 583 if (pdev && pdev->ar) { 584 if (pdev->ar->allocated_vdev_map & (1LL << vdev_id)) 585 return pdev->ar; 586 } 587 } 588 589 return NULL; 590 } 591 592 struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id) 593 { 594 int i; 595 struct ath12k_pdev *pdev; 596 597 if (ab->hw_params->single_pdev_only) { 598 pdev = rcu_dereference(ab->pdevs_active[0]); 599 return pdev ? pdev->ar : NULL; 600 } 601 602 if (WARN_ON(pdev_id > ab->num_radios)) 603 return NULL; 604 605 for (i = 0; i < ab->num_radios; i++) { 606 pdev = rcu_dereference(ab->pdevs_active[i]); 607 608 if (pdev && pdev->pdev_id == pdev_id) 609 return (pdev->ar ? pdev->ar : NULL); 610 } 611 612 return NULL; 613 } 614 615 static void ath12k_pdev_caps_update(struct ath12k *ar) 616 { 617 struct ath12k_base *ab = ar->ab; 618 619 ar->max_tx_power = ab->target_caps.hw_max_tx_power; 620 621 /* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power. 
 * But since the value received in svcrdy is the same as hw_max_tx_power,
 * we can set ar->min_tx_power to 0 currently until
 * this is fixed in firmware
 */
        ar->min_tx_power = 0;

        ar->txpower_limit_2g = ar->max_tx_power;
        ar->txpower_limit_5g = ar->max_tx_power;
        ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
}

static int ath12k_mac_txpower_recalc(struct ath12k *ar)
{
        struct ath12k_pdev *pdev = ar->pdev;
        struct ath12k_vif *arvif;
        int ret, txpower = -1;
        u32 param;

        lockdep_assert_held(&ar->conf_mutex);

        list_for_each_entry(arvif, &ar->arvifs, list) {
                if (arvif->txpower <= 0)
                        continue;

                if (txpower == -1)
                        txpower = arvif->txpower;
                else
                        txpower = min(txpower, arvif->txpower);
        }

        if (txpower == -1)
                return 0;

        /* txpower is set as 2 units per dBm in FW */
        txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
                        ar->max_tx_power) * 2;

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
                   txpower / 2);

        if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
            ar->txpower_limit_2g != txpower) {
                param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
                ret = ath12k_wmi_pdev_set_param(ar, param,
                                                txpower, ar->pdev->pdev_id);
                if (ret)
                        goto fail;
                ar->txpower_limit_2g = txpower;
        }

        if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
            ar->txpower_limit_5g != txpower) {
                param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
                ret = ath12k_wmi_pdev_set_param(ar, param,
                                                txpower, ar->pdev->pdev_id);
                if (ret)
                        goto fail;
                ar->txpower_limit_5g = txpower;
        }

        return 0;

fail:
        ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
                    txpower / 2, param, ret);
        return ret;
}
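/* As used below, the WMI_VDEV_PARAM_ENABLE_RTSCTS value appears to pack the
 * RTS/CTS enable mode in the low bits and the rate-series/retry profile
 * shifted left by 4, e.g. WMI_USE_RTS_CTS | (WMI_RTSCTS_ACROSS_SW_RETRIES << 4)
 * when legacy stations are present; the exact bit layout is owned by the
 * WMI/firmware interface.
 */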
static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
{
        struct ath12k *ar = arvif->ar;
        u32 vdev_param, rts_cts;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;

        /* Enable RTS/CTS protection for sw retries (when legacy stations
         * are in BSS) or by default only for second rate series.
         * TODO: Check if we need to enable CTS 2 Self in any case
         */
        rts_cts = WMI_USE_RTS_CTS;

        if (arvif->num_legacy_stations > 0)
                rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
        else
                rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;

        /* Need not send duplicate param value to firmware */
        if (arvif->rtscts_prot_mode == rts_cts)
                return 0;

        arvif->rtscts_prot_mode = rts_cts;

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
                   arvif->vdev_id, rts_cts);

        ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
                                            vdev_param, rts_cts);
        if (ret)
                ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
                            arvif->vdev_id, ret);

        return ret;
}

static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
{
        struct ath12k *ar = arvif->ar;
        u32 param;
        int ret;

        ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
                                        ATH12K_KICKOUT_THRESHOLD,
                                        ar->pdev->pdev_id);
        if (ret) {
                ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
        ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
                                            ATH12K_KEEPALIVE_MIN_IDLE);
        if (ret) {
                ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
        ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
                                            ATH12K_KEEPALIVE_MAX_IDLE);
        if (ret) {
                ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
        ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
                                            ATH12K_KEEPALIVE_MAX_UNRESPONSIVE);
        if (ret) {
                ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
                            arvif->vdev_id, ret);
                return ret;
        }

        return 0;
}

void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
{
        struct ath12k_peer *peer, *tmp;
        struct ath12k_base *ab = ar->ab;

        lockdep_assert_held(&ar->conf_mutex);

        spin_lock_bh(&ab->base_lock);
        list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
                ath12k_dp_rx_peer_tid_cleanup(ar, peer);
                list_del(&peer->list);
                kfree(peer);
        }
        spin_unlock_bh(&ab->base_lock);

        ar->num_peers = 0;
        ar->num_stations = 0;
}

static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
{
        lockdep_assert_held(&ar->conf_mutex);

        if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
                return -ESHUTDOWN;

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev setup timeout %d\n",
                   ATH12K_VDEV_SETUP_TIMEOUT_HZ);

        if (!wait_for_completion_timeout(&ar->vdev_setup_done,
                                         ATH12K_VDEV_SETUP_TIMEOUT_HZ))
                return -ETIMEDOUT;

        return ar->last_wmi_vdev_start_status ?
-EINVAL : 0; 808 } 809 810 static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id) 811 { 812 int ret; 813 814 ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 815 if (ret) { 816 ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", 817 vdev_id, ret); 818 return ret; 819 } 820 821 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n", 822 vdev_id); 823 return 0; 824 } 825 826 static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, 827 struct cfg80211_chan_def *chandef) 828 { 829 struct ieee80211_channel *channel; 830 struct wmi_vdev_start_req_arg arg = {}; 831 int ret; 832 833 lockdep_assert_held(&ar->conf_mutex); 834 835 channel = chandef->chan; 836 arg.vdev_id = vdev_id; 837 arg.freq = channel->center_freq; 838 arg.band_center_freq1 = chandef->center_freq1; 839 arg.band_center_freq2 = chandef->center_freq2; 840 arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; 841 arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR); 842 843 arg.min_power = 0; 844 arg.max_power = channel->max_power; 845 arg.max_reg_power = channel->max_reg_power; 846 arg.max_antenna_gain = channel->max_antenna_gain; 847 848 arg.pref_tx_streams = ar->num_tx_chains; 849 arg.pref_rx_streams = ar->num_rx_chains; 850 arg.punct_bitmap = 0xFFFFFFFF; 851 852 arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); 853 854 reinit_completion(&ar->vdev_setup_done); 855 reinit_completion(&ar->vdev_delete_done); 856 857 ret = ath12k_wmi_vdev_start(ar, &arg, false); 858 if (ret) { 859 ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n", 860 vdev_id, ret); 861 return ret; 862 } 863 864 ret = ath12k_mac_vdev_setup_sync(ar); 865 if (ret) { 866 ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n", 867 vdev_id, ret); 868 return ret; 869 } 870 871 ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); 872 if (ret) { 873 ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", 874 vdev_id, ret); 875 goto vdev_stop; 876 } 877 878 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n", 879 vdev_id); 880 return 0; 881 882 vdev_stop: 883 ret = ath12k_wmi_vdev_stop(ar, vdev_id); 884 if (ret) 885 ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n", 886 vdev_id, ret); 887 return ret; 888 } 889 890 static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar) 891 { 892 int ret; 893 894 lockdep_assert_held(&ar->conf_mutex); 895 896 reinit_completion(&ar->vdev_setup_done); 897 898 ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id); 899 if (ret) 900 ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n", 901 ar->monitor_vdev_id, ret); 902 903 ret = ath12k_mac_vdev_setup_sync(ar); 904 if (ret) 905 ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n", 906 ar->monitor_vdev_id, ret); 907 908 ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id); 909 if (ret) 910 ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n", 911 ar->monitor_vdev_id, ret); 912 913 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n", 914 ar->monitor_vdev_id); 915 return ret; 916 } 917 918 static int ath12k_mac_monitor_vdev_create(struct ath12k *ar) 919 { 920 struct ath12k_pdev *pdev = ar->pdev; 921 struct ath12k_wmi_vdev_create_arg arg = {}; 922 int bit, ret; 923 u8 tmp_addr[6]; 924 u16 nss; 925 926 lockdep_assert_held(&ar->conf_mutex); 927 928 if (ar->monitor_vdev_created) 929 return 0; 930 931 if (ar->ab->free_vdev_map == 0) { 932 ath12k_warn(ar->ab, "failed to 
find free vdev id for monitor vdev\n"); 933 return -ENOMEM; 934 } 935 936 bit = __ffs64(ar->ab->free_vdev_map); 937 938 ar->monitor_vdev_id = bit; 939 940 arg.if_id = ar->monitor_vdev_id; 941 arg.type = WMI_VDEV_TYPE_MONITOR; 942 arg.subtype = WMI_VDEV_SUBTYPE_NONE; 943 arg.pdev_id = pdev->pdev_id; 944 arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID; 945 946 if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { 947 arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; 948 arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; 949 } 950 951 if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { 952 arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; 953 arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; 954 } 955 956 ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg); 957 if (ret) { 958 ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n", 959 ar->monitor_vdev_id, ret); 960 ar->monitor_vdev_id = -1; 961 return ret; 962 } 963 964 nss = hweight32(ar->cfg_tx_chainmask) ? : 1; 965 ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id, 966 WMI_VDEV_PARAM_NSS, nss); 967 if (ret) { 968 ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", 969 ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret); 970 return ret; 971 } 972 973 ret = ath12k_mac_txpower_recalc(ar); 974 if (ret) 975 return ret; 976 977 ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id; 978 ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 979 ar->num_created_vdevs++; 980 ar->monitor_vdev_created = true; 981 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n", 982 ar->monitor_vdev_id); 983 984 return 0; 985 } 986 987 static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar) 988 { 989 int ret; 990 unsigned long time_left; 991 992 lockdep_assert_held(&ar->conf_mutex); 993 994 if (!ar->monitor_vdev_created) 995 return 0; 996 997 reinit_completion(&ar->vdev_delete_done); 998 999 ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1000 if (ret) { 1001 ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n", 1002 ar->monitor_vdev_id, ret); 1003 return ret; 1004 } 1005 1006 time_left = wait_for_completion_timeout(&ar->vdev_delete_done, 1007 ATH12K_VDEV_DELETE_TIMEOUT_HZ); 1008 if (time_left == 0) { 1009 ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n"); 1010 } else { 1011 ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1012 ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id); 1013 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n", 1014 ar->monitor_vdev_id); 1015 ar->num_created_vdevs--; 1016 ar->monitor_vdev_id = -1; 1017 ar->monitor_vdev_created = false; 1018 } 1019 1020 return ret; 1021 } 1022 1023 static void 1024 ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 1025 struct ieee80211_chanctx_conf *conf, 1026 void *data) 1027 { 1028 struct cfg80211_chan_def **def = data; 1029 1030 *def = &conf->def; 1031 } 1032 1033 static int ath12k_mac_monitor_start(struct ath12k *ar) 1034 { 1035 struct cfg80211_chan_def *chandef = NULL; 1036 int ret; 1037 1038 lockdep_assert_held(&ar->conf_mutex); 1039 1040 if (ar->monitor_started) 1041 return 0; 1042 1043 ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar), 1044 ath12k_mac_get_any_chandef_iter, 1045 &chandef); 1046 if (!chandef) 1047 return 0; 1048 1049 ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef); 1050 if (ret) { 1051 ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret); 1052 ath12k_mac_monitor_vdev_delete(ar); 1053 
return ret; 1054 } 1055 1056 ar->monitor_started = true; 1057 ar->num_started_vdevs++; 1058 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false); 1059 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret); 1060 1061 return ret; 1062 } 1063 1064 static int ath12k_mac_monitor_stop(struct ath12k *ar) 1065 { 1066 int ret; 1067 1068 lockdep_assert_held(&ar->conf_mutex); 1069 1070 if (!ar->monitor_started) 1071 return 0; 1072 1073 ret = ath12k_mac_monitor_vdev_stop(ar); 1074 if (ret) { 1075 ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret); 1076 return ret; 1077 } 1078 1079 ar->monitor_started = false; 1080 ar->num_started_vdevs--; 1081 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true); 1082 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret); 1083 return ret; 1084 } 1085 1086 static int ath12k_mac_config(struct ath12k *ar, u32 changed) 1087 { 1088 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1089 struct ieee80211_conf *conf = &hw->conf; 1090 int ret = 0; 1091 1092 mutex_lock(&ar->conf_mutex); 1093 1094 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1095 ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR; 1096 if (ar->monitor_conf_enabled) { 1097 if (ar->monitor_vdev_created) 1098 goto exit; 1099 ret = ath12k_mac_monitor_vdev_create(ar); 1100 if (ret) 1101 goto exit; 1102 ret = ath12k_mac_monitor_start(ar); 1103 if (ret) 1104 goto err_mon_del; 1105 } else { 1106 if (!ar->monitor_vdev_created) 1107 goto exit; 1108 ret = ath12k_mac_monitor_stop(ar); 1109 if (ret) 1110 goto exit; 1111 ath12k_mac_monitor_vdev_delete(ar); 1112 } 1113 } 1114 1115 exit: 1116 mutex_unlock(&ar->conf_mutex); 1117 return ret; 1118 1119 err_mon_del: 1120 ath12k_mac_monitor_vdev_delete(ar); 1121 mutex_unlock(&ar->conf_mutex); 1122 return ret; 1123 } 1124 1125 static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed) 1126 { 1127 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 1128 struct ath12k *ar; 1129 int ret; 1130 1131 ar = ath12k_ah_to_ar(ah); 1132 1133 ret = ath12k_mac_config(ar, changed); 1134 if (ret) 1135 ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n", 1136 ar->pdev_idx, ret); 1137 1138 return ret; 1139 } 1140 1141 static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) 1142 { 1143 struct ath12k *ar = arvif->ar; 1144 struct ath12k_base *ab = ar->ab; 1145 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1146 struct ieee80211_vif *vif = arvif->vif; 1147 struct ieee80211_mutable_offsets offs = {}; 1148 struct sk_buff *bcn; 1149 struct ieee80211_mgmt *mgmt; 1150 u8 *ies; 1151 int ret; 1152 1153 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1154 return 0; 1155 1156 bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); 1157 if (!bcn) { 1158 ath12k_warn(ab, "failed to get beacon template from mac80211\n"); 1159 return -EPERM; 1160 } 1161 1162 ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); 1163 ies += sizeof(mgmt->u.beacon); 1164 1165 if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) 1166 arvif->rsnie_present = true; 1167 1168 if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 1169 WLAN_OUI_TYPE_MICROSOFT_WPA, 1170 ies, (skb_tail_pointer(bcn) - ies))) 1171 arvif->wpaie_present = true; 1172 1173 ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); 1174 1175 kfree_skb(bcn); 1176 1177 if (ret) 1178 ath12k_warn(ab, "failed to submit beacon template command: %d\n", 1179 ret); 1180 1181 return ret; 1182 } 1183 1184 static void ath12k_control_beaconing(struct ath12k_vif *arvif, 1185 struct 
ieee80211_bss_conf *info) 1186 { 1187 struct ath12k *ar = arvif->ar; 1188 int ret; 1189 1190 lockdep_assert_held(&arvif->ar->conf_mutex); 1191 1192 if (!info->enable_beacon) { 1193 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); 1194 if (ret) 1195 ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n", 1196 arvif->vdev_id, ret); 1197 1198 arvif->is_up = false; 1199 return; 1200 } 1201 1202 /* Install the beacon template to the FW */ 1203 ret = ath12k_mac_setup_bcn_tmpl(arvif); 1204 if (ret) { 1205 ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n", 1206 ret); 1207 return; 1208 } 1209 1210 arvif->aid = 0; 1211 1212 ether_addr_copy(arvif->bssid, info->bssid); 1213 1214 ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1215 arvif->bssid); 1216 if (ret) { 1217 ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n", 1218 arvif->vdev_id, ret); 1219 return; 1220 } 1221 1222 arvif->is_up = true; 1223 1224 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1225 } 1226 1227 static void ath12k_peer_assoc_h_basic(struct ath12k *ar, 1228 struct ieee80211_vif *vif, 1229 struct ieee80211_sta *sta, 1230 struct ath12k_wmi_peer_assoc_arg *arg) 1231 { 1232 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1233 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1234 u32 aid; 1235 1236 lockdep_assert_held(&ar->conf_mutex); 1237 1238 if (vif->type == NL80211_IFTYPE_STATION) 1239 aid = vif->cfg.aid; 1240 else 1241 aid = sta->aid; 1242 1243 ether_addr_copy(arg->peer_mac, sta->addr); 1244 arg->vdev_id = arvif->vdev_id; 1245 arg->peer_associd = aid; 1246 arg->auth_flag = true; 1247 /* TODO: STA WAR in ath10k for listen interval required? */ 1248 arg->peer_listen_intval = hw->conf.listen_interval; 1249 arg->peer_nss = 1; 1250 arg->peer_caps = vif->bss_conf.assoc_capability; 1251 } 1252 1253 static void ath12k_peer_assoc_h_crypto(struct ath12k *ar, 1254 struct ieee80211_vif *vif, 1255 struct ieee80211_sta *sta, 1256 struct ath12k_wmi_peer_assoc_arg *arg) 1257 { 1258 struct ieee80211_bss_conf *info = &vif->bss_conf; 1259 struct cfg80211_chan_def def; 1260 struct cfg80211_bss *bss; 1261 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1262 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1263 const u8 *rsnie = NULL; 1264 const u8 *wpaie = NULL; 1265 1266 lockdep_assert_held(&ar->conf_mutex); 1267 1268 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 1269 return; 1270 1271 bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0, 1272 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 1273 1274 if (arvif->rsnie_present || arvif->wpaie_present) { 1275 arg->need_ptk_4_way = true; 1276 if (arvif->wpaie_present) 1277 arg->need_gtk_2_way = true; 1278 } else if (bss) { 1279 const struct cfg80211_bss_ies *ies; 1280 1281 rcu_read_lock(); 1282 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 1283 1284 ies = rcu_dereference(bss->ies); 1285 1286 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 1287 WLAN_OUI_TYPE_MICROSOFT_WPA, 1288 ies->data, 1289 ies->len); 1290 rcu_read_unlock(); 1291 cfg80211_put_bss(hw->wiphy, bss); 1292 } 1293 1294 /* FIXME: base on RSN IE/WPA IE is a correct idea? */ 1295 if (rsnie || wpaie) { 1296 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1297 "%s: rsn ie found\n", __func__); 1298 arg->need_ptk_4_way = true; 1299 } 1300 1301 if (wpaie) { 1302 ath12k_dbg(ar->ab, ATH12K_DBG_WMI, 1303 "%s: wpa ie found\n", __func__); 1304 arg->need_gtk_2_way = true; 1305 } 1306 1307 if (sta->mfp) { 1308 /* TODO: Need to check if FW supports PMF? 
*/ 1309 arg->is_pmf_enabled = true; 1310 } 1311 1312 /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */ 1313 } 1314 1315 static void ath12k_peer_assoc_h_rates(struct ath12k *ar, 1316 struct ieee80211_vif *vif, 1317 struct ieee80211_sta *sta, 1318 struct ath12k_wmi_peer_assoc_arg *arg) 1319 { 1320 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1321 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 1322 struct cfg80211_chan_def def; 1323 const struct ieee80211_supported_band *sband; 1324 const struct ieee80211_rate *rates; 1325 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 1326 enum nl80211_band band; 1327 u32 ratemask; 1328 u8 rate; 1329 int i; 1330 1331 lockdep_assert_held(&ar->conf_mutex); 1332 1333 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 1334 return; 1335 1336 band = def.chan->band; 1337 sband = hw->wiphy->bands[band]; 1338 ratemask = sta->deflink.supp_rates[band]; 1339 ratemask &= arvif->bitrate_mask.control[band].legacy; 1340 rates = sband->bitrates; 1341 1342 rateset->num_rates = 0; 1343 1344 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 1345 if (!(ratemask & 1)) 1346 continue; 1347 1348 rate = ath12k_mac_bitrate_to_rate(rates->bitrate); 1349 rateset->rates[rateset->num_rates] = rate; 1350 rateset->num_rates++; 1351 } 1352 } 1353 1354 static bool 1355 ath12k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask) 1356 { 1357 int nss; 1358 1359 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 1360 if (ht_mcs_mask[nss]) 1361 return false; 1362 1363 return true; 1364 } 1365 1366 static bool 1367 ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask) 1368 { 1369 int nss; 1370 1371 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 1372 if (vht_mcs_mask[nss]) 1373 return false; 1374 1375 return true; 1376 } 1377 1378 static void ath12k_peer_assoc_h_ht(struct ath12k *ar, 1379 struct ieee80211_vif *vif, 1380 struct ieee80211_sta *sta, 1381 struct ath12k_wmi_peer_assoc_arg *arg) 1382 { 1383 const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; 1384 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1385 struct cfg80211_chan_def def; 1386 enum nl80211_band band; 1387 const u8 *ht_mcs_mask; 1388 int i, n; 1389 u8 max_nss; 1390 u32 stbc; 1391 1392 lockdep_assert_held(&ar->conf_mutex); 1393 1394 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 1395 return; 1396 1397 if (!ht_cap->ht_supported) 1398 return; 1399 1400 band = def.chan->band; 1401 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 1402 1403 if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) 1404 return; 1405 1406 arg->ht_flag = true; 1407 1408 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 1409 ht_cap->ampdu_factor)) - 1; 1410 1411 arg->peer_mpdu_density = 1412 ath12k_parse_mpdudensity(ht_cap->ampdu_density); 1413 1414 arg->peer_ht_caps = ht_cap->cap; 1415 arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG; 1416 1417 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 1418 arg->ldpc_flag = true; 1419 1420 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) { 1421 arg->bw_40 = true; 1422 arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG; 1423 } 1424 1425 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 1426 if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | 1427 IEEE80211_HT_CAP_SGI_40)) 1428 arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG; 1429 } 1430 1431 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 1432 arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG; 1433 arg->stbc_flag = true; 1434 } 1435 1436 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 1437 stbc = ht_cap->cap & 
IEEE80211_HT_CAP_RX_STBC; 1438 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 1439 stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S; 1440 arg->peer_rate_caps |= stbc; 1441 arg->stbc_flag = true; 1442 } 1443 1444 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 1445 arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG; 1446 else if (ht_cap->mcs.rx_mask[1]) 1447 arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG; 1448 1449 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 1450 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 1451 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 1452 max_nss = (i / 8) + 1; 1453 arg->peer_ht_rates.rates[n++] = i; 1454 } 1455 1456 /* This is a workaround for HT-enabled STAs which break the spec 1457 * and have no HT capabilities RX mask (no HT RX MCS map). 1458 * 1459 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 1460 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 1461 * 1462 * Firmware asserts if such situation occurs. 1463 */ 1464 if (n == 0) { 1465 arg->peer_ht_rates.num_rates = 8; 1466 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 1467 arg->peer_ht_rates.rates[i] = i; 1468 } else { 1469 arg->peer_ht_rates.num_rates = n; 1470 arg->peer_nss = min(sta->deflink.rx_nss, max_nss); 1471 } 1472 1473 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 1474 arg->peer_mac, 1475 arg->peer_ht_rates.num_rates, 1476 arg->peer_nss); 1477 } 1478 1479 static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 1480 { 1481 switch ((mcs_map >> (2 * nss)) & 0x3) { 1482 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 1483 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 1484 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 1485 } 1486 return 0; 1487 } 1488 1489 static u16 1490 ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 1491 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 1492 { 1493 int idx_limit; 1494 int nss; 1495 u16 mcs_map; 1496 u16 mcs; 1497 1498 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 1499 mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 1500 vht_mcs_limit[nss]; 1501 1502 if (mcs_map) 1503 idx_limit = fls(mcs_map) - 1; 1504 else 1505 idx_limit = -1; 1506 1507 switch (idx_limit) { 1508 case 0: 1509 case 1: 1510 case 2: 1511 case 3: 1512 case 4: 1513 case 5: 1514 case 6: 1515 case 7: 1516 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 1517 break; 1518 case 8: 1519 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 1520 break; 1521 case 9: 1522 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 1523 break; 1524 default: 1525 WARN_ON(1); 1526 fallthrough; 1527 case -1: 1528 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 1529 break; 1530 } 1531 1532 tx_mcs_set &= ~(0x3 << (nss * 2)); 1533 tx_mcs_set |= mcs << (nss * 2); 1534 } 1535 1536 return tx_mcs_set; 1537 } 1538 1539 static void ath12k_peer_assoc_h_vht(struct ath12k *ar, 1540 struct ieee80211_vif *vif, 1541 struct ieee80211_sta *sta, 1542 struct ath12k_wmi_peer_assoc_arg *arg) 1543 { 1544 const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap; 1545 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 1546 struct cfg80211_chan_def def; 1547 enum nl80211_band band; 1548 const u16 *vht_mcs_mask; 1549 u16 tx_mcs_map; 1550 u8 ampdu_factor; 1551 u8 max_nss, vht_mcs; 1552 int i; 1553 1554 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 1555 return; 1556 1557 if (!vht_cap->vht_supported) 1558 return; 1559 1560 band = def.chan->band; 1561 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 1562 1563 if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) 1564 
                return;

        arg->vht_flag = true;

        /* TODO: similar flags required? */
        arg->vht_capable = true;

        if (def.chan->band == NL80211_BAND_2GHZ)
                arg->vht_ng_flag = true;

        arg->peer_vht_caps = vht_cap->cap;

        ampdu_factor = (vht_cap->cap &
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

        /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
         * zero in VHT IE. Using it would result in degraded throughput.
         * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
         * it if VHT max_mpdu is smaller.
         */
        arg->peer_max_mpdu = max(arg->peer_max_mpdu,
                                 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
                                         ampdu_factor)) - 1);

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
                arg->bw_80 = true;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
                arg->bw_160 = true;

        /* Calculate peer NSS capability from VHT capabilities if STA
         * supports VHT.
         */
        for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
                vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
                          (2 * i) & 3;

                if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
                    vht_mcs_mask[i])
                        max_nss = i + 1;
        }
        arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
        arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
        arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
        arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);

        tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
        arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask);

        /* On the QCN9274 platform, VHT MCS rates 10 and 11 are enabled by
         * default. MCS rates 10 and 11 are not part of the 11ac standard,
         * so explicitly disable them in 11ac mode.
         */
        arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
        arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;

        if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
            IEEE80211_VHT_MCS_NOT_SUPPORTED)
                arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

        /* TODO: Check */
        arg->tx_max_mcs_nss = 0xFF;

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
                   sta->addr, arg->peer_max_mpdu, arg->peer_flags);

        /* TODO: rxnss_override */
}
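/* Fill the HE related fields of the peer assoc command from the station's HE
 * capabilities. The rx_mcs_80/rx_mcs_160 maps below carry two bits per
 * spatial stream (0 = MCS 0-7, 1 = MCS 0-9, 2 = MCS 0-11, 3 = not supported);
 * e.g. a map of 0xfffa advertises two streams, so max_nss works out to 2.
 */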
static void ath12k_peer_assoc_h_he(struct ath12k *ar,
                                   struct ieee80211_vif *vif,
                                   struct ieee80211_sta *sta,
                                   struct ath12k_wmi_peer_assoc_arg *arg)
{
        const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
        int i;
        u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
        u16 mcs_160_map, mcs_80_map;
        bool support_160;
        u16 v;

        if (!he_cap->has_he)
                return;

        arg->he_flag = true;

        support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
                         IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);

        /* The supported HE-MCS and NSS set of the peer he_cap is the
         * intersection with the self he_cap.
         */
        mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
        mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);

        if (support_160) {
                for (i = 7; i >= 0; i--) {
                        u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;

                        if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
                                rx_mcs_160 = i + 1;
                                break;
                        }
                }
        }

        for (i = 7; i >= 0; i--) {
                u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;

                if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
                        rx_mcs_80 = i + 1;
                        break;
                }
        }

        if (support_160)
                max_nss = min(rx_mcs_80, rx_mcs_160);
        else
                max_nss = rx_mcs_80;

        arg->peer_nss = min(sta->deflink.rx_nss, max_nss);

        memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
               sizeof(he_cap->he_cap_elem.mac_cap_info));
        memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
               sizeof(he_cap->he_cap_elem.phy_cap_info));
        arg->peer_he_ops = vif->bss_conf.he_oper.params;

        /* the topmost byte is used to indicate BSS color info */
        arg->peer_he_ops &= 0xffffff;

        /* As per section 26.6.1 of IEEE Std 802.11ax-2022, if the Max AMPDU
         * Exponent Extension in the HE cap is zero, use arg->peer_max_mpdu
         * as calculated while parsing the VHT caps (if present) or the HT
         * caps (if the VHT caps are not present).
         *
         * For a non-zero Max AMPDU Exponent Extension in the HE MAC caps:
         * if an HE STA sends both VHT and HE cap IEs in the assoc request,
         * use a MAX_AMPDU_LEN_FACTOR of 20 to calculate the max_ampdu
         * length. If an HE STA sends HE and HT caps but no VHT cap, use a
         * MAX_AMPDU_LEN_FACTOR of 16 instead.
         */
        ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
                                   IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

        if (ampdu_factor) {
                if (sta->deflink.vht_cap.vht_supported)
                        arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
                                                    ampdu_factor)) - 1;
                else if (sta->deflink.ht_cap.ht_supported)
                        arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
                                                    ampdu_factor)) - 1;
        }
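        /* If the peer advertises HE PPE thresholds, repack them for WMI: for
         * each (NSS, RU) pair flagged in ru_bit_mask, six threshold bits
         * (PPET16 and PPET8) are read starting at bit 7 of ppe_thres[] and
         * stored at bit offset (ru * 6) within ppet16_ppet8_ru3_ru0[nss],
         * following the 802.11ax PPE Thresholds field layout.
         */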
        if (he_cap->he_cap_elem.phy_cap_info[6] &
            IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
                int bit = 7;
                int nss, ru;

                arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
                                          IEEE80211_PPE_THRES_NSS_MASK;
                arg->peer_ppet.ru_bit_mask =
                        (he_cap->ppe_thres[0] &
                         IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
                        IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;

                for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
                        for (ru = 0; ru < 4; ru++) {
                                u32 val = 0;
                                int i;

                                if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
                                        continue;
                                for (i = 0; i < 6; i++) {
                                        val >>= 1;
                                        val |= ((he_cap->ppe_thres[bit / 8] >>
                                                 (bit % 8)) & 0x1) << 5;
                                        bit++;
                                }
                                arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
                                        val << (ru * 6);
                        }
                }
        }

        if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
                arg->twt_responder = true;
        if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
                arg->twt_requester = true;

        switch (sta->deflink.bandwidth) {
        case IEEE80211_STA_RX_BW_160:
                if (he_cap->he_cap_elem.phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
                        v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
                        arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;

                        v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
                        arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;

                        arg->peer_he_mcs_count++;
                }
                v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
                arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;

                v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
                arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;

                arg->peer_he_mcs_count++;
                fallthrough;

        default:
                v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
                arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;

                v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
                arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;

                arg->peer_he_mcs_count++;
                break;
        }
}

static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
                                     struct ath12k_wmi_peer_assoc_arg *arg)
{
        const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
        int smps;

        if (!ht_cap->ht_supported)
                return;

        smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
        smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;

        switch (smps) {
        case WLAN_HT_CAP_SM_PS_STATIC:
                arg->static_mimops_flag = true;
                break;
        case WLAN_HT_CAP_SM_PS_DYNAMIC:
                arg->dynamic_mimops_flag = true;
                break;
        case WLAN_HT_CAP_SM_PS_DISABLED:
                arg->spatial_mux_flag = true;
                break;
        default:
                break;
        }
}
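/* Map the station's WME/U-APSD state onto the WMI peer assoc flags. On AP
 * vdevs a WME peer with U-APSD queues additionally gets the APSD flag and
 * WMI_HOST_RC_UAPSD_FLAG; on STA vdevs only the WME/QoS flags are set.
 */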
static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
                                    struct ieee80211_vif *vif,
                                    struct ieee80211_sta *sta,
                                    struct ath12k_wmi_peer_assoc_arg *arg)
{
        struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);

        switch (arvif->vdev_type) {
        case WMI_VDEV_TYPE_AP:
                if (sta->wme) {
                        /* TODO: Check WME vs QoS */
                        arg->is_wme_set = true;
                        arg->qos_flag = true;
                }

                if (sta->wme && sta->uapsd_queues) {
                        /* TODO: Check WME vs QoS */
                        arg->is_wme_set = true;
                        arg->apsd_flag = true;
                        arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
                }
                break;
        case WMI_VDEV_TYPE_STA:
                if (sta->wme) {
                        arg->is_wme_set = true;
                        arg->qos_flag = true;
                }
                break;
        default:
                break;
        }

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
                   sta->addr, arg->qos_flag);
}

static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
                                    struct ath12k_vif *arvif,
                                    struct ieee80211_sta *sta)
{
        struct ath12k_wmi_ap_ps_arg arg;
        u32 max_sp;
        u32 uapsd;
        int ret;

        lockdep_assert_held(&ar->conf_mutex);

        arg.vdev_id = arvif->vdev_id;

        ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
                   sta->uapsd_queues, sta->max_sp);

        uapsd = 0;
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
                uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
                         WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
                uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
                         WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
                uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
                         WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
        if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
                uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
                         WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;

        max_sp = 0;
        if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
                max_sp = sta->max_sp;

        arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
        arg.value = uapsd;
        ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
        if (ret)
                goto err;

        arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
        arg.value = max_sp;
        ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
        if (ret)
                goto err;

        /* TODO: revisit during testing */
        arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
        arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
        ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
        if (ret)
                goto err;

        arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
        arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
        ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
        if (ret)
                goto err;

        return 0;

err:
        ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
                    arg.param, arvif->vdev_id, ret);
        return ret;
}

static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
        return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
               ATH12K_MAC_FIRST_OFDM_RATE_IDX;
}
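/* The helpers below translate the station's RX bandwidth (and, for 160/320 MHz,
 * its PHY capability bits) into a WMI phymode for the VHT, HE and EHT cases.
 * Capability combinations the code does not expect fall back to the widest
 * mode of that family, as flagged by the "not sure if this is a valid case?"
 * notes.
 */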
static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
                                                    struct ieee80211_sta *sta)
{
        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
                switch (sta->deflink.vht_cap.cap &
                        IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
                case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
                        return MODE_11AC_VHT160;
                case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
                        return MODE_11AC_VHT80_80;
                default:
                        /* not sure if this is a valid case? */
                        return MODE_11AC_VHT160;
                }
        }

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
                return MODE_11AC_VHT80;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
                return MODE_11AC_VHT40;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
                return MODE_11AC_VHT20;

        return MODE_UNKNOWN;
}

static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
                                                   struct ieee80211_sta *sta)
{
        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
                if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
                        return MODE_11AX_HE160;
                else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
                         IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
                        return MODE_11AX_HE80_80;
                /* not sure if this is a valid case? */
                return MODE_11AX_HE160;
        }

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
                return MODE_11AX_HE80;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
                return MODE_11AX_HE40;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
                return MODE_11AX_HE20;

        return MODE_UNKNOWN;
}

static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
                                                    struct ieee80211_sta *sta)
{
        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
                if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
                    IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
                        return MODE_11BE_EHT320;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
                if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
                        return MODE_11BE_EHT160;

                if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
                    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
                        return MODE_11BE_EHT80_80;

                ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 MHz: %d\n",
                            sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);

                return MODE_11BE_EHT160;
        }

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
                return MODE_11BE_EHT80;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
                return MODE_11BE_EHT40;

        if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
                return MODE_11BE_EHT20;

        return MODE_UNKNOWN;
}
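/* Select the peer phymode by capability precedence: EHT first, then HE, VHT,
 * HT and finally the legacy modes. The 2 GHz band uses the *_2G variants and
 * falls back to 11g or 11b based on ath12k_mac_sta_has_ofdm_only().
 */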
2045 phymode = MODE_11AX_HE20_2G; 2046 } else if (sta->deflink.vht_cap.vht_supported && 2047 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2048 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2049 phymode = MODE_11AC_VHT40; 2050 else 2051 phymode = MODE_11AC_VHT20; 2052 } else if (sta->deflink.ht_cap.ht_supported && 2053 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2054 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2055 phymode = MODE_11NG_HT40; 2056 else 2057 phymode = MODE_11NG_HT20; 2058 } else if (ath12k_mac_sta_has_ofdm_only(sta)) { 2059 phymode = MODE_11G; 2060 } else { 2061 phymode = MODE_11B; 2062 } 2063 break; 2064 case NL80211_BAND_5GHZ: 2065 case NL80211_BAND_6GHZ: 2066 /* Check EHT first */ 2067 if (sta->deflink.eht_cap.has_eht) { 2068 phymode = ath12k_mac_get_phymode_eht(ar, sta); 2069 } else if (sta->deflink.he_cap.has_he) { 2070 phymode = ath12k_mac_get_phymode_he(ar, sta); 2071 } else if (sta->deflink.vht_cap.vht_supported && 2072 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2073 phymode = ath12k_mac_get_phymode_vht(ar, sta); 2074 } else if (sta->deflink.ht_cap.ht_supported && 2075 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2076 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) 2077 phymode = MODE_11NA_HT40; 2078 else 2079 phymode = MODE_11NA_HT20; 2080 } else { 2081 phymode = MODE_11A; 2082 } 2083 break; 2084 default: 2085 break; 2086 } 2087 2088 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n", 2089 sta->addr, ath12k_mac_phymode_str(phymode)); 2090 2091 arg->peer_phymode = phymode; 2092 WARN_ON(phymode == MODE_UNKNOWN); 2093 } 2094 2095 static void ath12k_mac_set_eht_mcs(u8 rx_tx_mcs7, u8 rx_tx_mcs9, 2096 u8 rx_tx_mcs11, u8 rx_tx_mcs13, 2097 u32 *rx_mcs, u32 *tx_mcs) 2098 { 2099 *rx_mcs = 0; 2100 u32p_replace_bits(rx_mcs, 2101 u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX), 2102 WMI_EHT_MCS_NSS_0_7); 2103 u32p_replace_bits(rx_mcs, 2104 u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX), 2105 WMI_EHT_MCS_NSS_8_9); 2106 u32p_replace_bits(rx_mcs, 2107 u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX), 2108 WMI_EHT_MCS_NSS_10_11); 2109 u32p_replace_bits(rx_mcs, 2110 u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX), 2111 WMI_EHT_MCS_NSS_12_13); 2112 2113 *tx_mcs = 0; 2114 u32p_replace_bits(tx_mcs, 2115 u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX), 2116 WMI_EHT_MCS_NSS_0_7); 2117 u32p_replace_bits(tx_mcs, 2118 u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX), 2119 WMI_EHT_MCS_NSS_8_9); 2120 u32p_replace_bits(tx_mcs, 2121 u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX), 2122 WMI_EHT_MCS_NSS_10_11); 2123 u32p_replace_bits(tx_mcs, 2124 u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX), 2125 WMI_EHT_MCS_NSS_12_13); 2126 } 2127 2128 static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, 2129 struct ath12k_wmi_ppe_threshold_arg *ppet) 2130 { 2131 u32 bit_pos = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE, val; 2132 u8 nss, ru, i; 2133 u8 ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2134 2135 ppet->numss_m1 = u8_get_bits(ppe_thres[0], IEEE80211_EHT_PPE_THRES_NSS_MASK); 2136 ppet->ru_bit_mask = u16_get_bits(get_unaligned_le16(ppe_thres), 2137 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2138 2139 for (nss = 0; nss <= ppet->numss_m1; nss++) { 2140 for (ru = 0; 2141 ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2142 ru++) { 2143 if ((ppet->ru_bit_mask & BIT(ru)) == 0) 2144 continue; 2145 2146 val = 0; 2147 for (i = 0; i < ppet_bit_len_per_ru; i++) { 2148 val |= (((ppe_thres[bit_pos 
/ 8] >> 2149 (bit_pos % 8)) & 0x1) << i); 2150 bit_pos++; 2151 } 2152 ppet->ppet16_ppet8_ru3_ru0[nss] |= 2153 (val << (ru * ppet_bit_len_per_ru)); 2154 } 2155 } 2156 } 2157 2158 static void ath12k_peer_assoc_h_eht(struct ath12k *ar, 2159 struct ieee80211_vif *vif, 2160 struct ieee80211_sta *sta, 2161 struct ath12k_wmi_peer_assoc_arg *arg) 2162 { 2163 const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; 2164 const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; 2165 const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; 2166 const struct ieee80211_eht_mcs_nss_supp_bw *bw; 2167 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2168 u32 *rx_mcs, *tx_mcs; 2169 2170 if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) 2171 return; 2172 2173 arg->eht_flag = true; 2174 2175 if ((eht_cap->eht_cap_elem.phy_cap_info[5] & 2176 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) && 2177 eht_cap->eht_ppe_thres[0] != 0) 2178 ath12k_mac_set_eht_ppe_threshold(eht_cap->eht_ppe_thres, 2179 &arg->peer_eht_ppet); 2180 2181 memcpy(arg->peer_eht_cap_mac, eht_cap->eht_cap_elem.mac_cap_info, 2182 sizeof(eht_cap->eht_cap_elem.mac_cap_info)); 2183 memcpy(arg->peer_eht_cap_phy, eht_cap->eht_cap_elem.phy_cap_info, 2184 sizeof(eht_cap->eht_cap_elem.phy_cap_info)); 2185 2186 rx_mcs = arg->peer_eht_rx_mcs_set; 2187 tx_mcs = arg->peer_eht_tx_mcs_set; 2188 2189 switch (sta->deflink.bandwidth) { 2190 case IEEE80211_STA_RX_BW_320: 2191 bw = &eht_cap->eht_mcs_nss_supp.bw._320; 2192 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2193 bw->rx_tx_mcs9_max_nss, 2194 bw->rx_tx_mcs11_max_nss, 2195 bw->rx_tx_mcs13_max_nss, 2196 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320], 2197 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320]); 2198 arg->peer_eht_mcs_count++; 2199 fallthrough; 2200 case IEEE80211_STA_RX_BW_160: 2201 bw = &eht_cap->eht_mcs_nss_supp.bw._160; 2202 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2203 bw->rx_tx_mcs9_max_nss, 2204 bw->rx_tx_mcs11_max_nss, 2205 bw->rx_tx_mcs13_max_nss, 2206 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160], 2207 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160]); 2208 arg->peer_eht_mcs_count++; 2209 fallthrough; 2210 default: 2211 if ((he_cap->he_cap_elem.phy_cap_info[0] & 2212 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 2213 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | 2214 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | 2215 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) { 2216 bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz; 2217 2218 ath12k_mac_set_eht_mcs(bw_20->rx_tx_mcs7_max_nss, 2219 bw_20->rx_tx_mcs9_max_nss, 2220 bw_20->rx_tx_mcs11_max_nss, 2221 bw_20->rx_tx_mcs13_max_nss, 2222 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], 2223 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); 2224 } else { 2225 bw = &eht_cap->eht_mcs_nss_supp.bw._80; 2226 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2227 bw->rx_tx_mcs9_max_nss, 2228 bw->rx_tx_mcs11_max_nss, 2229 bw->rx_tx_mcs13_max_nss, 2230 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], 2231 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); 2232 } 2233 2234 arg->peer_eht_mcs_count++; 2235 break; 2236 } 2237 2238 arg->punct_bitmap = ~arvif->punct_bitmap; 2239 } 2240 2241 static void ath12k_peer_assoc_prepare(struct ath12k *ar, 2242 struct ieee80211_vif *vif, 2243 struct ieee80211_sta *sta, 2244 struct ath12k_wmi_peer_assoc_arg *arg, 2245 bool reassoc) 2246 { 2247 lockdep_assert_held(&ar->conf_mutex); 2248 2249 memset(arg, 0, sizeof(*arg)); 2250 2251 reinit_completion(&ar->peer_assoc_done); 2252 2253 arg->peer_new_assoc = 
!reassoc; 2254 ath12k_peer_assoc_h_basic(ar, vif, sta, arg); 2255 ath12k_peer_assoc_h_crypto(ar, vif, sta, arg); 2256 ath12k_peer_assoc_h_rates(ar, vif, sta, arg); 2257 ath12k_peer_assoc_h_ht(ar, vif, sta, arg); 2258 ath12k_peer_assoc_h_vht(ar, vif, sta, arg); 2259 ath12k_peer_assoc_h_he(ar, vif, sta, arg); 2260 ath12k_peer_assoc_h_eht(ar, vif, sta, arg); 2261 ath12k_peer_assoc_h_qos(ar, vif, sta, arg); 2262 ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); 2263 ath12k_peer_assoc_h_smps(sta, arg); 2264 2265 /* TODO: amsdu_disable req? */ 2266 } 2267 2268 static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, 2269 const u8 *addr, 2270 const struct ieee80211_sta_ht_cap *ht_cap) 2271 { 2272 int smps; 2273 2274 if (!ht_cap->ht_supported) 2275 return 0; 2276 2277 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; 2278 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2279 2280 if (smps >= ARRAY_SIZE(ath12k_smps_map)) 2281 return -EINVAL; 2282 2283 return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, 2284 WMI_PEER_MIMO_PS_STATE, 2285 ath12k_smps_map[smps]); 2286 } 2287 2288 static void ath12k_bss_assoc(struct ath12k *ar, 2289 struct ath12k_vif *arvif, 2290 struct ieee80211_bss_conf *bss_conf) 2291 { 2292 struct ieee80211_vif *vif = arvif->vif; 2293 struct ath12k_wmi_peer_assoc_arg peer_arg; 2294 struct ieee80211_sta *ap_sta; 2295 struct ath12k_peer *peer; 2296 bool is_auth = false; 2297 int ret; 2298 2299 lockdep_assert_held(&ar->conf_mutex); 2300 2301 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2302 arvif->vdev_id, arvif->bssid, arvif->aid); 2303 2304 rcu_read_lock(); 2305 2306 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2307 if (!ap_sta) { 2308 ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", 2309 bss_conf->bssid, arvif->vdev_id); 2310 rcu_read_unlock(); 2311 return; 2312 } 2313 2314 ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); 2315 2316 rcu_read_unlock(); 2317 2318 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 2319 if (ret) { 2320 ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", 2321 bss_conf->bssid, arvif->vdev_id, ret); 2322 return; 2323 } 2324 2325 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { 2326 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 2327 bss_conf->bssid, arvif->vdev_id); 2328 return; 2329 } 2330 2331 ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, 2332 &ap_sta->deflink.ht_cap); 2333 if (ret) { 2334 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 2335 arvif->vdev_id, ret); 2336 return; 2337 } 2338 2339 WARN_ON(arvif->is_up); 2340 2341 arvif->aid = vif->cfg.aid; 2342 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2343 2344 ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2345 if (ret) { 2346 ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n", 2347 arvif->vdev_id, ret); 2348 return; 2349 } 2350 2351 arvif->is_up = true; 2352 2353 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2354 "mac vdev %d up (associated) bssid %pM aid %d\n", 2355 arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); 2356 2357 spin_lock_bh(&ar->ab->base_lock); 2358 2359 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); 2360 if (peer && peer->is_authorized) 2361 is_auth = true; 2362 2363 spin_unlock_bh(&ar->ab->base_lock); 2364 2365 /* Authorize BSS Peer */ 2366 if (is_auth) { 2367 ret = ath12k_wmi_set_peer_param(ar, arvif->bssid, 2368 arvif->vdev_id, 2369 WMI_PEER_AUTHORIZE, 2370 1); 2371 if 
(ret) 2372 ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); 2373 } 2374 2375 ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, 2376 &bss_conf->he_obss_pd); 2377 if (ret) 2378 ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", 2379 arvif->vdev_id, ret); 2380 } 2381 2382 static void ath12k_bss_disassoc(struct ath12k *ar, 2383 struct ath12k_vif *arvif) 2384 { 2385 int ret; 2386 2387 lockdep_assert_held(&ar->conf_mutex); 2388 2389 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2390 arvif->vdev_id, arvif->bssid); 2391 2392 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); 2393 if (ret) 2394 ath12k_warn(ar->ab, "failed to down vdev %i: %d\n", 2395 arvif->vdev_id, ret); 2396 2397 arvif->is_up = false; 2398 2399 /* TODO: cancel connection_loss_work */ 2400 } 2401 2402 static u32 ath12k_mac_get_rate_hw_value(int bitrate) 2403 { 2404 u32 preamble; 2405 u16 hw_value; 2406 int rate; 2407 size_t i; 2408 2409 if (ath12k_mac_bitrate_is_cck(bitrate)) 2410 preamble = WMI_RATE_PREAMBLE_CCK; 2411 else 2412 preamble = WMI_RATE_PREAMBLE_OFDM; 2413 2414 for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) { 2415 if (ath12k_legacy_rates[i].bitrate != bitrate) 2416 continue; 2417 2418 hw_value = ath12k_legacy_rates[i].hw_value; 2419 rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble); 2420 2421 return rate; 2422 } 2423 2424 return -EINVAL; 2425 } 2426 2427 static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, 2428 struct ieee80211_vif *vif, 2429 struct cfg80211_chan_def *def) 2430 { 2431 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2432 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2433 const struct ieee80211_supported_band *sband; 2434 u8 basic_rate_idx; 2435 int hw_rate_code; 2436 u32 vdev_param; 2437 u16 bitrate; 2438 int ret; 2439 2440 lockdep_assert_held(&ar->conf_mutex); 2441 2442 sband = hw->wiphy->bands[def->chan->band]; 2443 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 2444 bitrate = sband->bitrates[basic_rate_idx].bitrate; 2445 2446 hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate); 2447 if (hw_rate_code < 0) { 2448 ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate); 2449 return; 2450 } 2451 2452 vdev_param = WMI_VDEV_PARAM_MGMT_RATE; 2453 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, 2454 hw_rate_code); 2455 if (ret) 2456 ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); 2457 2458 vdev_param = WMI_VDEV_PARAM_BEACON_RATE; 2459 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, 2460 hw_rate_code); 2461 if (ret) 2462 ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); 2463 } 2464 2465 static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, 2466 struct ieee80211_bss_conf *info) 2467 { 2468 struct ath12k *ar = arvif->ar; 2469 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2470 struct sk_buff *tmpl; 2471 int ret; 2472 u32 interval; 2473 bool unsol_bcast_probe_resp_enabled = false; 2474 2475 if (info->fils_discovery.max_interval) { 2476 interval = info->fils_discovery.max_interval; 2477 2478 tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif); 2479 if (tmpl) 2480 ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, 2481 tmpl); 2482 } else if (info->unsol_bcast_probe_resp_interval) { 2483 unsol_bcast_probe_resp_enabled = 1; 2484 interval = info->unsol_bcast_probe_resp_interval; 2485 2486 tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, 2487 arvif->vif); 2488 if (tmpl) 2489 ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, 2490 tmpl); 
2491 } else { /* Disable */ 2492 return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false); 2493 } 2494 2495 if (!tmpl) { 2496 ath12k_warn(ar->ab, 2497 "mac vdev %i failed to retrieve %s template\n", 2498 arvif->vdev_id, (unsol_bcast_probe_resp_enabled ? 2499 "unsolicited broadcast probe response" : 2500 "FILS discovery")); 2501 return -EPERM; 2502 } 2503 kfree_skb(tmpl); 2504 2505 if (!ret) 2506 ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval, 2507 unsol_bcast_probe_resp_enabled); 2508 2509 return ret; 2510 } 2511 2512 static void ath12k_mac_bss_info_changed(struct ath12k *ar, 2513 struct ath12k_vif *arvif, 2514 struct ieee80211_bss_conf *info, 2515 u64 changed) 2516 { 2517 struct ieee80211_vif *vif = arvif->vif; 2518 struct cfg80211_chan_def def; 2519 u32 param_id, param_value; 2520 enum nl80211_band band; 2521 u32 vdev_param; 2522 int mcast_rate; 2523 u32 preamble; 2524 u16 hw_value; 2525 u16 bitrate; 2526 int ret; 2527 u8 rateidx; 2528 u32 rate; 2529 2530 lockdep_assert_held(&ar->conf_mutex); 2531 2532 if (changed & BSS_CHANGED_BEACON_INT) { 2533 arvif->beacon_interval = info->beacon_int; 2534 2535 param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; 2536 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2537 param_id, 2538 arvif->beacon_interval); 2539 if (ret) 2540 ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n", 2541 arvif->vdev_id); 2542 else 2543 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2544 "Beacon interval: %d set for VDEV: %d\n", 2545 arvif->beacon_interval, arvif->vdev_id); 2546 } 2547 2548 if (changed & BSS_CHANGED_BEACON) { 2549 param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; 2550 param_value = WMI_BEACON_BURST_MODE; 2551 ret = ath12k_wmi_pdev_set_param(ar, param_id, 2552 param_value, ar->pdev->pdev_id); 2553 if (ret) 2554 ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n", 2555 arvif->vdev_id); 2556 else 2557 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2558 "Set burst beacon mode for VDEV: %d\n", 2559 arvif->vdev_id); 2560 2561 ret = ath12k_mac_setup_bcn_tmpl(arvif); 2562 if (ret) 2563 ath12k_warn(ar->ab, "failed to update bcn template: %d\n", 2564 ret); 2565 } 2566 2567 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 2568 arvif->dtim_period = info->dtim_period; 2569 2570 param_id = WMI_VDEV_PARAM_DTIM_PERIOD; 2571 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2572 param_id, 2573 arvif->dtim_period); 2574 2575 if (ret) 2576 ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n", 2577 arvif->vdev_id, ret); 2578 else 2579 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2580 "DTIM period: %d set for VDEV: %d\n", 2581 arvif->dtim_period, arvif->vdev_id); 2582 } 2583 2584 if (changed & BSS_CHANGED_SSID && 2585 vif->type == NL80211_IFTYPE_AP) { 2586 arvif->u.ap.ssid_len = vif->cfg.ssid_len; 2587 if (vif->cfg.ssid_len) 2588 memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); 2589 arvif->u.ap.hidden_ssid = info->hidden_ssid; 2590 } 2591 2592 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 2593 ether_addr_copy(arvif->bssid, info->bssid); 2594 2595 if (changed & BSS_CHANGED_BEACON_ENABLED) { 2596 ath12k_control_beaconing(arvif, info); 2597 2598 if (arvif->is_up && vif->bss_conf.he_support && 2599 vif->bss_conf.he_oper.params) { 2600 /* TODO: Extend to support 1024 BA Bitmap size */ 2601 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2602 WMI_VDEV_PARAM_BA_MODE, 2603 WMI_BA_MODE_BUFFER_SIZE_256); 2604 if (ret) 2605 ath12k_warn(ar->ab, 2606 "failed to set BA BUFFER SIZE 256 for vdev: %d\n", 2607 
arvif->vdev_id); 2608 2609 param_id = WMI_VDEV_PARAM_HEOPS_0_31; 2610 param_value = vif->bss_conf.he_oper.params; 2611 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2612 param_id, param_value); 2613 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2614 "he oper param: %x set for VDEV: %d\n", 2615 param_value, arvif->vdev_id); 2616 2617 if (ret) 2618 ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", 2619 param_value, arvif->vdev_id, ret); 2620 } 2621 } 2622 2623 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 2624 u32 cts_prot; 2625 2626 cts_prot = !!(info->use_cts_prot); 2627 param_id = WMI_VDEV_PARAM_PROTECTION_MODE; 2628 2629 if (arvif->is_started) { 2630 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2631 param_id, cts_prot); 2632 if (ret) 2633 ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n", 2634 arvif->vdev_id); 2635 else 2636 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n", 2637 cts_prot, arvif->vdev_id); 2638 } else { 2639 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n"); 2640 } 2641 } 2642 2643 if (changed & BSS_CHANGED_ERP_SLOT) { 2644 u32 slottime; 2645 2646 if (info->use_short_slot) 2647 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 2648 2649 else 2650 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 2651 2652 param_id = WMI_VDEV_PARAM_SLOT_TIME; 2653 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2654 param_id, slottime); 2655 if (ret) 2656 ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n", 2657 arvif->vdev_id); 2658 else 2659 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2660 "Set slottime: %d for VDEV: %d\n", 2661 slottime, arvif->vdev_id); 2662 } 2663 2664 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2665 u32 preamble; 2666 2667 if (info->use_short_preamble) 2668 preamble = WMI_VDEV_PREAMBLE_SHORT; 2669 else 2670 preamble = WMI_VDEV_PREAMBLE_LONG; 2671 2672 param_id = WMI_VDEV_PARAM_PREAMBLE; 2673 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2674 param_id, preamble); 2675 if (ret) 2676 ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n", 2677 arvif->vdev_id); 2678 else 2679 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2680 "Set preamble: %d for VDEV: %d\n", 2681 preamble, arvif->vdev_id); 2682 } 2683 2684 if (changed & BSS_CHANGED_ASSOC) { 2685 if (vif->cfg.assoc) 2686 ath12k_bss_assoc(ar, arvif, info); 2687 else 2688 ath12k_bss_disassoc(ar, arvif); 2689 } 2690 2691 if (changed & BSS_CHANGED_TXPOWER) { 2692 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n", 2693 arvif->vdev_id, info->txpower); 2694 2695 arvif->txpower = info->txpower; 2696 ath12k_mac_txpower_recalc(ar); 2697 } 2698 2699 if (changed & BSS_CHANGED_MCAST_RATE && 2700 !ath12k_mac_vif_chan(arvif->vif, &def)) { 2701 band = def.chan->band; 2702 mcast_rate = vif->bss_conf.mcast_rate[band]; 2703 2704 if (mcast_rate > 0) 2705 rateidx = mcast_rate - 1; 2706 else 2707 rateidx = ffs(vif->bss_conf.basic_rates) - 1; 2708 2709 if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) 2710 rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; 2711 2712 bitrate = ath12k_legacy_rates[rateidx].bitrate; 2713 hw_value = ath12k_legacy_rates[rateidx].hw_value; 2714 2715 if (ath12k_mac_bitrate_is_cck(bitrate)) 2716 preamble = WMI_RATE_PREAMBLE_CCK; 2717 else 2718 preamble = WMI_RATE_PREAMBLE_OFDM; 2719 2720 rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble); 2721 2722 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2723 "mac vdev %d mcast_rate %x\n", 2724 arvif->vdev_id, rate); 2725 2726 vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE; 
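/* The same legacy rate code derived above is programmed first as the
 * multicast data rate and then, immediately below, as the broadcast
 * data rate for this vdev.
 */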
2727 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2728 vdev_param, rate); 2729 if (ret) 2730 ath12k_warn(ar->ab, 2731 "failed to set mcast rate on vdev %i: %d\n", 2732 arvif->vdev_id, ret); 2733 2734 vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE; 2735 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2736 vdev_param, rate); 2737 if (ret) 2738 ath12k_warn(ar->ab, 2739 "failed to set bcast rate on vdev %i: %d\n", 2740 arvif->vdev_id, ret); 2741 } 2742 2743 if (changed & BSS_CHANGED_BASIC_RATES && 2744 !ath12k_mac_vif_chan(arvif->vif, &def)) 2745 ath12k_recalculate_mgmt_rate(ar, vif, &def); 2746 2747 if (changed & BSS_CHANGED_TWT) { 2748 if (info->twt_requester || info->twt_responder) 2749 ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id); 2750 else 2751 ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); 2752 } 2753 2754 if (changed & BSS_CHANGED_HE_OBSS_PD) 2755 ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, 2756 &info->he_obss_pd); 2757 2758 if (changed & BSS_CHANGED_HE_BSS_COLOR) { 2759 if (vif->type == NL80211_IFTYPE_AP) { 2760 ret = ath12k_wmi_obss_color_cfg_cmd(ar, 2761 arvif->vdev_id, 2762 info->he_bss_color.color, 2763 ATH12K_BSS_COLOR_AP_PERIODS, 2764 info->he_bss_color.enabled); 2765 if (ret) 2766 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", 2767 arvif->vdev_id, ret); 2768 } else if (vif->type == NL80211_IFTYPE_STATION) { 2769 ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar, 2770 arvif->vdev_id, 2771 1); 2772 if (ret) 2773 ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n", 2774 arvif->vdev_id, ret); 2775 ret = ath12k_wmi_obss_color_cfg_cmd(ar, 2776 arvif->vdev_id, 2777 0, 2778 ATH12K_BSS_COLOR_STA_PERIODS, 2779 1); 2780 if (ret) 2781 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", 2782 arvif->vdev_id, ret); 2783 } 2784 } 2785 2786 ath12k_mac_fils_discovery(arvif, info); 2787 } 2788 2789 static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 2790 struct ieee80211_vif *vif, 2791 struct ieee80211_bss_conf *info, 2792 u64 changed) 2793 { 2794 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 2795 struct ath12k *ar; 2796 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2797 2798 ar = ath12k_ah_to_ar(ah); 2799 2800 mutex_lock(&ar->conf_mutex); 2801 2802 ath12k_mac_bss_info_changed(ar, arvif, info, changed); 2803 2804 mutex_unlock(&ar->conf_mutex); 2805 } 2806 2807 void __ath12k_mac_scan_finish(struct ath12k *ar) 2808 { 2809 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2810 2811 lockdep_assert_held(&ar->data_lock); 2812 2813 switch (ar->scan.state) { 2814 case ATH12K_SCAN_IDLE: 2815 break; 2816 case ATH12K_SCAN_RUNNING: 2817 case ATH12K_SCAN_ABORTING: 2818 if (ar->scan.is_roc && ar->scan.roc_notify) 2819 ieee80211_remain_on_channel_expired(hw); 2820 fallthrough; 2821 case ATH12K_SCAN_STARTING: 2822 if (!ar->scan.is_roc) { 2823 struct cfg80211_scan_info info = { 2824 .aborted = ((ar->scan.state == 2825 ATH12K_SCAN_ABORTING) || 2826 (ar->scan.state == 2827 ATH12K_SCAN_STARTING)), 2828 }; 2829 2830 ieee80211_scan_completed(hw, &info); 2831 } 2832 2833 ar->scan.state = ATH12K_SCAN_IDLE; 2834 ar->scan_channel = NULL; 2835 ar->scan.roc_freq = 0; 2836 cancel_delayed_work(&ar->scan.timeout); 2837 complete(&ar->scan.completed); 2838 break; 2839 } 2840 } 2841 2842 void ath12k_mac_scan_finish(struct ath12k *ar) 2843 { 2844 spin_lock_bh(&ar->data_lock); 2845 __ath12k_mac_scan_finish(ar); 2846 spin_unlock_bh(&ar->data_lock); 2847 } 2848 2849 static int ath12k_scan_stop(struct ath12k 
*ar)
2850 {
2851 struct ath12k_wmi_scan_cancel_arg arg = {
2852 .req_type = WLAN_SCAN_CANCEL_SINGLE,
2853 .scan_id = ATH12K_SCAN_ID,
2854 };
2855 int ret;
2856
2857 lockdep_assert_held(&ar->conf_mutex);
2858
2859 /* TODO: Fill other STOP Params */
2860 arg.pdev_id = ar->pdev->pdev_id;
2861
2862 ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
2863 if (ret) {
2864 ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
2865 goto out;
2866 }
2867
2868 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
2869 if (ret == 0) {
2870 ath12k_warn(ar->ab,
2871 "failed to receive scan abort completion: timed out\n");
2872 ret = -ETIMEDOUT;
2873 } else if (ret > 0) {
2874 ret = 0;
2875 }
2876
2877 out:
2878 /* Scan state should be updated upon scan completion but in case
2879 * firmware fails to deliver the event (for whatever reason) it is
2880 * desired to clean up scan state anyway. Firmware may have just
2881 * dropped the scan completion event delivery due to transport pipe
2882 * being overflowed with data and/or it can recover on its own before
2883 * next scan request is submitted.
2884 */
2885 spin_lock_bh(&ar->data_lock);
2886 if (ar->scan.state != ATH12K_SCAN_IDLE)
2887 __ath12k_mac_scan_finish(ar);
2888 spin_unlock_bh(&ar->data_lock);
2889
2890 return ret;
2891 }
2892
2893 static void ath12k_scan_abort(struct ath12k *ar)
2894 {
2895 int ret;
2896
2897 lockdep_assert_held(&ar->conf_mutex);
2898
2899 spin_lock_bh(&ar->data_lock);
2900
2901 switch (ar->scan.state) {
2902 case ATH12K_SCAN_IDLE:
2903 /* This can happen if timeout worker kicked in and called
2904 * abortion while scan completion was being processed.
2905 */
2906 break;
2907 case ATH12K_SCAN_STARTING:
2908 case ATH12K_SCAN_ABORTING:
2909 ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
2910 ar->scan.state);
2911 break;
2912 case ATH12K_SCAN_RUNNING:
2913 ar->scan.state = ATH12K_SCAN_ABORTING;
2914 spin_unlock_bh(&ar->data_lock);
2915
2916 ret = ath12k_scan_stop(ar);
2917 if (ret)
2918 ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
2919
2920 spin_lock_bh(&ar->data_lock);
2921 break;
2922 }
2923
2924 spin_unlock_bh(&ar->data_lock);
2925 }
2926
2927 static void ath12k_scan_timeout_work(struct work_struct *work)
2928 {
2929 struct ath12k *ar = container_of(work, struct ath12k,
2930 scan.timeout.work);
2931
2932 mutex_lock(&ar->conf_mutex);
2933 ath12k_scan_abort(ar);
2934 mutex_unlock(&ar->conf_mutex);
2935 }
2936
2937 static int ath12k_start_scan(struct ath12k *ar,
2938 struct ath12k_wmi_scan_req_arg *arg)
2939 {
2940 int ret;
2941
2942 lockdep_assert_held(&ar->conf_mutex);
2943
2944 ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
2945 if (ret)
2946 return ret;
2947
2948 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
2949 if (ret == 0) {
2950 ret = ath12k_scan_stop(ar);
2951 if (ret)
2952 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
2953
2954 return -ETIMEDOUT;
2955 }
2956
2957 /* If we failed to start the scan, return error code at
2958 * this point. This is probably due to some issue in the
2959 * firmware, but no need to wedge the driver due to that...
2960 */ 2961 spin_lock_bh(&ar->data_lock); 2962 if (ar->scan.state == ATH12K_SCAN_IDLE) { 2963 spin_unlock_bh(&ar->data_lock); 2964 return -EINVAL; 2965 } 2966 spin_unlock_bh(&ar->data_lock); 2967 2968 return 0; 2969 } 2970 2971 static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, 2972 struct ieee80211_vif *vif, 2973 struct ieee80211_scan_request *hw_req) 2974 { 2975 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 2976 struct ath12k *ar; 2977 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2978 struct cfg80211_scan_request *req = &hw_req->req; 2979 struct ath12k_wmi_scan_req_arg arg = {}; 2980 int ret; 2981 int i; 2982 2983 ar = ath12k_ah_to_ar(ah); 2984 2985 mutex_lock(&ar->conf_mutex); 2986 2987 spin_lock_bh(&ar->data_lock); 2988 switch (ar->scan.state) { 2989 case ATH12K_SCAN_IDLE: 2990 reinit_completion(&ar->scan.started); 2991 reinit_completion(&ar->scan.completed); 2992 ar->scan.state = ATH12K_SCAN_STARTING; 2993 ar->scan.is_roc = false; 2994 ar->scan.vdev_id = arvif->vdev_id; 2995 ret = 0; 2996 break; 2997 case ATH12K_SCAN_STARTING: 2998 case ATH12K_SCAN_RUNNING: 2999 case ATH12K_SCAN_ABORTING: 3000 ret = -EBUSY; 3001 break; 3002 } 3003 spin_unlock_bh(&ar->data_lock); 3004 3005 if (ret) 3006 goto exit; 3007 3008 ath12k_wmi_start_scan_init(ar, &arg); 3009 arg.vdev_id = arvif->vdev_id; 3010 arg.scan_id = ATH12K_SCAN_ID; 3011 3012 if (req->ie_len) { 3013 arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); 3014 if (!arg.extraie.ptr) { 3015 ret = -ENOMEM; 3016 goto exit; 3017 } 3018 arg.extraie.len = req->ie_len; 3019 } 3020 3021 if (req->n_ssids) { 3022 arg.num_ssids = req->n_ssids; 3023 for (i = 0; i < arg.num_ssids; i++) 3024 arg.ssid[i] = req->ssids[i]; 3025 } else { 3026 arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE; 3027 } 3028 3029 if (req->n_channels) { 3030 arg.num_chan = req->n_channels; 3031 arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), 3032 GFP_KERNEL); 3033 3034 if (!arg.chan_list) { 3035 ret = -ENOMEM; 3036 goto exit; 3037 } 3038 3039 for (i = 0; i < arg.num_chan; i++) 3040 arg.chan_list[i] = req->channels[i]->center_freq; 3041 } 3042 3043 ret = ath12k_start_scan(ar, &arg); 3044 if (ret) { 3045 ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret); 3046 spin_lock_bh(&ar->data_lock); 3047 ar->scan.state = ATH12K_SCAN_IDLE; 3048 spin_unlock_bh(&ar->data_lock); 3049 } 3050 3051 /* Add a margin to account for event/command processing */ 3052 ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, 3053 msecs_to_jiffies(arg.max_scan_time + 3054 ATH12K_MAC_SCAN_TIMEOUT_MSECS)); 3055 3056 exit: 3057 kfree(arg.chan_list); 3058 3059 if (req->ie_len) 3060 kfree(arg.extraie.ptr); 3061 3062 mutex_unlock(&ar->conf_mutex); 3063 3064 return ret; 3065 } 3066 3067 static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, 3068 struct ieee80211_vif *vif) 3069 { 3070 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3071 struct ath12k *ar; 3072 3073 ar = ath12k_ah_to_ar(ah); 3074 3075 mutex_lock(&ar->conf_mutex); 3076 ath12k_scan_abort(ar); 3077 mutex_unlock(&ar->conf_mutex); 3078 3079 cancel_delayed_work_sync(&ar->scan.timeout); 3080 } 3081 3082 static int ath12k_install_key(struct ath12k_vif *arvif, 3083 struct ieee80211_key_conf *key, 3084 enum set_key_cmd cmd, 3085 const u8 *macaddr, u32 flags) 3086 { 3087 int ret; 3088 struct ath12k *ar = arvif->ar; 3089 struct wmi_vdev_install_key_arg arg = { 3090 .vdev_id = arvif->vdev_id, 3091 .key_idx = key->keyidx, 3092 .key_len = key->keylen, 3093 .key_data = key->key, 3094 .key_flags = flags, 3095 .macaddr 
= macaddr, 3096 }; 3097 3098 lockdep_assert_held(&arvif->ar->conf_mutex); 3099 3100 reinit_completion(&ar->install_key_done); 3101 3102 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) 3103 return 0; 3104 3105 if (cmd == DISABLE_KEY) { 3106 /* TODO: Check if FW expects value other than NONE for del */ 3107 /* arg.key_cipher = WMI_CIPHER_NONE; */ 3108 arg.key_len = 0; 3109 arg.key_data = NULL; 3110 goto install; 3111 } 3112 3113 switch (key->cipher) { 3114 case WLAN_CIPHER_SUITE_CCMP: 3115 arg.key_cipher = WMI_CIPHER_AES_CCM; 3116 /* TODO: Re-check if flag is valid */ 3117 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 3118 break; 3119 case WLAN_CIPHER_SUITE_TKIP: 3120 arg.key_cipher = WMI_CIPHER_TKIP; 3121 arg.key_txmic_len = 8; 3122 arg.key_rxmic_len = 8; 3123 break; 3124 case WLAN_CIPHER_SUITE_CCMP_256: 3125 arg.key_cipher = WMI_CIPHER_AES_CCM; 3126 break; 3127 case WLAN_CIPHER_SUITE_GCMP: 3128 case WLAN_CIPHER_SUITE_GCMP_256: 3129 arg.key_cipher = WMI_CIPHER_AES_GCM; 3130 break; 3131 default: 3132 ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); 3133 return -EOPNOTSUPP; 3134 } 3135 3136 if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) 3137 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV | 3138 IEEE80211_KEY_FLAG_RESERVE_TAILROOM; 3139 3140 install: 3141 ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg); 3142 3143 if (ret) 3144 return ret; 3145 3146 if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) 3147 return -ETIMEDOUT; 3148 3149 if (ether_addr_equal(macaddr, arvif->vif->addr)) 3150 arvif->key_cipher = key->cipher; 3151 3152 return ar->install_key_status ? -EINVAL : 0; 3153 } 3154 3155 static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, 3156 const u8 *addr) 3157 { 3158 struct ath12k *ar = arvif->ar; 3159 struct ath12k_base *ab = ar->ab; 3160 struct ath12k_peer *peer; 3161 int first_errno = 0; 3162 int ret; 3163 int i; 3164 u32 flags = 0; 3165 3166 lockdep_assert_held(&ar->conf_mutex); 3167 3168 spin_lock_bh(&ab->base_lock); 3169 peer = ath12k_peer_find(ab, arvif->vdev_id, addr); 3170 spin_unlock_bh(&ab->base_lock); 3171 3172 if (!peer) 3173 return -ENOENT; 3174 3175 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 3176 if (!peer->keys[i]) 3177 continue; 3178 3179 /* key flags are not required to delete the key */ 3180 ret = ath12k_install_key(arvif, peer->keys[i], 3181 DISABLE_KEY, addr, flags); 3182 if (ret < 0 && first_errno == 0) 3183 first_errno = ret; 3184 3185 if (ret < 0) 3186 ath12k_warn(ab, "failed to remove peer key %d: %d\n", 3187 i, ret); 3188 3189 spin_lock_bh(&ab->base_lock); 3190 peer->keys[i] = NULL; 3191 spin_unlock_bh(&ab->base_lock); 3192 } 3193 3194 return first_errno; 3195 } 3196 3197 static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3198 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 3199 struct ieee80211_key_conf *key) 3200 { 3201 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3202 struct ath12k *ar; 3203 struct ath12k_base *ab; 3204 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3205 struct ath12k_peer *peer; 3206 struct ath12k_sta *arsta; 3207 const u8 *peer_addr; 3208 int ret = 0; 3209 u32 flags = 0; 3210 3211 /* BIP needs to be done in software */ 3212 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3213 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3214 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 3215 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 3216 return 1; 3217 3218 ar = ath12k_ah_to_ar(ah); 3219 ab = ar->ab; 3220 3221 if 
(test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) 3222 return 1; 3223 3224 if (key->keyidx > WMI_MAX_KEY_INDEX) 3225 return -ENOSPC; 3226 3227 mutex_lock(&ar->conf_mutex); 3228 3229 if (sta) 3230 peer_addr = sta->addr; 3231 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 3232 peer_addr = vif->bss_conf.bssid; 3233 else 3234 peer_addr = vif->addr; 3235 3236 key->hw_key_idx = key->keyidx; 3237 3238 /* the peer should not disappear in mid-way (unless FW goes awry) since 3239 * we already hold conf_mutex. we just make sure its there now. 3240 */ 3241 spin_lock_bh(&ab->base_lock); 3242 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 3243 spin_unlock_bh(&ab->base_lock); 3244 3245 if (!peer) { 3246 if (cmd == SET_KEY) { 3247 ath12k_warn(ab, "cannot install key for non-existent peer %pM\n", 3248 peer_addr); 3249 ret = -EOPNOTSUPP; 3250 goto exit; 3251 } else { 3252 /* if the peer doesn't exist there is no key to disable 3253 * anymore 3254 */ 3255 goto exit; 3256 } 3257 } 3258 3259 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3260 flags |= WMI_KEY_PAIRWISE; 3261 else 3262 flags |= WMI_KEY_GROUP; 3263 3264 ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags); 3265 if (ret) { 3266 ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret); 3267 goto exit; 3268 } 3269 3270 ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key); 3271 if (ret) { 3272 ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret); 3273 goto exit; 3274 } 3275 3276 spin_lock_bh(&ab->base_lock); 3277 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 3278 if (peer && cmd == SET_KEY) { 3279 peer->keys[key->keyidx] = key; 3280 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3281 peer->ucast_keyidx = key->keyidx; 3282 peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher); 3283 } else { 3284 peer->mcast_keyidx = key->keyidx; 3285 peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher); 3286 } 3287 } else if (peer && cmd == DISABLE_KEY) { 3288 peer->keys[key->keyidx] = NULL; 3289 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3290 peer->ucast_keyidx = 0; 3291 else 3292 peer->mcast_keyidx = 0; 3293 } else if (!peer) 3294 /* impossible unless FW goes crazy */ 3295 ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); 3296 3297 if (sta) { 3298 arsta = ath12k_sta_to_arsta(sta); 3299 3300 switch (key->cipher) { 3301 case WLAN_CIPHER_SUITE_TKIP: 3302 case WLAN_CIPHER_SUITE_CCMP: 3303 case WLAN_CIPHER_SUITE_CCMP_256: 3304 case WLAN_CIPHER_SUITE_GCMP: 3305 case WLAN_CIPHER_SUITE_GCMP_256: 3306 if (cmd == SET_KEY) 3307 arsta->pn_type = HAL_PN_TYPE_WPA; 3308 else 3309 arsta->pn_type = HAL_PN_TYPE_NONE; 3310 break; 3311 default: 3312 arsta->pn_type = HAL_PN_TYPE_NONE; 3313 break; 3314 } 3315 } 3316 3317 spin_unlock_bh(&ab->base_lock); 3318 3319 exit: 3320 mutex_unlock(&ar->conf_mutex); 3321 return ret; 3322 } 3323 3324 static int 3325 ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar, 3326 enum nl80211_band band, 3327 const struct cfg80211_bitrate_mask *mask) 3328 { 3329 int num_rates = 0; 3330 int i; 3331 3332 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 3333 num_rates += hweight16(mask->control[band].vht_mcs[i]); 3334 3335 return num_rates; 3336 } 3337 3338 static int 3339 ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, 3340 struct ieee80211_sta *sta, 3341 const struct cfg80211_bitrate_mask *mask, 3342 enum nl80211_band band) 3343 { 3344 struct ath12k *ar = arvif->ar; 3345 u8 vht_rate, nss; 3346 u32 rate_code; 3347 int ret, i; 3348 3349 
lockdep_assert_held(&ar->conf_mutex); 3350 3351 nss = 0; 3352 3353 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 3354 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 3355 nss = i + 1; 3356 vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; 3357 } 3358 } 3359 3360 if (!nss) { 3361 ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", 3362 sta->addr); 3363 return -EINVAL; 3364 } 3365 3366 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 3367 "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates", 3368 sta->addr); 3369 3370 rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1, 3371 WMI_RATE_PREAMBLE_VHT); 3372 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 3373 arvif->vdev_id, 3374 WMI_PEER_PARAM_FIXED_RATE, 3375 rate_code); 3376 if (ret) 3377 ath12k_warn(ar->ab, 3378 "failed to update STA %pM Fixed Rate %d: %d\n", 3379 sta->addr, rate_code, ret); 3380 3381 return ret; 3382 } 3383 3384 static int ath12k_station_assoc(struct ath12k *ar, 3385 struct ieee80211_vif *vif, 3386 struct ieee80211_sta *sta, 3387 bool reassoc) 3388 { 3389 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3390 struct ath12k_wmi_peer_assoc_arg peer_arg; 3391 int ret; 3392 struct cfg80211_chan_def def; 3393 enum nl80211_band band; 3394 struct cfg80211_bitrate_mask *mask; 3395 u8 num_vht_rates; 3396 3397 lockdep_assert_held(&ar->conf_mutex); 3398 3399 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 3400 return -EPERM; 3401 3402 band = def.chan->band; 3403 mask = &arvif->bitrate_mask; 3404 3405 ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); 3406 3407 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 3408 if (ret) { 3409 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 3410 sta->addr, arvif->vdev_id, ret); 3411 return ret; 3412 } 3413 3414 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { 3415 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 3416 sta->addr, arvif->vdev_id); 3417 return -ETIMEDOUT; 3418 } 3419 3420 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask); 3421 3422 /* If single VHT rate is configured (by set_bitrate_mask()), 3423 * peer_assoc will disable VHT. This is now enabled by a peer specific 3424 * fixed param. 3425 * Note that all other rates and NSS will be disabled for this peer. 3426 */ 3427 if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { 3428 ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, 3429 band); 3430 if (ret) 3431 return ret; 3432 } 3433 3434 /* Re-assoc is run only to update supported rates for given station. It 3435 * doesn't make much sense to reconfigure the peer completely. 
3436 */ 3437 if (reassoc) 3438 return 0; 3439 3440 ret = ath12k_setup_peer_smps(ar, arvif, sta->addr, 3441 &sta->deflink.ht_cap); 3442 if (ret) { 3443 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 3444 arvif->vdev_id, ret); 3445 return ret; 3446 } 3447 3448 if (!sta->wme) { 3449 arvif->num_legacy_stations++; 3450 ret = ath12k_recalc_rtscts_prot(arvif); 3451 if (ret) 3452 return ret; 3453 } 3454 3455 if (sta->wme && sta->uapsd_queues) { 3456 ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta); 3457 if (ret) { 3458 ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", 3459 sta->addr, arvif->vdev_id, ret); 3460 return ret; 3461 } 3462 } 3463 3464 return 0; 3465 } 3466 3467 static int ath12k_station_disassoc(struct ath12k *ar, 3468 struct ieee80211_vif *vif, 3469 struct ieee80211_sta *sta) 3470 { 3471 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3472 int ret; 3473 3474 lockdep_assert_held(&ar->conf_mutex); 3475 3476 if (!sta->wme) { 3477 arvif->num_legacy_stations--; 3478 ret = ath12k_recalc_rtscts_prot(arvif); 3479 if (ret) 3480 return ret; 3481 } 3482 3483 ret = ath12k_clear_peer_keys(arvif, sta->addr); 3484 if (ret) { 3485 ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", 3486 arvif->vdev_id, ret); 3487 return ret; 3488 } 3489 return 0; 3490 } 3491 3492 static void ath12k_sta_rc_update_wk(struct work_struct *wk) 3493 { 3494 struct ath12k *ar; 3495 struct ath12k_vif *arvif; 3496 struct ath12k_sta *arsta; 3497 struct ieee80211_sta *sta; 3498 struct cfg80211_chan_def def; 3499 enum nl80211_band band; 3500 const u8 *ht_mcs_mask; 3501 const u16 *vht_mcs_mask; 3502 u32 changed, bw, nss, smps, bw_prev; 3503 int err, num_vht_rates; 3504 const struct cfg80211_bitrate_mask *mask; 3505 struct ath12k_wmi_peer_assoc_arg peer_arg; 3506 enum wmi_phy_mode peer_phymode; 3507 3508 arsta = container_of(wk, struct ath12k_sta, update_wk); 3509 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 3510 arvif = arsta->arvif; 3511 ar = arvif->ar; 3512 3513 if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def))) 3514 return; 3515 3516 band = def.chan->band; 3517 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 3518 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 3519 3520 spin_lock_bh(&ar->data_lock); 3521 3522 changed = arsta->changed; 3523 arsta->changed = 0; 3524 3525 bw = arsta->bw; 3526 bw_prev = arsta->bw_prev; 3527 nss = arsta->nss; 3528 smps = arsta->smps; 3529 3530 spin_unlock_bh(&ar->data_lock); 3531 3532 mutex_lock(&ar->conf_mutex); 3533 3534 nss = max_t(u32, 1, nss); 3535 nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask), 3536 ath12k_mac_max_vht_nss(vht_mcs_mask))); 3537 3538 if (changed & IEEE80211_RC_BW_CHANGED) { 3539 ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); 3540 peer_phymode = peer_arg.peer_phymode; 3541 3542 if (bw > bw_prev) { 3543 /* Phymode shows maximum supported channel width, if we 3544 * upgrade bandwidth then due to sanity check of firmware, 3545 * we have to send WMI_PEER_PHYMODE followed by 3546 * WMI_PEER_CHWIDTH 3547 */ 3548 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n", 3549 sta->addr, bw, bw_prev); 3550 err = ath12k_wmi_set_peer_param(ar, sta->addr, 3551 arvif->vdev_id, WMI_PEER_PHYMODE, 3552 peer_phymode); 3553 if (err) { 3554 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", 3555 sta->addr, peer_phymode, err); 3556 goto err_rc_bw_changed; 3557 } 3558 err = ath12k_wmi_set_peer_param(ar, sta->addr, 3559 
arvif->vdev_id, WMI_PEER_CHWIDTH, 3560 bw); 3561 if (err) 3562 ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n", 3563 sta->addr, bw, err); 3564 } else { 3565 /* When we downgrade bandwidth this will conflict with phymode 3566 * and cause to trigger firmware crash. In this case we send 3567 * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE 3568 */ 3569 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n", 3570 sta->addr, bw, bw_prev); 3571 err = ath12k_wmi_set_peer_param(ar, sta->addr, 3572 arvif->vdev_id, WMI_PEER_CHWIDTH, 3573 bw); 3574 if (err) { 3575 ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n", 3576 sta->addr, bw, err); 3577 goto err_rc_bw_changed; 3578 } 3579 err = ath12k_wmi_set_peer_param(ar, sta->addr, 3580 arvif->vdev_id, WMI_PEER_PHYMODE, 3581 peer_phymode); 3582 if (err) 3583 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", 3584 sta->addr, peer_phymode, err); 3585 } 3586 } 3587 3588 if (changed & IEEE80211_RC_NSS_CHANGED) { 3589 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n", 3590 sta->addr, nss); 3591 3592 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 3593 WMI_PEER_NSS, nss); 3594 if (err) 3595 ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n", 3596 sta->addr, nss, err); 3597 } 3598 3599 if (changed & IEEE80211_RC_SMPS_CHANGED) { 3600 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n", 3601 sta->addr, smps); 3602 3603 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 3604 WMI_PEER_MIMO_PS_STATE, smps); 3605 if (err) 3606 ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n", 3607 sta->addr, smps, err); 3608 } 3609 3610 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 3611 mask = &arvif->bitrate_mask; 3612 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, 3613 mask); 3614 3615 /* Peer_assoc_prepare will reject vht rates in 3616 * bitrate_mask if its not available in range format and 3617 * sets vht tx_rateset as unsupported. So multiple VHT MCS 3618 * setting(eg. MCS 4,5,6) per peer is not supported here. 3619 * But, Single rate in VHT mask can be set as per-peer 3620 * fixed rate. But even if any HT rates are configured in 3621 * the bitrate mask, device will not switch to those rates 3622 * when per-peer Fixed rate is set. 3623 * TODO: Check RATEMASK_CMDID to support auto rates selection 3624 * across HT/VHT and for multiple VHT MCS support. 3625 */ 3626 if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { 3627 ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, 3628 band); 3629 } else { 3630 /* If the peer is non-VHT or no fixed VHT rate 3631 * is provided in the new bitrate mask we set the 3632 * other rates using peer_assoc command. 
3633 */ 3634 ath12k_peer_assoc_prepare(ar, arvif->vif, sta, 3635 &peer_arg, true); 3636 3637 err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 3638 if (err) 3639 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 3640 sta->addr, arvif->vdev_id, err); 3641 3642 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) 3643 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 3644 sta->addr, arvif->vdev_id); 3645 } 3646 } 3647 err_rc_bw_changed: 3648 mutex_unlock(&ar->conf_mutex); 3649 } 3650 3651 static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, 3652 struct ieee80211_sta *sta) 3653 { 3654 struct ath12k *ar = arvif->ar; 3655 3656 lockdep_assert_held(&ar->conf_mutex); 3657 3658 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 3659 return 0; 3660 3661 if (ar->num_stations >= ar->max_num_stations) 3662 return -ENOBUFS; 3663 3664 ar->num_stations++; 3665 3666 return 0; 3667 } 3668 3669 static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif, 3670 struct ieee80211_sta *sta) 3671 { 3672 struct ath12k *ar = arvif->ar; 3673 3674 lockdep_assert_held(&ar->conf_mutex); 3675 3676 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 3677 return; 3678 3679 ar->num_stations--; 3680 } 3681 3682 static int ath12k_mac_station_add(struct ath12k *ar, 3683 struct ieee80211_vif *vif, 3684 struct ieee80211_sta *sta) 3685 { 3686 struct ath12k_base *ab = ar->ab; 3687 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3688 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 3689 struct ath12k_wmi_peer_create_arg peer_param; 3690 int ret; 3691 3692 lockdep_assert_held(&ar->conf_mutex); 3693 3694 ret = ath12k_mac_inc_num_stations(arvif, sta); 3695 if (ret) { 3696 ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n", 3697 ar->max_num_stations); 3698 goto exit; 3699 } 3700 3701 arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); 3702 if (!arsta->rx_stats) { 3703 ret = -ENOMEM; 3704 goto dec_num_station; 3705 } 3706 3707 peer_param.vdev_id = arvif->vdev_id; 3708 peer_param.peer_addr = sta->addr; 3709 peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 3710 3711 ret = ath12k_peer_create(ar, arvif, sta, &peer_param); 3712 if (ret) { 3713 ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", 3714 sta->addr, arvif->vdev_id); 3715 goto free_peer; 3716 } 3717 3718 ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 3719 sta->addr, arvif->vdev_id); 3720 3721 if (ieee80211_vif_is_mesh(vif)) { 3722 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 3723 arvif->vdev_id, 3724 WMI_PEER_USE_4ADDR, 1); 3725 if (ret) { 3726 ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n", 3727 sta->addr, ret); 3728 goto free_peer; 3729 } 3730 } 3731 3732 ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); 3733 if (ret) { 3734 ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", 3735 sta->addr, arvif->vdev_id, ret); 3736 goto free_peer; 3737 } 3738 3739 if (ab->hw_params->vdev_start_delay && 3740 !arvif->is_started && 3741 arvif->vdev_type != WMI_VDEV_TYPE_AP) { 3742 ret = ath12k_start_vdev_delay(ar, arvif); 3743 if (ret) { 3744 ath12k_warn(ab, "failed to delay vdev start: %d\n", ret); 3745 goto free_peer; 3746 } 3747 } 3748 3749 return 0; 3750 3751 free_peer: 3752 ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); 3753 dec_num_station: 3754 ath12k_mac_dec_num_stations(arvif, sta); 3755 exit: 3756 return ret; 3757 } 3758 3759 static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct 
ath12k *ar,
3760 struct ieee80211_sta *sta)
3761 {
3762 u32 bw = WMI_PEER_CHWIDTH_20MHZ;
3763
3764 switch (sta->deflink.bandwidth) {
3765 case IEEE80211_STA_RX_BW_20:
3766 bw = WMI_PEER_CHWIDTH_20MHZ;
3767 break;
3768 case IEEE80211_STA_RX_BW_40:
3769 bw = WMI_PEER_CHWIDTH_40MHZ;
3770 break;
3771 case IEEE80211_STA_RX_BW_80:
3772 bw = WMI_PEER_CHWIDTH_80MHZ;
3773 break;
3774 case IEEE80211_STA_RX_BW_160:
3775 bw = WMI_PEER_CHWIDTH_160MHZ;
3776 break;
3777 case IEEE80211_STA_RX_BW_320:
3778 bw = WMI_PEER_CHWIDTH_320MHZ;
3779 break;
3780 default:
3781 ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
3782 sta->deflink.bandwidth, sta->addr);
3783 bw = WMI_PEER_CHWIDTH_20MHZ;
3784 break;
3785 }
3786
3787 return bw;
3788 }
3789
3790 static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
3791 struct ieee80211_vif *vif,
3792 struct ieee80211_sta *sta,
3793 enum ieee80211_sta_state old_state,
3794 enum ieee80211_sta_state new_state)
3795 {
3796 struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
3797 struct ath12k *ar;
3798 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
3799 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
3800 struct ath12k_peer *peer;
3801 int ret = 0;
3802
3803 /* cancel must be done outside the mutex to avoid deadlock */
3804 if ((old_state == IEEE80211_STA_NONE &&
3805 new_state == IEEE80211_STA_NOTEXIST))
3806 cancel_work_sync(&arsta->update_wk);
3807
3808 ar = ath12k_ah_to_ar(ah);
3809
3810 mutex_lock(&ar->conf_mutex);
3811
3812 if (old_state == IEEE80211_STA_NOTEXIST &&
3813 new_state == IEEE80211_STA_NONE) {
3814 memset(arsta, 0, sizeof(*arsta));
3815 arsta->arvif = arvif;
3816 INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
3817
3818 ret = ath12k_mac_station_add(ar, vif, sta);
3819 if (ret)
3820 ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
3821 sta->addr, arvif->vdev_id);
3822 } else if ((old_state == IEEE80211_STA_NONE &&
3823 new_state == IEEE80211_STA_NOTEXIST)) {
3824 ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
3825
3826 ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
3827 if (ret)
3828 ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
3829 sta->addr, arvif->vdev_id);
3830 else
3831 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
3832 sta->addr, arvif->vdev_id);
3833
3834 ath12k_mac_dec_num_stations(arvif, sta);
3835 spin_lock_bh(&ar->ab->base_lock);
3836 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
3837 if (peer && peer->sta == sta) {
3838 ath12k_warn(ar->ab, "Found peer entry %pM on vdev %i after it was supposedly removed\n",
3839 vif->addr, arvif->vdev_id);
3840 peer->sta = NULL;
3841 list_del(&peer->list);
3842 kfree(peer);
3843 ar->num_peers--;
3844 }
3845 spin_unlock_bh(&ar->ab->base_lock);
3846
3847 kfree(arsta->rx_stats);
3848 arsta->rx_stats = NULL;
3849 } else if (old_state == IEEE80211_STA_AUTH &&
3850 new_state == IEEE80211_STA_ASSOC &&
3851 (vif->type == NL80211_IFTYPE_AP ||
3852 vif->type == NL80211_IFTYPE_MESH_POINT ||
3853 vif->type == NL80211_IFTYPE_ADHOC)) {
3854 ret = ath12k_station_assoc(ar, vif, sta, false);
3855 if (ret)
3856 ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
3857 sta->addr);
3858
3859 spin_lock_bh(&ar->data_lock);
3860
3861 arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
3862 arsta->bw_prev = sta->deflink.bandwidth;
3863
3864 spin_unlock_bh(&ar->data_lock);
3865 } else if (old_state == IEEE80211_STA_ASSOC &&
3866 new_state == IEEE80211_STA_AUTHORIZED) {
3867 spin_lock_bh(&ar->ab->base_lock);
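/* ASSOC -> AUTHORIZED: flag the peer as authorized in the host peer
 * table; for station interfaces with the vdev up, the firmware is also
 * told below via WMI_PEER_AUTHORIZE.
 */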
3868 3869 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 3870 if (peer) 3871 peer->is_authorized = true; 3872 3873 spin_unlock_bh(&ar->ab->base_lock); 3874 3875 if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 3876 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 3877 arvif->vdev_id, 3878 WMI_PEER_AUTHORIZE, 3879 1); 3880 if (ret) 3881 ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 3882 sta->addr, arvif->vdev_id, ret); 3883 } 3884 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3885 new_state == IEEE80211_STA_ASSOC) { 3886 spin_lock_bh(&ar->ab->base_lock); 3887 3888 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 3889 if (peer) 3890 peer->is_authorized = false; 3891 3892 spin_unlock_bh(&ar->ab->base_lock); 3893 } else if (old_state == IEEE80211_STA_ASSOC && 3894 new_state == IEEE80211_STA_AUTH && 3895 (vif->type == NL80211_IFTYPE_AP || 3896 vif->type == NL80211_IFTYPE_MESH_POINT || 3897 vif->type == NL80211_IFTYPE_ADHOC)) { 3898 ret = ath12k_station_disassoc(ar, vif, sta); 3899 if (ret) 3900 ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", 3901 sta->addr); 3902 } 3903 3904 mutex_unlock(&ar->conf_mutex); 3905 3906 return ret; 3907 } 3908 3909 static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, 3910 struct ieee80211_vif *vif, 3911 struct ieee80211_sta *sta) 3912 { 3913 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3914 struct ath12k *ar; 3915 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3916 int ret; 3917 s16 txpwr; 3918 3919 if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { 3920 txpwr = 0; 3921 } else { 3922 txpwr = sta->deflink.txpwr.power; 3923 if (!txpwr) 3924 return -EINVAL; 3925 } 3926 3927 if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) 3928 return -EINVAL; 3929 3930 ar = ath12k_ah_to_ar(ah); 3931 3932 mutex_lock(&ar->conf_mutex); 3933 3934 ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 3935 WMI_PEER_USE_FIXED_PWR, txpwr); 3936 if (ret) { 3937 ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n", 3938 ret); 3939 goto out; 3940 } 3941 3942 out: 3943 mutex_unlock(&ar->conf_mutex); 3944 return ret; 3945 } 3946 3947 static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, 3948 struct ieee80211_vif *vif, 3949 struct ieee80211_sta *sta, 3950 u32 changed) 3951 { 3952 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3953 struct ath12k *ar; 3954 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 3955 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3956 struct ath12k_peer *peer; 3957 u32 bw, smps; 3958 3959 ar = ath12k_ah_to_ar(ah); 3960 3961 spin_lock_bh(&ar->ab->base_lock); 3962 3963 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 3964 if (!peer) { 3965 spin_unlock_bh(&ar->ab->base_lock); 3966 ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", 3967 sta->addr, arvif->vdev_id); 3968 return; 3969 } 3970 3971 spin_unlock_bh(&ar->ab->base_lock); 3972 3973 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 3974 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 3975 sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, 3976 sta->deflink.smps_mode); 3977 3978 spin_lock_bh(&ar->data_lock); 3979 3980 if (changed & IEEE80211_RC_BW_CHANGED) { 3981 bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 3982 arsta->bw_prev = arsta->bw; 3983 arsta->bw = bw; 3984 } 3985 3986 if (changed & IEEE80211_RC_NSS_CHANGED) 3987 arsta->nss = sta->deflink.rx_nss; 3988 3989 if (changed & IEEE80211_RC_SMPS_CHANGED) { 
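/* Translate the mac80211 SMPS mode reported for this station into the
 * corresponding WMI peer MIMO power-save state; unrecognized modes
 * fall back to WMI_PEER_SMPS_PS_NONE.
 */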
3990 smps = WMI_PEER_SMPS_PS_NONE; 3991 3992 switch (sta->deflink.smps_mode) { 3993 case IEEE80211_SMPS_AUTOMATIC: 3994 case IEEE80211_SMPS_OFF: 3995 smps = WMI_PEER_SMPS_PS_NONE; 3996 break; 3997 case IEEE80211_SMPS_STATIC: 3998 smps = WMI_PEER_SMPS_STATIC; 3999 break; 4000 case IEEE80211_SMPS_DYNAMIC: 4001 smps = WMI_PEER_SMPS_DYNAMIC; 4002 break; 4003 default: 4004 ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", 4005 sta->deflink.smps_mode, sta->addr); 4006 smps = WMI_PEER_SMPS_PS_NONE; 4007 break; 4008 } 4009 4010 arsta->smps = smps; 4011 } 4012 4013 arsta->changed |= changed; 4014 4015 spin_unlock_bh(&ar->data_lock); 4016 4017 ieee80211_queue_work(hw, &arsta->update_wk); 4018 } 4019 4020 static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif, 4021 u16 ac, bool enable) 4022 { 4023 struct ath12k *ar = arvif->ar; 4024 u32 value; 4025 int ret; 4026 4027 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 4028 return 0; 4029 4030 switch (ac) { 4031 case IEEE80211_AC_VO: 4032 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 4033 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 4034 break; 4035 case IEEE80211_AC_VI: 4036 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 4037 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 4038 break; 4039 case IEEE80211_AC_BE: 4040 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 4041 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 4042 break; 4043 case IEEE80211_AC_BK: 4044 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 4045 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 4046 break; 4047 } 4048 4049 if (enable) 4050 arvif->u.sta.uapsd |= value; 4051 else 4052 arvif->u.sta.uapsd &= ~value; 4053 4054 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 4055 WMI_STA_PS_PARAM_UAPSD, 4056 arvif->u.sta.uapsd); 4057 if (ret) { 4058 ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret); 4059 goto exit; 4060 } 4061 4062 if (arvif->u.sta.uapsd) 4063 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 4064 else 4065 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 4066 4067 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 4068 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 4069 value); 4070 if (ret) 4071 ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret); 4072 4073 exit: 4074 return ret; 4075 } 4076 4077 static int ath12k_mac_conf_tx(struct ath12k_vif *arvif, 4078 unsigned int link_id, u16 ac, 4079 const struct ieee80211_tx_queue_params *params) 4080 { 4081 struct wmi_wmm_params_arg *p = NULL; 4082 struct ath12k *ar = arvif->ar; 4083 struct ath12k_base *ab = ar->ab; 4084 int ret; 4085 4086 lockdep_assert_held(&ar->conf_mutex); 4087 4088 switch (ac) { 4089 case IEEE80211_AC_VO: 4090 p = &arvif->wmm_params.ac_vo; 4091 break; 4092 case IEEE80211_AC_VI: 4093 p = &arvif->wmm_params.ac_vi; 4094 break; 4095 case IEEE80211_AC_BE: 4096 p = &arvif->wmm_params.ac_be; 4097 break; 4098 case IEEE80211_AC_BK: 4099 p = &arvif->wmm_params.ac_bk; 4100 break; 4101 } 4102 4103 if (WARN_ON(!p)) { 4104 ret = -EINVAL; 4105 goto exit; 4106 } 4107 4108 p->cwmin = params->cw_min; 4109 p->cwmax = params->cw_max; 4110 p->aifs = params->aifs; 4111 p->txop = params->txop; 4112 4113 ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, 4114 &arvif->wmm_params); 4115 if (ret) { 4116 ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n", 4117 ar->pdev_idx, ret); 4118 goto exit; 4119 } 4120 4121 ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd); 4122 if (ret) 4123 ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n", 4124 ar->pdev_idx, ret); 4125 4126 exit: 4127 return ret; 4128 } 4129 4130 static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, 4131 struct 
ieee80211_vif *vif, 4132 unsigned int link_id, u16 ac, 4133 const struct ieee80211_tx_queue_params *params) 4134 { 4135 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 4136 struct ath12k *ar; 4137 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4138 int ret; 4139 4140 ar = ath12k_ah_to_ar(ah); 4141 4142 mutex_lock(&ar->conf_mutex); 4143 ret = ath12k_mac_conf_tx(arvif, link_id, ac, params); 4144 mutex_unlock(&ar->conf_mutex); 4145 4146 return ret; 4147 } 4148 4149 static struct ieee80211_sta_ht_cap 4150 ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) 4151 { 4152 int i; 4153 struct ieee80211_sta_ht_cap ht_cap = {0}; 4154 u32 ar_vht_cap = ar->pdev->cap.vht_cap; 4155 4156 if (!(ar_ht_cap & WMI_HT_CAP_ENABLED)) 4157 return ht_cap; 4158 4159 ht_cap.ht_supported = 1; 4160 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4161 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; 4162 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4163 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4164 ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; 4165 4166 if (ar_ht_cap & WMI_HT_CAP_HT20_SGI) 4167 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4168 4169 if (ar_ht_cap & WMI_HT_CAP_HT40_SGI) 4170 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4171 4172 if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) { 4173 u32 smps; 4174 4175 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4176 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4177 4178 ht_cap.cap |= smps; 4179 } 4180 4181 if (ar_ht_cap & WMI_HT_CAP_TX_STBC) 4182 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4183 4184 if (ar_ht_cap & WMI_HT_CAP_RX_STBC) { 4185 u32 stbc; 4186 4187 stbc = ar_ht_cap; 4188 stbc &= WMI_HT_CAP_RX_STBC; 4189 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4190 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4191 stbc &= IEEE80211_HT_CAP_RX_STBC; 4192 4193 ht_cap.cap |= stbc; 4194 } 4195 4196 if (ar_ht_cap & WMI_HT_CAP_RX_LDPC) 4197 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4198 4199 if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT) 4200 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4201 4202 if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4203 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4204 4205 for (i = 0; i < ar->num_rx_chains; i++) { 4206 if (rate_cap_rx_chainmask & BIT(i)) 4207 ht_cap.mcs.rx_mask[i] = 0xFF; 4208 } 4209 4210 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4211 4212 return ht_cap; 4213 } 4214 4215 static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) 4216 { 4217 u32 value = 0; 4218 struct ath12k *ar = arvif->ar; 4219 int nsts; 4220 int sound_dim; 4221 u32 vht_cap = ar->pdev->cap.vht_cap; 4222 u32 vdev_param = WMI_VDEV_PARAM_TXBF; 4223 4224 if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { 4225 nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4226 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4227 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4228 } 4229 4230 if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 4231 sound_dim = vht_cap & 4232 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4233 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4234 if (sound_dim > (ar->num_tx_chains - 1)) 4235 sound_dim = ar->num_tx_chains - 1; 4236 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4237 } 4238 4239 if (!value) 4240 return 0; 4241 4242 if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) { 4243 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4244 4245 if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) && 4246 arvif->vdev_type == WMI_VDEV_TYPE_AP) 4247 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 4248 
	}

	if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;

		if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
		    arvif->vdev_type == WMI_VDEV_TYPE_STA)
			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
	}

	return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					     vdev_param, value);
}

static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
{
	bool subfer, subfee;
	int sound_dim = 0;

	subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
	subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));

	if (ar->num_tx_chains < 2) {
		*vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
		subfer = false;
	}

	/* If SU Beamformer is not set, then disable MU Beamformer Capability */
	if (!subfer)
		*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);

	/* If SU Beamformee is not set, then disable MU Beamformee Capability */
	if (!subfee)
		*vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);

	sound_dim = u32_get_bits(*vht_cap,
				 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
	*vht_cap = u32_replace_bits(*vht_cap, 0,
				    IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);

	/* TODO: Need to check invalid STS and Sound_dim values set by FW? */

	/* Enable Sounding Dimension Field only if SU BF is enabled */
	if (subfer) {
		if (sound_dim > (ar->num_tx_chains - 1))
			sound_dim = ar->num_tx_chains - 1;

		*vht_cap = u32_replace_bits(*vht_cap, sound_dim,
					    IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
	}

	/* Use the STS advertised by FW unless SU Beamformee is not supported */
	if (!subfee)
		*vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
}

static struct ieee80211_sta_vht_cap
ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
		      u32 rate_cap_rx_chainmask)
{
	struct ieee80211_sta_vht_cap vht_cap = {0};
	u16 txmcs_map, rxmcs_map;
	int i;

	vht_cap.vht_supported = 1;
	vht_cap.cap = ar->pdev->cap.vht_cap;

	ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);

	/* TODO: Enable back VHT160 mode once association issues are fixed */
	/* Disabling VHT160 and VHT80+80 modes */
	vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
	vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;

	rxmcs_map = 0;
	txmcs_map = 0;
	for (i = 0; i < 8; i++) {
		if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
			txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);

		if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
			rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
		else
			rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
	}

	if (rate_cap_tx_chainmask <= 1)
		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);

	return vht_cap;
}

static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
					struct ath12k_pdev_cap *cap,
					u32 *ht_cap_info)
{
	struct ieee80211_supported_band *band;
	u32 rate_cap_tx_chainmask;
	u32 rate_cap_rx_chainmask;
	u32 ht_cap;

	rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; 4356 4357 if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { 4358 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4359 ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info; 4360 if (ht_cap_info) 4361 *ht_cap_info = ht_cap; 4362 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, 4363 rate_cap_rx_chainmask); 4364 } 4365 4366 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && 4367 (ar->ab->hw_params->single_pdev_only || 4368 !ar->supports_6ghz)) { 4369 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4370 ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; 4371 if (ht_cap_info) 4372 *ht_cap_info = ht_cap; 4373 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, 4374 rate_cap_rx_chainmask); 4375 band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask, 4376 rate_cap_rx_chainmask); 4377 } 4378 } 4379 4380 static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant) 4381 { 4382 /* TODO: Check the request chainmask against the supported 4383 * chainmask table which is advertised in extented_service_ready event 4384 */ 4385 4386 return 0; 4387 } 4388 4389 static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, 4390 u8 *he_ppet) 4391 { 4392 int nss, ru; 4393 u8 bit = 7; 4394 4395 he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK; 4396 he_ppet[0] |= (fw_ppet->ru_bit_mask << 4397 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & 4398 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; 4399 for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { 4400 for (ru = 0; ru < 4; ru++) { 4401 u8 val; 4402 int i; 4403 4404 if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) 4405 continue; 4406 val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & 4407 0x3f; 4408 val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); 4409 for (i = 5; i >= 0; i--) { 4410 he_ppet[bit / 8] |= 4411 ((val >> i) & 0x1) << ((bit % 8)); 4412 bit++; 4413 } 4414 } 4415 } 4416 } 4417 4418 static void 4419 ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) 4420 { 4421 u8 m; 4422 4423 m = IEEE80211_HE_MAC_CAP0_TWT_RES | 4424 IEEE80211_HE_MAC_CAP0_TWT_REQ; 4425 he_cap_elem->mac_cap_info[0] &= ~m; 4426 4427 m = IEEE80211_HE_MAC_CAP2_TRS | 4428 IEEE80211_HE_MAC_CAP2_BCAST_TWT | 4429 IEEE80211_HE_MAC_CAP2_MU_CASCADING; 4430 he_cap_elem->mac_cap_info[2] &= ~m; 4431 4432 m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED | 4433 IEEE80211_HE_MAC_CAP2_BCAST_TWT | 4434 IEEE80211_HE_MAC_CAP2_MU_CASCADING; 4435 he_cap_elem->mac_cap_info[3] &= ~m; 4436 4437 m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG | 4438 IEEE80211_HE_MAC_CAP4_BQR; 4439 he_cap_elem->mac_cap_info[4] &= ~m; 4440 4441 m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | 4442 IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | 4443 IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | 4444 IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; 4445 he_cap_elem->mac_cap_info[5] &= ~m; 4446 4447 m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | 4448 IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; 4449 he_cap_elem->phy_cap_info[2] &= ~m; 4450 4451 m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | 4452 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | 4453 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; 4454 he_cap_elem->phy_cap_info[3] &= ~m; 4455 4456 m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; 4457 he_cap_elem->phy_cap_info[4] &= ~m; 4458 4459 m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; 4460 he_cap_elem->phy_cap_info[5] &= ~m; 4461 4462 m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | 4463 
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | 4464 IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | 4465 IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; 4466 he_cap_elem->phy_cap_info[6] &= ~m; 4467 4468 m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | 4469 IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | 4470 IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | 4471 IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; 4472 he_cap_elem->phy_cap_info[7] &= ~m; 4473 4474 m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | 4475 IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | 4476 IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | 4477 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; 4478 he_cap_elem->phy_cap_info[8] &= ~m; 4479 4480 m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | 4481 IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | 4482 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | 4483 IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | 4484 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | 4485 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; 4486 he_cap_elem->phy_cap_info[9] &= ~m; 4487 } 4488 4489 static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap, 4490 struct ath12k_band_cap *bcap) 4491 { 4492 u8 val; 4493 4494 bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; 4495 if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 4496 bcap->he_6ghz_capa |= 4497 u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC, 4498 IEEE80211_HE_6GHZ_CAP_SM_PS); 4499 else 4500 bcap->he_6ghz_capa |= 4501 u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, 4502 IEEE80211_HE_6GHZ_CAP_SM_PS); 4503 val = u32_get_bits(pcap->vht_cap, 4504 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); 4505 bcap->he_6ghz_capa |= 4506 u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); 4507 val = u32_get_bits(pcap->vht_cap, 4508 IEEE80211_VHT_CAP_MAX_MPDU_MASK); 4509 bcap->he_6ghz_capa |= 4510 u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); 4511 if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) 4512 bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; 4513 if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) 4514 bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; 4515 4516 return cpu_to_le16(bcap->he_6ghz_capa); 4517 } 4518 4519 static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap, 4520 int iftype, u8 num_tx_chains, 4521 struct ieee80211_sta_he_cap *he_cap) 4522 { 4523 struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; 4524 struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp; 4525 4526 he_cap->has_he = true; 4527 memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, 4528 sizeof(he_cap_elem->mac_cap_info)); 4529 memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, 4530 sizeof(he_cap_elem->phy_cap_info)); 4531 4532 he_cap_elem->mac_cap_info[1] &= 4533 IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; 4534 4535 he_cap_elem->phy_cap_info[5] &= 4536 ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; 4537 he_cap_elem->phy_cap_info[5] &= 4538 ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; 4539 he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1; 4540 4541 switch (iftype) { 4542 case NL80211_IFTYPE_AP: 4543 he_cap_elem->phy_cap_info[3] &= 4544 ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; 4545 he_cap_elem->phy_cap_info[9] |= 4546 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; 4547 break; 4548 case NL80211_IFTYPE_STATION: 4549 he_cap_elem->mac_cap_info[0] &= 
~IEEE80211_HE_MAC_CAP0_TWT_RES; 4550 he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; 4551 he_cap_elem->phy_cap_info[9] |= 4552 IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; 4553 break; 4554 case NL80211_IFTYPE_MESH_POINT: 4555 ath12k_mac_filter_he_cap_mesh(he_cap_elem); 4556 break; 4557 } 4558 4559 mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); 4560 mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); 4561 mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 4562 mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 4563 mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 4564 mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 4565 4566 memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); 4567 if (he_cap_elem->phy_cap_info[6] & 4568 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) 4569 ath12k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); 4570 } 4571 4572 static void 4573 ath12k_mac_copy_eht_mcs_nss(struct ath12k_band_cap *band_cap, 4574 struct ieee80211_eht_mcs_nss_supp *mcs_nss, 4575 const struct ieee80211_he_cap_elem *he_cap, 4576 const struct ieee80211_eht_cap_elem_fixed *eht_cap) 4577 { 4578 if ((he_cap->phy_cap_info[0] & 4579 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 4580 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | 4581 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | 4582 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) 4583 memcpy(&mcs_nss->only_20mhz, &band_cap->eht_mcs_20_only, 4584 sizeof(struct ieee80211_eht_mcs_nss_supp_20mhz_only)); 4585 4586 if (he_cap->phy_cap_info[0] & 4587 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 4588 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) 4589 memcpy(&mcs_nss->bw._80, &band_cap->eht_mcs_80, 4590 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 4591 4592 if (he_cap->phy_cap_info[0] & 4593 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 4594 memcpy(&mcs_nss->bw._160, &band_cap->eht_mcs_160, 4595 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 4596 4597 if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) 4598 memcpy(&mcs_nss->bw._320, &band_cap->eht_mcs_320, 4599 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 4600 } 4601 4602 static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, 4603 struct ieee80211_sta_eht_cap *cap) 4604 { 4605 u16 bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; 4606 u8 i, nss, ru, ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 4607 4608 u8p_replace_bits(&cap->eht_ppe_thres[0], fw_ppet->numss_m1, 4609 IEEE80211_EHT_PPE_THRES_NSS_MASK); 4610 4611 u16p_replace_bits((u16 *)&cap->eht_ppe_thres[0], fw_ppet->ru_bit_mask, 4612 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 4613 4614 for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { 4615 for (ru = 0; 4616 ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 4617 ru++) { 4618 u32 val = 0; 4619 4620 if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) 4621 continue; 4622 4623 u32p_replace_bits(&val, fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> 4624 (ru * ppet_bit_len_per_ru), 4625 GENMASK(ppet_bit_len_per_ru - 1, 0)); 4626 4627 for (i = 0; i < ppet_bit_len_per_ru; i++) { 4628 cap->eht_ppe_thres[bit / 8] |= 4629 (((val >> i) & 0x1) << ((bit % 8))); 4630 bit++; 4631 } 4632 } 4633 } 4634 } 4635 4636 static void 4637 ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed 4638 *eht_cap_elem) 4639 { 4640 u8 m; 
4641 4642 m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS; 4643 eht_cap_elem->mac_cap_info[0] &= ~m; 4644 4645 m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO; 4646 eht_cap_elem->phy_cap_info[0] &= ~m; 4647 4648 m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | 4649 IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | 4650 IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | 4651 IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; 4652 eht_cap_elem->phy_cap_info[3] &= ~m; 4653 4654 m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | 4655 IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | 4656 IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | 4657 IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI; 4658 eht_cap_elem->phy_cap_info[4] &= ~m; 4659 4660 m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | 4661 IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | 4662 IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | 4663 IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK; 4664 eht_cap_elem->phy_cap_info[5] &= ~m; 4665 4666 m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK; 4667 eht_cap_elem->phy_cap_info[6] &= ~m; 4668 4669 m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | 4670 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | 4671 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | 4672 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | 4673 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | 4674 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; 4675 eht_cap_elem->phy_cap_info[7] &= ~m; 4676 } 4677 4678 static void ath12k_mac_copy_eht_cap(struct ath12k *ar, 4679 struct ath12k_band_cap *band_cap, 4680 struct ieee80211_he_cap_elem *he_cap_elem, 4681 int iftype, 4682 struct ieee80211_sta_eht_cap *eht_cap) 4683 { 4684 struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; 4685 4686 memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); 4687 4688 if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) 4689 return; 4690 4691 eht_cap->has_eht = true; 4692 memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info, 4693 sizeof(eht_cap_elem->mac_cap_info)); 4694 memcpy(eht_cap_elem->phy_cap_info, band_cap->eht_cap_phy_info, 4695 sizeof(eht_cap_elem->phy_cap_info)); 4696 4697 switch (iftype) { 4698 case NL80211_IFTYPE_AP: 4699 eht_cap_elem->phy_cap_info[0] &= 4700 ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ; 4701 eht_cap_elem->phy_cap_info[4] &= 4702 ~IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO; 4703 eht_cap_elem->phy_cap_info[5] &= 4704 ~IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP; 4705 break; 4706 case NL80211_IFTYPE_STATION: 4707 eht_cap_elem->phy_cap_info[7] &= 4708 ~(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | 4709 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | 4710 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); 4711 eht_cap_elem->phy_cap_info[7] &= 4712 ~(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | 4713 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | 4714 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); 4715 break; 4716 case NL80211_IFTYPE_MESH_POINT: 4717 ath12k_mac_filter_eht_cap_mesh(eht_cap_elem); 4718 break; 4719 default: 4720 break; 4721 } 4722 4723 ath12k_mac_copy_eht_mcs_nss(band_cap, &eht_cap->eht_mcs_nss_supp, 4724 he_cap_elem, eht_cap_elem); 4725 4726 if (eht_cap_elem->phy_cap_info[5] & 4727 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) 4728 ath12k_mac_copy_eht_ppe_thresh(&band_cap->eht_ppet, eht_cap); 4729 } 4730 4731 static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar, 4732 struct ath12k_pdev_cap *cap, 4733 struct ieee80211_sband_iftype_data *data, 4734 int band) 4735 { 4736 struct 
ath12k_band_cap *band_cap = &cap->band[band]; 4737 int i, idx = 0; 4738 4739 for (i = 0; i < NUM_NL80211_IFTYPES; i++) { 4740 struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; 4741 4742 switch (i) { 4743 case NL80211_IFTYPE_STATION: 4744 case NL80211_IFTYPE_AP: 4745 case NL80211_IFTYPE_MESH_POINT: 4746 break; 4747 4748 default: 4749 continue; 4750 } 4751 4752 data[idx].types_mask = BIT(i); 4753 4754 ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap); 4755 if (band == NL80211_BAND_6GHZ) { 4756 data[idx].he_6ghz_capa.capa = 4757 ath12k_mac_setup_he_6ghz_cap(cap, band_cap); 4758 } 4759 ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i, 4760 &data[idx].eht_cap); 4761 idx++; 4762 } 4763 4764 return idx; 4765 } 4766 4767 static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, 4768 struct ath12k_pdev_cap *cap) 4769 { 4770 struct ieee80211_supported_band *sband; 4771 enum nl80211_band band; 4772 int count; 4773 4774 if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { 4775 band = NL80211_BAND_2GHZ; 4776 count = ath12k_mac_copy_sband_iftype_data(ar, cap, 4777 ar->mac.iftype[band], 4778 band); 4779 sband = &ar->mac.sbands[band]; 4780 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 4781 count); 4782 } 4783 4784 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { 4785 band = NL80211_BAND_5GHZ; 4786 count = ath12k_mac_copy_sband_iftype_data(ar, cap, 4787 ar->mac.iftype[band], 4788 band); 4789 sband = &ar->mac.sbands[band]; 4790 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 4791 count); 4792 } 4793 4794 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && 4795 ar->supports_6ghz) { 4796 band = NL80211_BAND_6GHZ; 4797 count = ath12k_mac_copy_sband_iftype_data(ar, cap, 4798 ar->mac.iftype[band], 4799 band); 4800 sband = &ar->mac.sbands[band]; 4801 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 4802 count); 4803 } 4804 } 4805 4806 static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) 4807 { 4808 int ret; 4809 4810 lockdep_assert_held(&ar->conf_mutex); 4811 4812 if (ath12k_check_chain_mask(ar, tx_ant, true)) 4813 return -EINVAL; 4814 4815 if (ath12k_check_chain_mask(ar, rx_ant, false)) 4816 return -EINVAL; 4817 4818 ar->cfg_tx_chainmask = tx_ant; 4819 ar->cfg_rx_chainmask = rx_ant; 4820 4821 if (ar->state != ATH12K_STATE_ON && 4822 ar->state != ATH12K_STATE_RESTARTED) 4823 return 0; 4824 4825 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, 4826 tx_ant, ar->pdev->pdev_id); 4827 if (ret) { 4828 ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n", 4829 ret, tx_ant); 4830 return ret; 4831 } 4832 4833 ar->num_tx_chains = hweight32(tx_ant); 4834 4835 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK, 4836 rx_ant, ar->pdev->pdev_id); 4837 if (ret) { 4838 ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n", 4839 ret, rx_ant); 4840 return ret; 4841 } 4842 4843 ar->num_rx_chains = hweight32(rx_ant); 4844 4845 /* Reload HT/VHT/HE capability */ 4846 ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); 4847 ath12k_mac_setup_sband_iftype_data(ar, &ar->pdev->cap); 4848 4849 return 0; 4850 } 4851 4852 static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb) 4853 { 4854 int num_mgmt; 4855 4856 ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb); 4857 4858 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 4859 4860 if (num_mgmt < 0) 4861 WARN_ON_ONCE(1); 4862 4863 if (!num_mgmt) 4864 wake_up(&ar->txmgmt_empty_waitq); 4865 } 4866 4867 
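/* Callback matching the idr_for_each() signature, used when pending
 * management frames have to be released without a firmware completion:
 * drop the IDR entry for the buffer, unmap its DMA address, clear the tx
 * status and hand the skb back through ath12k_mgmt_over_wmi_tx_drop().
 */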
int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) 4868 { 4869 struct sk_buff *msdu = skb; 4870 struct ieee80211_tx_info *info; 4871 struct ath12k *ar = ctx; 4872 struct ath12k_base *ab = ar->ab; 4873 4874 spin_lock_bh(&ar->txmgmt_idr_lock); 4875 idr_remove(&ar->txmgmt_idr, buf_id); 4876 spin_unlock_bh(&ar->txmgmt_idr_lock); 4877 dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, 4878 DMA_TO_DEVICE); 4879 4880 info = IEEE80211_SKB_CB(msdu); 4881 memset(&info->status, 0, sizeof(info->status)); 4882 4883 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 4884 4885 return 0; 4886 } 4887 4888 static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) 4889 { 4890 struct ieee80211_vif *vif = ctx; 4891 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 4892 struct sk_buff *msdu = skb; 4893 struct ath12k *ar = skb_cb->ar; 4894 struct ath12k_base *ab = ar->ab; 4895 4896 if (skb_cb->vif == vif) { 4897 spin_lock_bh(&ar->txmgmt_idr_lock); 4898 idr_remove(&ar->txmgmt_idr, buf_id); 4899 spin_unlock_bh(&ar->txmgmt_idr_lock); 4900 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, 4901 DMA_TO_DEVICE); 4902 } 4903 4904 return 0; 4905 } 4906 4907 static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, 4908 struct sk_buff *skb) 4909 { 4910 struct ath12k_base *ab = ar->ab; 4911 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 4912 struct ieee80211_tx_info *info; 4913 dma_addr_t paddr; 4914 int buf_id; 4915 int ret; 4916 4917 ATH12K_SKB_CB(skb)->ar = ar; 4918 spin_lock_bh(&ar->txmgmt_idr_lock); 4919 buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, 4920 ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); 4921 spin_unlock_bh(&ar->txmgmt_idr_lock); 4922 if (buf_id < 0) 4923 return -ENOSPC; 4924 4925 info = IEEE80211_SKB_CB(skb); 4926 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { 4927 if ((ieee80211_is_action(hdr->frame_control) || 4928 ieee80211_is_deauth(hdr->frame_control) || 4929 ieee80211_is_disassoc(hdr->frame_control)) && 4930 ieee80211_has_protected(hdr->frame_control)) { 4931 skb_put(skb, IEEE80211_CCMP_MIC_LEN); 4932 } 4933 } 4934 4935 paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); 4936 if (dma_mapping_error(ab->dev, paddr)) { 4937 ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n"); 4938 ret = -EIO; 4939 goto err_free_idr; 4940 } 4941 4942 ATH12K_SKB_CB(skb)->paddr = paddr; 4943 4944 ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb); 4945 if (ret) { 4946 ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret); 4947 goto err_unmap_buf; 4948 } 4949 4950 return 0; 4951 4952 err_unmap_buf: 4953 dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, 4954 skb->len, DMA_TO_DEVICE); 4955 err_free_idr: 4956 spin_lock_bh(&ar->txmgmt_idr_lock); 4957 idr_remove(&ar->txmgmt_idr, buf_id); 4958 spin_unlock_bh(&ar->txmgmt_idr_lock); 4959 4960 return ret; 4961 } 4962 4963 static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar) 4964 { 4965 struct sk_buff *skb; 4966 4967 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) 4968 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 4969 } 4970 4971 static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) 4972 { 4973 struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); 4974 struct ath12k_skb_cb *skb_cb; 4975 struct ath12k_vif *arvif; 4976 struct sk_buff *skb; 4977 int ret; 4978 4979 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { 4980 skb_cb = ATH12K_SKB_CB(skb); 4981 if (!skb_cb->vif) { 4982 ath12k_warn(ar->ab, "no vif 
found for mgmt frame\n"); 4983 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 4984 continue; 4985 } 4986 4987 arvif = ath12k_vif_to_arvif(skb_cb->vif); 4988 if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) && 4989 arvif->is_started) { 4990 ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); 4991 if (ret) { 4992 ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", 4993 arvif->vdev_id, ret); 4994 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 4995 } 4996 } else { 4997 ath12k_warn(ar->ab, 4998 "dropping mgmt frame for vdev %d, is_started %d\n", 4999 arvif->vdev_id, 5000 arvif->is_started); 5001 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5002 } 5003 } 5004 } 5005 5006 static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb, 5007 bool is_prb_rsp) 5008 { 5009 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 5010 5011 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) 5012 return -ESHUTDOWN; 5013 5014 /* Drop probe response packets when the pending management tx 5015 * count has reached a certain threshold, so as to prioritize 5016 * other mgmt packets like auth and assoc to be sent on time 5017 * for establishing successful connections. 5018 */ 5019 if (is_prb_rsp && 5020 atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) { 5021 ath12k_warn(ar->ab, 5022 "dropping probe response as pending queue is almost full\n"); 5023 return -ENOSPC; 5024 } 5025 5026 if (skb_queue_len_lockless(q) >= ATH12K_TX_MGMT_NUM_PENDING_MAX) { 5027 ath12k_warn(ar->ab, "mgmt tx queue is full\n"); 5028 return -ENOSPC; 5029 } 5030 5031 skb_queue_tail(q, skb); 5032 atomic_inc(&ar->num_pending_mgmt_tx); 5033 ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work); 5034 5035 return 0; 5036 } 5037 5038 static void ath12k_mac_op_tx(struct ieee80211_hw *hw, 5039 struct ieee80211_tx_control *control, 5040 struct sk_buff *skb) 5041 { 5042 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 5043 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 5044 struct ieee80211_vif *vif = info->control.vif; 5045 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5046 struct ath12k *ar = arvif->ar; 5047 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5048 struct ieee80211_key_conf *key = info->control.hw_key; 5049 u32 info_flags = info->flags; 5050 bool is_prb_rsp; 5051 int ret; 5052 5053 memset(skb_cb, 0, sizeof(*skb_cb)); 5054 skb_cb->vif = vif; 5055 5056 if (key) { 5057 skb_cb->cipher = key->cipher; 5058 skb_cb->flags |= ATH12K_SKB_CIPHER_SET; 5059 } 5060 5061 if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { 5062 skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP; 5063 } else if (ieee80211_is_mgmt(hdr->frame_control)) { 5064 is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); 5065 ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp); 5066 if (ret) { 5067 ath12k_warn(ar->ab, "failed to queue management frame %d\n", 5068 ret); 5069 ieee80211_free_txskb(hw, skb); 5070 } 5071 return; 5072 } 5073 5074 ret = ath12k_dp_tx(ar, arvif, skb); 5075 if (ret) { 5076 ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); 5077 ieee80211_free_txskb(hw, skb); 5078 } 5079 } 5080 5081 void ath12k_mac_drain_tx(struct ath12k *ar) 5082 { 5083 /* make sure rcu-protected mac80211 tx path itself is drained */ 5084 synchronize_net(); 5085 5086 cancel_work_sync(&ar->wmi_mgmt_tx_work); 5087 ath12k_mgmt_over_wmi_tx_purge(ar); 5088 } 5089 5090 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) 5091 { 5092 return -EOPNOTSUPP; 5093 /* TODO: Need to support new monitor mode */ 5094 } 5095 
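/* Synchronize restart across radios: each recovering radio increments
 * recovery_start_count, the last one completes recovery_start, and every
 * caller then waits (bounded by ATH12K_RECONFIGURE_TIMEOUT_HZ) for the
 * reconfigure_complete signal before continuing with ath12k_mac_start().
 */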
static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
{
	int recovery_start_count;

	if (!ab->is_reset)
		return;

	recovery_start_count = atomic_inc_return(&ab->recovery_start_count);

	ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);

	if (recovery_start_count == ab->num_radios) {
		complete(&ab->recovery_start);
		ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started successfully\n");
	}

	ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting for reconfigure to complete\n");

	wait_for_completion_timeout(&ab->reconfigure_complete,
				    ATH12K_RECONFIGURE_TIMEOUT_HZ);
}

static int ath12k_mac_start(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_pdev *pdev = ar->pdev;
	int ret;

	mutex_lock(&ar->conf_mutex);

	switch (ar->state) {
	case ATH12K_STATE_OFF:
		ar->state = ATH12K_STATE_ON;
		break;
	case ATH12K_STATE_RESTARTING:
		ar->state = ATH12K_STATE_RESTARTED;
		ath12k_mac_wait_reconfigure(ab);
		break;
	case ATH12K_STATE_RESTARTED:
	case ATH12K_STATE_WEDGED:
	case ATH12K_STATE_ON:
		WARN_ON(1);
		ret = -EINVAL;
		goto err;
	}

	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
					1, pdev->pdev_id);
	if (ret) {
		ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret);
		goto err;
	}

	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
					pdev->pdev_id);
	if (ret) {
		ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
		goto err;
	}

	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
					0, pdev->pdev_id);
	if (ret) {
		ath12k_err(ab, "failed to set ac override for ARP: %d\n",
			   ret);
		goto err;
	}

	ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
	if (ret) {
		ath12k_err(ab, "failed to offload radar detection: %d\n",
			   ret);
		goto err;
	}

	ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
						  HTT_PPDU_STATS_TAG_DEFAULT);
	if (ret) {
		ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
		goto err;
	}

	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
					1, pdev->pdev_id);
	if (ret) {
		ath12k_err(ab, "failed to enable MESH MCAST ENABLE: %d\n", ret);
		goto err;
	}

	__ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);

	/* TODO: Do we need to enable ANI? */

	ath12k_reg_update_chan_list(ar);

	ar->num_started_vdevs = 0;
	ar->num_created_vdevs = 0;
	ar->num_peers = 0;
	ar->allocated_vdev_map = 0;

	/* Configure monitor status ring with default rx_filter to get rx status
	 * such as rssi, rx_duration.
5200 */ 5201 ret = ath12k_mac_config_mon_status_default(ar, true); 5202 if (ret && (ret != -EOPNOTSUPP)) { 5203 ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", 5204 ret); 5205 goto err; 5206 } 5207 5208 if (ret == -EOPNOTSUPP) 5209 ath12k_dbg(ab, ATH12K_DBG_MAC, 5210 "monitor status config is not yet supported"); 5211 5212 /* Configure the hash seed for hash based reo dest ring selection */ 5213 ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id); 5214 5215 /* allow device to enter IMPS */ 5216 if (ab->hw_params->idle_ps) { 5217 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG, 5218 1, pdev->pdev_id); 5219 if (ret) { 5220 ath12k_err(ab, "failed to enable idle ps: %d\n", ret); 5221 goto err; 5222 } 5223 } 5224 5225 mutex_unlock(&ar->conf_mutex); 5226 5227 rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], 5228 &ab->pdevs[ar->pdev_idx]); 5229 5230 return 0; 5231 err: 5232 ar->state = ATH12K_STATE_OFF; 5233 mutex_unlock(&ar->conf_mutex); 5234 5235 return ret; 5236 } 5237 5238 static int ath12k_mac_op_start(struct ieee80211_hw *hw) 5239 { 5240 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5241 struct ath12k *ar = ath12k_ah_to_ar(ah); 5242 struct ath12k_base *ab = ar->ab; 5243 int ret; 5244 5245 ath12k_mac_drain_tx(ar); 5246 5247 ret = ath12k_mac_start(ar); 5248 if (ret) { 5249 ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n", 5250 ar->pdev_idx, ret); 5251 return ret; 5252 } 5253 5254 return 0; 5255 } 5256 5257 int ath12k_mac_rfkill_config(struct ath12k *ar) 5258 { 5259 struct ath12k_base *ab = ar->ab; 5260 u32 param; 5261 int ret; 5262 5263 if (ab->hw_params->rfkill_pin == 0) 5264 return -EOPNOTSUPP; 5265 5266 ath12k_dbg(ab, ATH12K_DBG_MAC, 5267 "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", 5268 ab->hw_params->rfkill_pin, ab->hw_params->rfkill_cfg, 5269 ab->hw_params->rfkill_on_level); 5270 5271 param = u32_encode_bits(ab->hw_params->rfkill_on_level, 5272 WMI_RFKILL_CFG_RADIO_LEVEL) | 5273 u32_encode_bits(ab->hw_params->rfkill_pin, 5274 WMI_RFKILL_CFG_GPIO_PIN_NUM) | 5275 u32_encode_bits(ab->hw_params->rfkill_cfg, 5276 WMI_RFKILL_CFG_PIN_AS_GPIO); 5277 5278 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, 5279 param, ar->pdev->pdev_id); 5280 if (ret) { 5281 ath12k_warn(ab, 5282 "failed to set rfkill config 0x%x: %d\n", 5283 param, ret); 5284 return ret; 5285 } 5286 5287 return 0; 5288 } 5289 5290 int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) 5291 { 5292 enum wmi_rfkill_enable_radio param; 5293 int ret; 5294 5295 if (enable) 5296 param = WMI_RFKILL_ENABLE_RADIO_ON; 5297 else 5298 param = WMI_RFKILL_ENABLE_RADIO_OFF; 5299 5300 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d", 5301 ar->pdev_idx, param); 5302 5303 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, 5304 param, ar->pdev->pdev_id); 5305 if (ret) { 5306 ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", 5307 param, ret); 5308 return ret; 5309 } 5310 5311 return 0; 5312 } 5313 5314 static void ath12k_mac_stop(struct ath12k *ar) 5315 { 5316 struct htt_ppdu_stats_info *ppdu_stats, *tmp; 5317 int ret; 5318 5319 mutex_lock(&ar->conf_mutex); 5320 ret = ath12k_mac_config_mon_status_default(ar, false); 5321 if (ret && (ret != -EOPNOTSUPP)) 5322 ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", 5323 ret); 5324 5325 clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 5326 ar->state = ATH12K_STATE_OFF; 5327 mutex_unlock(&ar->conf_mutex); 5328 5329 
cancel_delayed_work_sync(&ar->scan.timeout); 5330 cancel_work_sync(&ar->regd_update_work); 5331 cancel_work_sync(&ar->ab->rfkill_work); 5332 5333 spin_lock_bh(&ar->data_lock); 5334 list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { 5335 list_del(&ppdu_stats->list); 5336 kfree(ppdu_stats); 5337 } 5338 spin_unlock_bh(&ar->data_lock); 5339 5340 rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); 5341 5342 synchronize_rcu(); 5343 5344 atomic_set(&ar->num_pending_mgmt_tx, 0); 5345 } 5346 5347 static void ath12k_mac_op_stop(struct ieee80211_hw *hw) 5348 { 5349 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5350 struct ath12k *ar = ath12k_ah_to_ar(ah); 5351 5352 ath12k_mac_drain_tx(ar); 5353 5354 ath12k_mac_stop(ar); 5355 } 5356 5357 static u8 5358 ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) 5359 { 5360 struct ath12k_base *ab = arvif->ar->ab; 5361 u8 vdev_stats_id = 0; 5362 5363 do { 5364 if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) { 5365 vdev_stats_id++; 5366 if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) { 5367 vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID; 5368 break; 5369 } 5370 } else { 5371 ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id); 5372 break; 5373 } 5374 } while (vdev_stats_id); 5375 5376 arvif->vdev_stats_id = vdev_stats_id; 5377 return vdev_stats_id; 5378 } 5379 5380 static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, 5381 struct ath12k_wmi_vdev_create_arg *arg) 5382 { 5383 struct ath12k *ar = arvif->ar; 5384 struct ath12k_pdev *pdev = ar->pdev; 5385 5386 arg->if_id = arvif->vdev_id; 5387 arg->type = arvif->vdev_type; 5388 arg->subtype = arvif->vdev_subtype; 5389 arg->pdev_id = pdev->pdev_id; 5390 5391 if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { 5392 arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; 5393 arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; 5394 } 5395 if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { 5396 arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; 5397 arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; 5398 } 5399 if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP && 5400 ar->supports_6ghz) { 5401 arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains; 5402 arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains; 5403 } 5404 5405 arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif); 5406 } 5407 5408 static u32 5409 ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype) 5410 { 5411 struct ath12k_pdev_cap *pdev_cap = &pdev->cap; 5412 struct ath12k_band_cap *cap_band = NULL; 5413 u32 *hecap_phy_ptr = NULL; 5414 u32 hemode; 5415 5416 if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) 5417 cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; 5418 else 5419 cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; 5420 5421 hecap_phy_ptr = &cap_band->he_cap_phy_info[0]; 5422 5423 hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) | 5424 u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr), 5425 HE_MODE_SU_TX_BFER) | 5426 u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr), 5427 HE_MODE_UL_MUMIMO); 5428 5429 /* TODO: WDS and other modes */ 5430 if (viftype == NL80211_IFTYPE_AP) { 5431 hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr), 5432 HE_MODE_MU_TX_BFER) | 5433 u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) | 5434 u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA); 5435 } else { 5436 hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE); 5437 } 5438 5439 return hemode; 5440 } 5441 5442 static int 
ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
			       struct ath12k_vif *arvif)
{
	u32 param_id, param_value;
	struct ath12k_base *ab = ar->ab;
	int ret;

	param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
	param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    param_id, param_value);
	if (ret) {
		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
			    arvif->vdev_id, ret, param_value);
		return ret;
	}

	param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
	param_value =
		u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
		u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
				HE_TRIG_NONTRIG_SOUNDING_MODE);
	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    param_id, param_value);
	if (ret) {
		ath12k_warn(ab, "failed to set vdev %d HE sounding mode: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return ret;
}

static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif)
{
	struct ieee80211_vif *vif = arvif->vif;
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	u32 param_id, param_value;
	int ret;

	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
	if (vif->type != NL80211_IFTYPE_STATION &&
	    vif->type != NL80211_IFTYPE_AP)
		vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
					IEEE80211_OFFLOAD_DECAP_ENABLED);

	if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
		arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
	else
		arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;

	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    param_id, arvif->tx_encap_type);
	if (ret) {
		ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
			    arvif->vdev_id, ret);
		vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
	}

	param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
	if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
		param_value = ATH12K_HW_TXRX_ETHERNET;
	else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		param_value = ATH12K_HW_TXRX_RAW;
	else
		param_value = ATH12K_HW_TXRX_NATIVE_WIFI;

	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    param_id, param_value);
	if (ret) {
		ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
			    arvif->vdev_id, ret);
		vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
	}
}

static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif)
{
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);

	ath12k_mac_update_vif_offload(arvif);
}

static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar;
	struct ath12k_base *ab;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
	struct ath12k_wmi_peer_create_arg peer_param;
	u32 param_id, param_value;
	u16 nss;
	int i;
	int ret;
	int bit;

	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;

	ar = ath12k_ah_to_ar(ah);
	ab = ar->ab;

	mutex_lock(&ar->conf_mutex);

if (vif->type == NL80211_IFTYPE_AP && 5550 ar->num_peers > (ar->max_num_peers - 1)) { 5551 ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); 5552 ret = -ENOBUFS; 5553 goto err; 5554 } 5555 5556 if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { 5557 ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", 5558 TARGET_NUM_VDEVS); 5559 ret = -EBUSY; 5560 goto err; 5561 } 5562 5563 memset(arvif, 0, sizeof(*arvif)); 5564 5565 arvif->ar = ar; 5566 arvif->vif = vif; 5567 5568 INIT_LIST_HEAD(&arvif->list); 5569 5570 /* Should we initialize any worker to handle connection loss indication 5571 * from firmware in sta mode? 5572 */ 5573 5574 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 5575 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 5576 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 5577 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 5578 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 5579 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 5580 } 5581 5582 bit = __ffs64(ab->free_vdev_map); 5583 5584 arvif->vdev_id = bit; 5585 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 5586 5587 switch (vif->type) { 5588 case NL80211_IFTYPE_UNSPECIFIED: 5589 case NL80211_IFTYPE_STATION: 5590 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5591 break; 5592 case NL80211_IFTYPE_MESH_POINT: 5593 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; 5594 fallthrough; 5595 case NL80211_IFTYPE_AP: 5596 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5597 break; 5598 case NL80211_IFTYPE_MONITOR: 5599 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5600 ar->monitor_vdev_id = bit; 5601 break; 5602 default: 5603 WARN_ON(1); 5604 break; 5605 } 5606 5607 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n", 5608 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5609 ab->free_vdev_map); 5610 5611 vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1); 5612 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5613 vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1); 5614 5615 ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); 5616 5617 ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); 5618 if (ret) { 5619 ath12k_warn(ab, "failed to create WMI vdev %d: %d\n", 5620 arvif->vdev_id, ret); 5621 goto err; 5622 } 5623 5624 ar->num_created_vdevs++; 5625 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n", 5626 vif->addr, arvif->vdev_id); 5627 ar->allocated_vdev_map |= 1LL << arvif->vdev_id; 5628 ab->free_vdev_map &= ~(1LL << arvif->vdev_id); 5629 5630 spin_lock_bh(&ar->data_lock); 5631 list_add(&arvif->list, &ar->arvifs); 5632 spin_unlock_bh(&ar->data_lock); 5633 5634 ath12k_mac_update_vif_offload(arvif); 5635 5636 nss = hweight32(ar->cfg_tx_chainmask) ? 
: 1; 5637 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 5638 WMI_VDEV_PARAM_NSS, nss); 5639 if (ret) { 5640 ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", 5641 arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret); 5642 goto err_vdev_del; 5643 } 5644 5645 switch (arvif->vdev_type) { 5646 case WMI_VDEV_TYPE_AP: 5647 peer_param.vdev_id = arvif->vdev_id; 5648 peer_param.peer_addr = vif->addr; 5649 peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 5650 ret = ath12k_peer_create(ar, arvif, NULL, &peer_param); 5651 if (ret) { 5652 ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n", 5653 arvif->vdev_id, ret); 5654 goto err_vdev_del; 5655 } 5656 5657 ret = ath12k_mac_set_kickout(arvif); 5658 if (ret) { 5659 ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n", 5660 arvif->vdev_id, ret); 5661 goto err_peer_del; 5662 } 5663 break; 5664 case WMI_VDEV_TYPE_STA: 5665 param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5666 param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5667 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5668 param_id, param_value); 5669 if (ret) { 5670 ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n", 5671 arvif->vdev_id, ret); 5672 goto err_peer_del; 5673 } 5674 5675 param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 5676 param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 5677 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5678 param_id, param_value); 5679 if (ret) { 5680 ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n", 5681 arvif->vdev_id, ret); 5682 goto err_peer_del; 5683 } 5684 5685 param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT; 5686 param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 5687 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5688 param_id, param_value); 5689 if (ret) { 5690 ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n", 5691 arvif->vdev_id, ret); 5692 goto err_peer_del; 5693 } 5694 5695 ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false); 5696 if (ret) { 5697 ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", 5698 arvif->vdev_id, ret); 5699 goto err_peer_del; 5700 } 5701 break; 5702 default: 5703 break; 5704 } 5705 5706 arvif->txpower = vif->bss_conf.txpower; 5707 ret = ath12k_mac_txpower_recalc(ar); 5708 if (ret) 5709 goto err_peer_del; 5710 5711 param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; 5712 param_value = hw->wiphy->rts_threshold; 5713 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 5714 param_id, param_value); 5715 if (ret) { 5716 ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n", 5717 arvif->vdev_id, ret); 5718 } 5719 5720 ath12k_dp_vdev_tx_attach(ar, arvif); 5721 5722 if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled) 5723 ath12k_mac_monitor_vdev_create(ar); 5724 5725 mutex_unlock(&ar->conf_mutex); 5726 5727 return ret; 5728 5729 err_peer_del: 5730 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5731 reinit_completion(&ar->peer_delete_done); 5732 5733 ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr, 5734 arvif->vdev_id); 5735 if (ret) { 5736 ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", 5737 arvif->vdev_id, vif->addr); 5738 goto err; 5739 } 5740 5741 ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, 5742 vif->addr); 5743 if (ret) 5744 goto err; 5745 5746 ar->num_peers--; 5747 } 5748 5749 err_vdev_del: 5750 ath12k_wmi_vdev_delete(ar, arvif->vdev_id); 5751 ar->num_created_vdevs--; 5752 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); 5753 ab->free_vdev_map |= 1LL << 
arvif->vdev_id; 5754 ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); 5755 spin_lock_bh(&ar->data_lock); 5756 list_del(&arvif->list); 5757 spin_unlock_bh(&ar->data_lock); 5758 5759 err: 5760 mutex_unlock(&ar->conf_mutex); 5761 5762 return ret; 5763 } 5764 5765 static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif) 5766 { 5767 struct ath12k_tx_desc_info *tx_desc_info; 5768 struct ath12k_skb_cb *skb_cb; 5769 struct sk_buff *skb; 5770 int i; 5771 5772 for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { 5773 spin_lock_bh(&dp->tx_desc_lock[i]); 5774 5775 list_for_each_entry(tx_desc_info, &dp->tx_desc_used_list[i], 5776 list) { 5777 skb = tx_desc_info->skb; 5778 if (!skb) 5779 continue; 5780 5781 skb_cb = ATH12K_SKB_CB(skb); 5782 if (skb_cb->vif == vif) 5783 skb_cb->vif = NULL; 5784 } 5785 5786 spin_unlock_bh(&dp->tx_desc_lock[i]); 5787 } 5788 } 5789 5790 static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, 5791 struct ieee80211_vif *vif) 5792 { 5793 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5794 struct ath12k *ar; 5795 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5796 struct ath12k_base *ab; 5797 unsigned long time_left; 5798 int ret; 5799 5800 ar = ath12k_ah_to_ar(ah); 5801 ab = ar->ab; 5802 5803 mutex_lock(&ar->conf_mutex); 5804 5805 ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", 5806 arvif->vdev_id); 5807 5808 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5809 ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); 5810 if (ret) 5811 ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", 5812 arvif->vdev_id, ret); 5813 } 5814 5815 reinit_completion(&ar->vdev_delete_done); 5816 5817 ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id); 5818 if (ret) { 5819 ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n", 5820 arvif->vdev_id, ret); 5821 goto err_vdev_del; 5822 } 5823 5824 time_left = wait_for_completion_timeout(&ar->vdev_delete_done, 5825 ATH12K_VDEV_DELETE_TIMEOUT_HZ); 5826 if (time_left == 0) { 5827 ath12k_warn(ab, "Timeout in receiving vdev delete response\n"); 5828 goto err_vdev_del; 5829 } 5830 5831 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 5832 ar->monitor_vdev_id = -1; 5833 ar->monitor_vdev_created = false; 5834 } else if (ar->monitor_vdev_created && !ar->monitor_started) { 5835 ret = ath12k_mac_monitor_vdev_delete(ar); 5836 } 5837 5838 ab->free_vdev_map |= 1LL << (arvif->vdev_id); 5839 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); 5840 ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id); 5841 ar->num_created_vdevs--; 5842 5843 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", 5844 vif->addr, arvif->vdev_id); 5845 5846 err_vdev_del: 5847 spin_lock_bh(&ar->data_lock); 5848 list_del(&arvif->list); 5849 spin_unlock_bh(&ar->data_lock); 5850 5851 ath12k_peer_cleanup(ar, arvif->vdev_id); 5852 5853 idr_for_each(&ar->txmgmt_idr, 5854 ath12k_mac_vif_txmgmt_idr_remove, vif); 5855 5856 ath12k_mac_vif_unref(&ab->dp, vif); 5857 ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id); 5858 5859 /* Recalc txpower for remaining vdev */ 5860 ath12k_mac_txpower_recalc(ar); 5861 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 5862 5863 /* TODO: recal traffic pause state based on the available vdevs */ 5864 5865 mutex_unlock(&ar->conf_mutex); 5866 } 5867 5868 /* FIXME: Has to be verified. 
*/ 5869 #define SUPPORTED_FILTERS \ 5870 (FIF_ALLMULTI | \ 5871 FIF_CONTROL | \ 5872 FIF_PSPOLL | \ 5873 FIF_OTHER_BSS | \ 5874 FIF_BCN_PRBRESP_PROMISC | \ 5875 FIF_PROBE_REQ | \ 5876 FIF_FCSFAIL) 5877 5878 static void ath12k_mac_configure_filter(struct ath12k *ar, 5879 unsigned int total_flags) 5880 { 5881 bool reset_flag; 5882 int ret; 5883 5884 lockdep_assert_held(&ar->conf_mutex); 5885 5886 ar->filter_flags = total_flags; 5887 5888 /* For monitor mode */ 5889 reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); 5890 5891 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); 5892 if (!ret) { 5893 if (!reset_flag) 5894 set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 5895 else 5896 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 5897 } else { 5898 ath12k_warn(ar->ab, 5899 "fail to set monitor filter: %d\n", ret); 5900 } 5901 5902 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 5903 "total_flags:0x%x, reset_flag:%d\n", 5904 total_flags, reset_flag); 5905 } 5906 5907 static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, 5908 unsigned int changed_flags, 5909 unsigned int *total_flags, 5910 u64 multicast) 5911 { 5912 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5913 struct ath12k *ar; 5914 5915 ar = ath12k_ah_to_ar(ah); 5916 5917 mutex_lock(&ar->conf_mutex); 5918 5919 *total_flags &= SUPPORTED_FILTERS; 5920 ath12k_mac_configure_filter(ar, *total_flags); 5921 5922 mutex_unlock(&ar->conf_mutex); 5923 } 5924 5925 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 5926 { 5927 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5928 struct ath12k *ar; 5929 5930 ar = ath12k_ah_to_ar(ah); 5931 5932 mutex_lock(&ar->conf_mutex); 5933 5934 *tx_ant = ar->cfg_tx_chainmask; 5935 *rx_ant = ar->cfg_rx_chainmask; 5936 5937 mutex_unlock(&ar->conf_mutex); 5938 5939 return 0; 5940 } 5941 5942 static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 5943 { 5944 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5945 struct ath12k *ar; 5946 int ret; 5947 5948 ar = ath12k_ah_to_ar(ah); 5949 5950 mutex_lock(&ar->conf_mutex); 5951 ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); 5952 mutex_unlock(&ar->conf_mutex); 5953 5954 return ret; 5955 } 5956 5957 static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif, 5958 struct ieee80211_ampdu_params *params) 5959 { 5960 struct ath12k *ar = arvif->ar; 5961 int ret = -EINVAL; 5962 5963 lockdep_assert_held(&ar->conf_mutex); 5964 5965 switch (params->action) { 5966 case IEEE80211_AMPDU_RX_START: 5967 ret = ath12k_dp_rx_ampdu_start(ar, params); 5968 break; 5969 case IEEE80211_AMPDU_RX_STOP: 5970 ret = ath12k_dp_rx_ampdu_stop(ar, params); 5971 break; 5972 case IEEE80211_AMPDU_TX_START: 5973 case IEEE80211_AMPDU_TX_STOP_CONT: 5974 case IEEE80211_AMPDU_TX_STOP_FLUSH: 5975 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 5976 case IEEE80211_AMPDU_TX_OPERATIONAL: 5977 /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211 5978 * Tx aggregation requests. 
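 * Returning -EOPNOTSUPP for the TX cases below keeps mac80211 from trying to manage TX BA sessions itself; the target negotiates and runs them on its own.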
5979 */ 5980 ret = -EOPNOTSUPP; 5981 break; 5982 } 5983 5984 return ret; 5985 } 5986 5987 static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, 5988 struct ieee80211_vif *vif, 5989 struct ieee80211_ampdu_params *params) 5990 { 5991 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5992 struct ath12k *ar; 5993 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5994 int ret = -EINVAL; 5995 5996 ar = ath12k_ah_to_ar(ah); 5997 5998 mutex_lock(&ar->conf_mutex); 5999 ret = ath12k_mac_ampdu_action(arvif, params); 6000 mutex_unlock(&ar->conf_mutex); 6001 6002 if (ret) 6003 ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n", 6004 ar->pdev_idx, params->action, ret); 6005 6006 return ret; 6007 } 6008 6009 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, 6010 struct ieee80211_chanctx_conf *ctx) 6011 { 6012 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6013 struct ath12k *ar; 6014 struct ath12k_base *ab; 6015 6016 ar = ath12k_ah_to_ar(ah); 6017 ab = ar->ab; 6018 6019 ath12k_dbg(ab, ATH12K_DBG_MAC, 6020 "mac chanctx add freq %u width %d ptr %pK\n", 6021 ctx->def.chan->center_freq, ctx->def.width, ctx); 6022 6023 mutex_lock(&ar->conf_mutex); 6024 6025 spin_lock_bh(&ar->data_lock); 6026 /* TODO: In case of multiple channel context, populate rx_channel from 6027 * Rx PPDU desc information. 6028 */ 6029 ar->rx_channel = ctx->def.chan; 6030 spin_unlock_bh(&ar->data_lock); 6031 6032 mutex_unlock(&ar->conf_mutex); 6033 6034 return 0; 6035 } 6036 6037 static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 6038 struct ieee80211_chanctx_conf *ctx) 6039 { 6040 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6041 struct ath12k *ar; 6042 struct ath12k_base *ab; 6043 6044 ar = ath12k_ah_to_ar(ah); 6045 ab = ar->ab; 6046 6047 ath12k_dbg(ab, ATH12K_DBG_MAC, 6048 "mac chanctx remove freq %u width %d ptr %pK\n", 6049 ctx->def.chan->center_freq, ctx->def.width, ctx); 6050 6051 mutex_lock(&ar->conf_mutex); 6052 6053 spin_lock_bh(&ar->data_lock); 6054 /* TODO: In case of there is one more channel context left, populate 6055 * rx_channel with the channel of that remaining channel context. 
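 * Until that is implemented, rx_channel is simply cleared below.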
6056 */ 6057 ar->rx_channel = NULL; 6058 spin_unlock_bh(&ar->data_lock); 6059 6060 mutex_unlock(&ar->conf_mutex); 6061 } 6062 6063 static enum wmi_phy_mode 6064 ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, 6065 enum wmi_phy_mode mode, 6066 enum nl80211_band band, 6067 enum nl80211_iftype type) 6068 { 6069 struct ieee80211_sta_eht_cap *eht_cap; 6070 enum wmi_phy_mode down_mode; 6071 6072 if (mode < MODE_11BE_EHT20) 6073 return mode; 6074 6075 eht_cap = &ar->mac.iftype[band][type].eht_cap; 6076 if (eht_cap->has_eht) 6077 return mode; 6078 6079 switch (mode) { 6080 case MODE_11BE_EHT20: 6081 down_mode = MODE_11AX_HE20; 6082 break; 6083 case MODE_11BE_EHT40: 6084 down_mode = MODE_11AX_HE40; 6085 break; 6086 case MODE_11BE_EHT80: 6087 down_mode = MODE_11AX_HE80; 6088 break; 6089 case MODE_11BE_EHT80_80: 6090 down_mode = MODE_11AX_HE80_80; 6091 break; 6092 case MODE_11BE_EHT160: 6093 case MODE_11BE_EHT160_160: 6094 case MODE_11BE_EHT320: 6095 down_mode = MODE_11AX_HE160; 6096 break; 6097 case MODE_11BE_EHT20_2G: 6098 down_mode = MODE_11AX_HE20_2G; 6099 break; 6100 case MODE_11BE_EHT40_2G: 6101 down_mode = MODE_11AX_HE40_2G; 6102 break; 6103 default: 6104 down_mode = mode; 6105 break; 6106 } 6107 6108 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 6109 "mac vdev start phymode %s downgrade to %s\n", 6110 ath12k_mac_phymode_str(mode), 6111 ath12k_mac_phymode_str(down_mode)); 6112 6113 return down_mode; 6114 } 6115 6116 static int 6117 ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, 6118 struct ieee80211_chanctx_conf *ctx, 6119 bool restart) 6120 { 6121 struct ath12k *ar = arvif->ar; 6122 struct ath12k_base *ab = ar->ab; 6123 struct wmi_vdev_start_req_arg arg = {}; 6124 const struct cfg80211_chan_def *chandef = &ctx->def; 6125 int he_support = arvif->vif->bss_conf.he_support; 6126 int ret; 6127 6128 lockdep_assert_held(&ar->conf_mutex); 6129 6130 reinit_completion(&ar->vdev_setup_done); 6131 6132 arg.vdev_id = arvif->vdev_id; 6133 arg.dtim_period = arvif->dtim_period; 6134 arg.bcn_intval = arvif->beacon_interval; 6135 arg.punct_bitmap = ~arvif->punct_bitmap; 6136 6137 arg.freq = chandef->chan->center_freq; 6138 arg.band_center_freq1 = chandef->center_freq1; 6139 arg.band_center_freq2 = chandef->center_freq2; 6140 arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; 6141 6142 arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, 6143 chandef->chan->band, 6144 arvif->vif->type); 6145 arg.min_power = 0; 6146 arg.max_power = chandef->chan->max_power * 2; 6147 arg.max_reg_power = chandef->chan->max_reg_power * 2; 6148 arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 6149 6150 arg.pref_tx_streams = ar->num_tx_chains; 6151 arg.pref_rx_streams = ar->num_rx_chains; 6152 6153 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 6154 arg.ssid = arvif->u.ap.ssid; 6155 arg.ssid_len = arvif->u.ap.ssid_len; 6156 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 6157 6158 /* For now allow DFS for AP mode */ 6159 arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 6160 6161 arg.freq2_radar = ctx->radar_enabled; 6162 6163 arg.passive = arg.chan_radar; 6164 6165 spin_lock_bh(&ab->base_lock); 6166 arg.regdomain = ar->ab->dfs_region; 6167 spin_unlock_bh(&ab->base_lock); 6168 6169 /* TODO: Notify if secondary 80Mhz also needs radar detection */ 6170 if (he_support) { 6171 ret = ath12k_set_he_mu_sounding_mode(ar, arvif); 6172 if (ret) { 6173 ath12k_warn(ar->ab, "failed to set he mode vdev %i\n", 6174 arg.vdev_id); 6175 return ret; 6176 } 6177 } 6178 } 6179 6180 arg.passive |= 
!!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); 6181 6182 ath12k_dbg(ab, ATH12K_DBG_MAC, 6183 "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n", 6184 arg.vdev_id, arg.freq, 6185 ath12k_mac_phymode_str(arg.mode), arg.punct_bitmap); 6186 6187 ret = ath12k_wmi_vdev_start(ar, &arg, restart); 6188 if (ret) { 6189 ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n", 6190 restart ? "restart" : "start", arg.vdev_id); 6191 return ret; 6192 } 6193 6194 ret = ath12k_mac_vdev_setup_sync(ar); 6195 if (ret) { 6196 ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n", 6197 arg.vdev_id, restart ? "restart" : "start", ret); 6198 return ret; 6199 } 6200 6201 ar->num_started_vdevs++; 6202 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", 6203 arvif->vif->addr, arvif->vdev_id); 6204 6205 /* Enable the CAC flag in the driver by checking the channel's DFS CAC time, 6206 * i.e. the dfs_cac_ms value, which is valid only for radar channels, and the 6207 * DFS state NL80211_DFS_USABLE, which indicates that CAC must be completed 6208 * before the channel can be used. This flag is used to drop rx packets 6209 * during CAC. 6210 */ 6211 /* TODO: Set the flag for other interface types as required */ 6212 if (arvif->vdev_type == WMI_VDEV_TYPE_AP && 6213 chandef->chan->dfs_cac_ms && 6214 chandef->chan->dfs_state == NL80211_DFS_USABLE) { 6215 set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 6216 ath12k_dbg(ab, ATH12K_DBG_MAC, 6217 "CAC Started in chan_freq %d for vdev %d\n", 6218 arg.freq, arg.vdev_id); 6219 } 6220 6221 ret = ath12k_mac_set_txbf_conf(arvif); 6222 if (ret) 6223 ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n", 6224 arvif->vdev_id, ret); 6225 6226 return 0; 6227 } 6228 6229 static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif) 6230 { 6231 struct ath12k *ar = arvif->ar; 6232 int ret; 6233 6234 lockdep_assert_held(&ar->conf_mutex); 6235 6236 reinit_completion(&ar->vdev_setup_done); 6237 6238 ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id); 6239 if (ret) { 6240 ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n", 6241 arvif->vdev_id, ret); 6242 goto err; 6243 } 6244 6245 ret = ath12k_mac_vdev_setup_sync(ar); 6246 if (ret) { 6247 ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n", 6248 arvif->vdev_id, ret); 6249 goto err; 6250 } 6251 6252 WARN_ON(ar->num_started_vdevs == 0); 6253 6254 ar->num_started_vdevs--; 6255 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n", 6256 arvif->vif->addr, arvif->vdev_id); 6257 6258 if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) { 6259 clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 6260 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n", 6261 arvif->vdev_id); 6262 } 6263 6264 return 0; 6265 err: 6266 return ret; 6267 } 6268 6269 static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, 6270 struct ieee80211_chanctx_conf *ctx) 6271 { 6272 return ath12k_mac_vdev_start_restart(arvif, ctx, false); 6273 } 6274 6275 static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, 6276 struct ieee80211_chanctx_conf *ctx) 6277 { 6278 return ath12k_mac_vdev_start_restart(arvif, ctx, true); 6279 } 6280 6281 struct ath12k_mac_change_chanctx_arg { 6282 struct ieee80211_chanctx_conf *ctx; 6283 struct ieee80211_vif_chanctx_switch *vifs; 6284 int n_vifs; 6285 int next_vif; 6286 }; 6287 6288 static void 6289 ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 6290 struct ieee80211_vif *vif) 6291 { 6292 struct ath12k_mac_change_chanctx_arg *arg = data; 6293 6294 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != 
arg->ctx) 6295 return; 6296 6297 arg->n_vifs++; 6298 } 6299 6300 static void 6301 ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 6302 struct ieee80211_vif *vif) 6303 { 6304 struct ath12k_mac_change_chanctx_arg *arg = data; 6305 struct ieee80211_chanctx_conf *ctx; 6306 6307 ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); 6308 if (ctx != arg->ctx) 6309 return; 6310 6311 if (WARN_ON(arg->next_vif == arg->n_vifs)) 6312 return; 6313 6314 arg->vifs[arg->next_vif].vif = vif; 6315 arg->vifs[arg->next_vif].old_ctx = ctx; 6316 arg->vifs[arg->next_vif].new_ctx = ctx; 6317 arg->next_vif++; 6318 } 6319 6320 static void 6321 ath12k_mac_update_vif_chan(struct ath12k *ar, 6322 struct ieee80211_vif_chanctx_switch *vifs, 6323 int n_vifs) 6324 { 6325 struct ath12k_base *ab = ar->ab; 6326 struct ath12k_vif *arvif; 6327 int ret; 6328 int i; 6329 bool monitor_vif = false; 6330 6331 lockdep_assert_held(&ar->conf_mutex); 6332 6333 for (i = 0; i < n_vifs; i++) { 6334 arvif = ath12k_vif_to_arvif(vifs[i].vif); 6335 6336 if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) 6337 monitor_vif = true; 6338 6339 ath12k_dbg(ab, ATH12K_DBG_MAC, 6340 "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", 6341 arvif->vdev_id, 6342 vifs[i].old_ctx->def.chan->center_freq, 6343 vifs[i].new_ctx->def.chan->center_freq, 6344 vifs[i].old_ctx->def.width, 6345 vifs[i].new_ctx->def.width); 6346 6347 if (WARN_ON(!arvif->is_started)) 6348 continue; 6349 6350 if (WARN_ON(!arvif->is_up)) 6351 continue; 6352 6353 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); 6354 if (ret) { 6355 ath12k_warn(ab, "failed to down vdev %d: %d\n", 6356 arvif->vdev_id, ret); 6357 continue; 6358 } 6359 } 6360 6361 /* All relevant vdevs are downed and associated channel resources 6362 * should be available for the channel switch now. 6363 */ 6364 6365 /* TODO: Update ar->rx_channel */ 6366 6367 for (i = 0; i < n_vifs; i++) { 6368 arvif = ath12k_vif_to_arvif(vifs[i].vif); 6369 6370 if (WARN_ON(!arvif->is_started)) 6371 continue; 6372 6373 arvif->punct_bitmap = vifs[i].new_ctx->def.punctured; 6374 6375 /* Firmware expects vdev_restart only if the vdev is up. 6376 * If the vdev is down, it expects vdev_stop->vdev_start. 
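 * The code below therefore issues vdev_restart for vdevs that are up and falls back to an explicit vdev_stop followed by vdev_start for the rest.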
6377 */ 6378 if (arvif->is_up) { 6379 ret = ath12k_mac_vdev_restart(arvif, vifs[i].new_ctx); 6380 if (ret) { 6381 ath12k_warn(ab, "failed to restart vdev %d: %d\n", 6382 arvif->vdev_id, ret); 6383 continue; 6384 } 6385 } else { 6386 ret = ath12k_mac_vdev_stop(arvif); 6387 if (ret) { 6388 ath12k_warn(ab, "failed to stop vdev %d: %d\n", 6389 arvif->vdev_id, ret); 6390 continue; 6391 } 6392 6393 ret = ath12k_mac_vdev_start(arvif, vifs[i].new_ctx); 6394 if (ret) 6395 ath12k_warn(ab, "failed to start vdev %d: %d\n", 6396 arvif->vdev_id, ret); 6397 continue; 6398 } 6399 6400 ret = ath12k_mac_setup_bcn_tmpl(arvif); 6401 if (ret) 6402 ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", 6403 ret); 6404 6405 ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 6406 arvif->bssid); 6407 if (ret) { 6408 ath12k_warn(ab, "failed to bring vdev up %d: %d\n", 6409 arvif->vdev_id, ret); 6410 continue; 6411 } 6412 } 6413 6414 /* Restart the internal monitor vdev on new channel */ 6415 if (!monitor_vif && ar->monitor_vdev_created) { 6416 if (!ath12k_mac_monitor_stop(ar)) 6417 ath12k_mac_monitor_start(ar); 6418 } 6419 } 6420 6421 static void 6422 ath12k_mac_update_active_vif_chan(struct ath12k *ar, 6423 struct ieee80211_chanctx_conf *ctx) 6424 { 6425 struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx }; 6426 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 6427 6428 lockdep_assert_held(&ar->conf_mutex); 6429 6430 ieee80211_iterate_active_interfaces_atomic(hw, 6431 IEEE80211_IFACE_ITER_NORMAL, 6432 ath12k_mac_change_chanctx_cnt_iter, 6433 &arg); 6434 if (arg.n_vifs == 0) 6435 return; 6436 6437 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL); 6438 if (!arg.vifs) 6439 return; 6440 6441 ieee80211_iterate_active_interfaces_atomic(hw, 6442 IEEE80211_IFACE_ITER_NORMAL, 6443 ath12k_mac_change_chanctx_fill_iter, 6444 &arg); 6445 6446 ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 6447 6448 kfree(arg.vifs); 6449 } 6450 6451 static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, 6452 struct ieee80211_chanctx_conf *ctx, 6453 u32 changed) 6454 { 6455 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6456 struct ath12k *ar; 6457 struct ath12k_base *ab; 6458 6459 ar = ath12k_ah_to_ar(ah); 6460 ab = ar->ab; 6461 6462 mutex_lock(&ar->conf_mutex); 6463 6464 ath12k_dbg(ab, ATH12K_DBG_MAC, 6465 "mac chanctx change freq %u width %d ptr %pK changed %x\n", 6466 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 6467 6468 /* This shouldn't really happen because channel switching should use 6469 * switch_vif_chanctx(). 
6470 */ 6471 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 6472 goto unlock; 6473 6474 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || 6475 changed & IEEE80211_CHANCTX_CHANGE_RADAR || 6476 changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) 6477 ath12k_mac_update_active_vif_chan(ar, ctx); 6478 6479 /* TODO: Recalc radar detection */ 6480 6481 unlock: 6482 mutex_unlock(&ar->conf_mutex); 6483 } 6484 6485 static int ath12k_start_vdev_delay(struct ath12k *ar, 6486 struct ath12k_vif *arvif) 6487 { 6488 struct ath12k_base *ab = ar->ab; 6489 struct ieee80211_vif *vif = arvif->vif; 6490 int ret; 6491 6492 if (WARN_ON(arvif->is_started)) 6493 return -EBUSY; 6494 6495 ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx); 6496 if (ret) { 6497 ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 6498 arvif->vdev_id, vif->addr, 6499 arvif->chanctx.def.chan->center_freq, ret); 6500 return ret; 6501 } 6502 6503 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 6504 ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id); 6505 if (ret) { 6506 ath12k_warn(ab, "failed put monitor up: %d\n", ret); 6507 return ret; 6508 } 6509 } 6510 6511 arvif->is_started = true; 6512 6513 /* TODO: Setup ps and cts/rts protection */ 6514 return 0; 6515 } 6516 6517 static int 6518 ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 6519 struct ieee80211_vif *vif, 6520 struct ieee80211_bss_conf *link_conf, 6521 struct ieee80211_chanctx_conf *ctx) 6522 { 6523 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6524 struct ath12k *ar; 6525 struct ath12k_base *ab; 6526 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6527 int ret; 6528 struct ath12k_wmi_peer_create_arg param; 6529 6530 ar = ath12k_ah_to_ar(ah); 6531 ab = ar->ab; 6532 6533 mutex_lock(&ar->conf_mutex); 6534 6535 ath12k_dbg(ab, ATH12K_DBG_MAC, 6536 "mac chanctx assign ptr %pK vdev_id %i\n", 6537 ctx, arvif->vdev_id); 6538 6539 arvif->punct_bitmap = ctx->def.punctured; 6540 6541 /* for some targets bss peer must be created before vdev_start */ 6542 if (ab->hw_params->vdev_start_delay && 6543 arvif->vdev_type != WMI_VDEV_TYPE_AP && 6544 arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 6545 !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) { 6546 memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); 6547 ret = 0; 6548 goto out; 6549 } 6550 6551 if (WARN_ON(arvif->is_started)) { 6552 ret = -EBUSY; 6553 goto out; 6554 } 6555 6556 if (ab->hw_params->vdev_start_delay && 6557 arvif->vdev_type != WMI_VDEV_TYPE_AP && 6558 arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { 6559 param.vdev_id = arvif->vdev_id; 6560 param.peer_type = WMI_PEER_TYPE_DEFAULT; 6561 param.peer_addr = ar->mac_addr; 6562 6563 ret = ath12k_peer_create(ar, arvif, NULL, &param); 6564 if (ret) { 6565 ath12k_warn(ab, "failed to create peer after vdev start delay: %d", 6566 ret); 6567 goto out; 6568 } 6569 } 6570 6571 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 6572 ret = ath12k_mac_monitor_start(ar); 6573 if (ret) 6574 goto out; 6575 arvif->is_started = true; 6576 goto out; 6577 } 6578 6579 ret = ath12k_mac_vdev_start(arvif, ctx); 6580 if (ret) { 6581 ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 6582 arvif->vdev_id, vif->addr, 6583 ctx->def.chan->center_freq, ret); 6584 goto out; 6585 } 6586 6587 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) 6588 ath12k_mac_monitor_start(ar); 6589 6590 arvif->is_started = true; 6591 6592 /* TODO: Setup ps and cts/rts protection */ 6593 6594 out: 6595 mutex_unlock(&ar->conf_mutex); 6596 6597 return ret; 6598 } 6599 6600 
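/* Counterpart of ath12k_mac_op_assign_vif_chanctx(): stop the monitor vdev for monitor interfaces (and, on targets with vdev_start_delay, bring it down and drop the self peer if one exists), stop the vdev itself and, when the last non-monitor vdev goes away, stop the driver-created monitor vdev as well. */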
static void 6601 ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 6602 struct ieee80211_vif *vif, 6603 struct ieee80211_bss_conf *link_conf, 6604 struct ieee80211_chanctx_conf *ctx) 6605 { 6606 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6607 struct ath12k *ar; 6608 struct ath12k_base *ab; 6609 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6610 int ret; 6611 6612 ar = ath12k_ah_to_ar(ah); 6613 ab = ar->ab; 6614 6615 mutex_lock(&ar->conf_mutex); 6616 6617 ath12k_dbg(ab, ATH12K_DBG_MAC, 6618 "mac chanctx unassign ptr %pK vdev_id %i\n", 6619 ctx, arvif->vdev_id); 6620 6621 WARN_ON(!arvif->is_started); 6622 6623 if (ab->hw_params->vdev_start_delay && 6624 arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && 6625 ath12k_peer_find_by_addr(ab, ar->mac_addr)) 6626 ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); 6627 6628 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 6629 ret = ath12k_mac_monitor_stop(ar); 6630 if (ret) { 6631 mutex_unlock(&ar->conf_mutex); 6632 return; 6633 } 6634 6635 arvif->is_started = false; 6636 } 6637 6638 ret = ath12k_mac_vdev_stop(arvif); 6639 if (ret) 6640 ath12k_warn(ab, "failed to stop vdev %i: %d\n", 6641 arvif->vdev_id, ret); 6642 6643 arvif->is_started = false; 6644 6645 if (ab->hw_params->vdev_start_delay && 6646 arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 6647 ath12k_wmi_vdev_down(ar, arvif->vdev_id); 6648 6649 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 6650 ar->num_started_vdevs == 1 && ar->monitor_vdev_created) 6651 ath12k_mac_monitor_stop(ar); 6652 6653 mutex_unlock(&ar->conf_mutex); 6654 } 6655 6656 static int 6657 ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 6658 struct ieee80211_vif_chanctx_switch *vifs, 6659 int n_vifs, 6660 enum ieee80211_chanctx_switch_mode mode) 6661 { 6662 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6663 struct ath12k *ar; 6664 6665 ar = ath12k_ah_to_ar(ah); 6666 6667 mutex_lock(&ar->conf_mutex); 6668 6669 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 6670 "mac chanctx switch n_vifs %d mode %d\n", 6671 n_vifs, mode); 6672 ath12k_mac_update_vif_chan(ar, vifs, n_vifs); 6673 6674 mutex_unlock(&ar->conf_mutex); 6675 6676 return 0; 6677 } 6678 6679 static int 6680 ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value) 6681 { 6682 struct ath12k_vif *arvif; 6683 int ret = 0; 6684 6685 mutex_lock(&ar->conf_mutex); 6686 list_for_each_entry(arvif, &ar->arvifs, list) { 6687 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n", 6688 arvif->vdev_id, param, value); 6689 6690 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6691 param, value); 6692 if (ret) { 6693 ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n", 6694 param, arvif->vdev_id, ret); 6695 break; 6696 } 6697 } 6698 mutex_unlock(&ar->conf_mutex); 6699 return ret; 6700 } 6701 6702 /* mac80211 stores the RTS/fragmentation threshold per device, but the 6703 * ath12k driver applies it per interface (vdev) in the firmware. 6704 */ 6705 static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 6706 { 6707 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6708 struct ath12k *ar; 6709 int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret; 6710 6711 ar = ath12k_ah_to_ar(ah); 6712 6713 ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value); 6714 6715 return ret; 6716 } 6717 6718 static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) 6719 { 6720 /* Even though there's a WMI vdev param for fragmentation threshold no 6721 * known firmware actually implements it. 
Moreover it is not possible to 6722 * rely frame fragmentation to mac80211 because firmware clears the 6723 * "more fragments" bit in frame control making it impossible for remote 6724 * devices to reassemble frames. 6725 * 6726 * Hence implement a dummy callback just to say fragmentation isn't 6727 * supported. This effectively prevents mac80211 from doing frame 6728 * fragmentation in software. 6729 */ 6730 return -EOPNOTSUPP; 6731 } 6732 6733 static void ath12k_mac_flush(struct ath12k *ar) 6734 { 6735 long time_left; 6736 6737 time_left = wait_event_timeout(ar->dp.tx_empty_waitq, 6738 (atomic_read(&ar->dp.num_tx_pending) == 0), 6739 ATH12K_FLUSH_TIMEOUT); 6740 if (time_left == 0) 6741 ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left); 6742 6743 time_left = wait_event_timeout(ar->txmgmt_empty_waitq, 6744 (atomic_read(&ar->num_pending_mgmt_tx) == 0), 6745 ATH12K_FLUSH_TIMEOUT); 6746 if (time_left == 0) 6747 ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n", 6748 time_left); 6749 } 6750 6751 static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 6752 u32 queues, bool drop) 6753 { 6754 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6755 struct ath12k *ar = ath12k_ah_to_ar(ah); 6756 6757 if (drop) 6758 return; 6759 6760 ath12k_mac_flush(ar); 6761 } 6762 6763 static int 6764 ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar, 6765 enum nl80211_band band, 6766 const struct cfg80211_bitrate_mask *mask) 6767 { 6768 int num_rates = 0; 6769 int i; 6770 6771 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6772 num_rates += hweight16(mask->control[band].ht_mcs[i]); 6773 6774 return num_rates; 6775 } 6776 6777 static bool 6778 ath12k_mac_has_single_legacy_rate(struct ath12k *ar, 6779 enum nl80211_band band, 6780 const struct cfg80211_bitrate_mask *mask) 6781 { 6782 int num_rates = 0; 6783 6784 num_rates = hweight32(mask->control[band].legacy); 6785 6786 if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) 6787 return false; 6788 6789 if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) 6790 return false; 6791 6792 return num_rates == 1; 6793 } 6794 6795 static bool 6796 ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar, 6797 enum nl80211_band band, 6798 const struct cfg80211_bitrate_mask *mask, 6799 int *nss) 6800 { 6801 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6802 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6803 u8 ht_nss_mask = 0; 6804 u8 vht_nss_mask = 0; 6805 int i; 6806 6807 /* No need to consider legacy here. 
Basic rates are always present 6808 * in bitrate mask 6809 */ 6810 6811 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6812 if (mask->control[band].ht_mcs[i] == 0) 6813 continue; 6814 else if (mask->control[band].ht_mcs[i] == 6815 sband->ht_cap.mcs.rx_mask[i]) 6816 ht_nss_mask |= BIT(i); 6817 else 6818 return false; 6819 } 6820 6821 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6822 if (mask->control[band].vht_mcs[i] == 0) 6823 continue; 6824 else if (mask->control[band].vht_mcs[i] == 6825 ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6826 vht_nss_mask |= BIT(i); 6827 else 6828 return false; 6829 } 6830 6831 if (ht_nss_mask != vht_nss_mask) 6832 return false; 6833 6834 if (ht_nss_mask == 0) 6835 return false; 6836 6837 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6838 return false; 6839 6840 *nss = fls(ht_nss_mask); 6841 6842 return true; 6843 } 6844 6845 static int 6846 ath12k_mac_get_single_legacy_rate(struct ath12k *ar, 6847 enum nl80211_band band, 6848 const struct cfg80211_bitrate_mask *mask, 6849 u32 *rate, u8 *nss) 6850 { 6851 int rate_idx; 6852 u16 bitrate; 6853 u8 preamble; 6854 u8 hw_rate; 6855 6856 if (hweight32(mask->control[band].legacy) != 1) 6857 return -EINVAL; 6858 6859 rate_idx = ffs(mask->control[band].legacy) - 1; 6860 6861 if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) 6862 rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; 6863 6864 hw_rate = ath12k_legacy_rates[rate_idx].hw_value; 6865 bitrate = ath12k_legacy_rates[rate_idx].bitrate; 6866 6867 if (ath12k_mac_bitrate_is_cck(bitrate)) 6868 preamble = WMI_RATE_PREAMBLE_CCK; 6869 else 6870 preamble = WMI_RATE_PREAMBLE_OFDM; 6871 6872 *nss = 1; 6873 *rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble); 6874 6875 return 0; 6876 } 6877 6878 static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif, 6879 u32 rate, u8 nss, u8 sgi, u8 ldpc) 6880 { 6881 struct ath12k *ar = arvif->ar; 6882 u32 vdev_param; 6883 int ret; 6884 6885 lockdep_assert_held(&ar->conf_mutex); 6886 6887 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", 6888 arvif->vdev_id, rate, nss, sgi); 6889 6890 vdev_param = WMI_VDEV_PARAM_FIXED_RATE; 6891 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6892 vdev_param, rate); 6893 if (ret) { 6894 ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", 6895 rate, ret); 6896 return ret; 6897 } 6898 6899 vdev_param = WMI_VDEV_PARAM_NSS; 6900 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6901 vdev_param, nss); 6902 if (ret) { 6903 ath12k_warn(ar->ab, "failed to set nss param %d: %d\n", 6904 nss, ret); 6905 return ret; 6906 } 6907 6908 vdev_param = WMI_VDEV_PARAM_SGI; 6909 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6910 vdev_param, sgi); 6911 if (ret) { 6912 ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n", 6913 sgi, ret); 6914 return ret; 6915 } 6916 6917 vdev_param = WMI_VDEV_PARAM_LDPC; 6918 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6919 vdev_param, ldpc); 6920 if (ret) { 6921 ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n", 6922 ldpc, ret); 6923 return ret; 6924 } 6925 6926 return 0; 6927 } 6928 6929 static bool 6930 ath12k_mac_vht_mcs_range_present(struct ath12k *ar, 6931 enum nl80211_band band, 6932 const struct cfg80211_bitrate_mask *mask) 6933 { 6934 int i; 6935 u16 vht_mcs; 6936 6937 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 6938 vht_mcs = mask->control[band].vht_mcs[i]; 6939 6940 switch (vht_mcs) { 6941 case 0: 6942 case BIT(8) - 1: 6943 case 
BIT(9) - 1: 6944 case BIT(10) - 1: 6945 break; 6946 default: 6947 return false; 6948 } 6949 } 6950 6951 return true; 6952 } 6953 6954 static void ath12k_mac_set_bitrate_mask_iter(void *data, 6955 struct ieee80211_sta *sta) 6956 { 6957 struct ath12k_vif *arvif = data; 6958 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 6959 struct ath12k *ar = arvif->ar; 6960 6961 spin_lock_bh(&ar->data_lock); 6962 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 6963 spin_unlock_bh(&ar->data_lock); 6964 6965 ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk); 6966 } 6967 6968 static void ath12k_mac_disable_peer_fixed_rate(void *data, 6969 struct ieee80211_sta *sta) 6970 { 6971 struct ath12k_vif *arvif = data; 6972 struct ath12k *ar = arvif->ar; 6973 int ret; 6974 6975 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 6976 arvif->vdev_id, 6977 WMI_PEER_PARAM_FIXED_RATE, 6978 WMI_FIXED_RATE_NONE); 6979 if (ret) 6980 ath12k_warn(ar->ab, 6981 "failed to disable peer fixed rate for STA %pM ret %d\n", 6982 sta->addr, ret); 6983 } 6984 6985 static int 6986 ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 6987 struct ieee80211_vif *vif, 6988 const struct cfg80211_bitrate_mask *mask) 6989 { 6990 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6991 struct cfg80211_chan_def def; 6992 struct ath12k *ar = arvif->ar; 6993 enum nl80211_band band; 6994 const u8 *ht_mcs_mask; 6995 const u16 *vht_mcs_mask; 6996 u32 rate; 6997 u8 nss; 6998 u8 sgi; 6999 u8 ldpc; 7000 int single_nss; 7001 int ret; 7002 int num_rates; 7003 7004 if (ath12k_mac_vif_chan(vif, &def)) 7005 return -EPERM; 7006 7007 band = def.chan->band; 7008 ht_mcs_mask = mask->control[band].ht_mcs; 7009 vht_mcs_mask = mask->control[band].vht_mcs; 7010 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7011 7012 sgi = mask->control[band].gi; 7013 if (sgi == NL80211_TXRATE_FORCE_LGI) { 7014 ret = -EINVAL; 7015 goto out; 7016 } 7017 7018 /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it 7019 * requires passing at least one of used basic rates along with them. 7020 * Fixed rate setting across different preambles(legacy, HT, VHT) is 7021 * not supported by the FW. Hence use of FIXED_RATE vdev param is not 7022 * suitable for setting single HT/VHT rates. 7023 * But, there could be a single basic rate passed from userspace which 7024 * can be done through the FIXED_RATE param. 7025 */ 7026 if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) { 7027 ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate, 7028 &nss); 7029 if (ret) { 7030 ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", 7031 arvif->vdev_id, ret); 7032 goto out; 7033 } 7034 ieee80211_iterate_stations_atomic(hw, 7035 ath12k_mac_disable_peer_fixed_rate, 7036 arvif); 7037 } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7038 &single_nss)) { 7039 rate = WMI_FIXED_RATE_NONE; 7040 nss = single_nss; 7041 } else { 7042 rate = WMI_FIXED_RATE_NONE; 7043 nss = min_t(u32, ar->num_tx_chains, 7044 max(ath12k_mac_max_ht_nss(ht_mcs_mask), 7045 ath12k_mac_max_vht_nss(vht_mcs_mask))); 7046 7047 /* If multiple rates across different preambles are given 7048 * we can reconfigure this info with all peers using PEER_ASSOC 7049 * command with the below exception cases. 7050 * - Single VHT Rate : peer_assoc command accommodates only MCS 7051 * range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211 7052 * mandates passing basic rates along with HT/VHT rates, FW 7053 * doesn't allow switching from VHT to Legacy. 
Hence instead of 7054 * setting legacy and VHT rates using RATEMASK_CMD vdev cmd, 7055 * we could set this VHT rate as peer fixed rate param, which 7056 * will override FIXED rate and FW rate control algorithm. 7057 * If single VHT rate is passed along with HT rates, we select 7058 * the VHT rate as fixed rate for vht peers. 7059 * - Multiple VHT Rates : When Multiple VHT rates are given,this 7060 * can be set using RATEMASK CMD which uses FW rate-ctl alg. 7061 * TODO: Setting multiple VHT MCS and replacing peer_assoc with 7062 * RATEMASK_CMDID can cover all use cases of setting rates 7063 * across multiple preambles and rates within same type. 7064 * But requires more validation of the command at this point. 7065 */ 7066 7067 num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, 7068 mask); 7069 7070 if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) && 7071 num_rates > 1) { 7072 /* TODO: Handle multiple VHT MCS values setting using 7073 * RATEMASK CMD 7074 */ 7075 ath12k_warn(ar->ab, 7076 "Setting more than one MCS Value in bitrate mask not supported\n"); 7077 ret = -EINVAL; 7078 goto out; 7079 } 7080 7081 ieee80211_iterate_stations_atomic(hw, 7082 ath12k_mac_disable_peer_fixed_rate, 7083 arvif); 7084 7085 mutex_lock(&ar->conf_mutex); 7086 7087 arvif->bitrate_mask = *mask; 7088 ieee80211_iterate_stations_atomic(hw, 7089 ath12k_mac_set_bitrate_mask_iter, 7090 arvif); 7091 7092 mutex_unlock(&ar->conf_mutex); 7093 } 7094 7095 mutex_lock(&ar->conf_mutex); 7096 7097 ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7098 if (ret) { 7099 ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", 7100 arvif->vdev_id, ret); 7101 } 7102 7103 mutex_unlock(&ar->conf_mutex); 7104 7105 out: 7106 return ret; 7107 } 7108 7109 static void 7110 ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, 7111 enum ieee80211_reconfig_type reconfig_type) 7112 { 7113 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 7114 struct ath12k *ar; 7115 struct ath12k_base *ab; 7116 struct ath12k_vif *arvif; 7117 int recovery_count; 7118 7119 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 7120 return; 7121 7122 ar = ath12k_ah_to_ar(ah); 7123 ab = ar->ab; 7124 7125 mutex_lock(&ar->conf_mutex); 7126 7127 if (ar->state == ATH12K_STATE_RESTARTED) { 7128 ath12k_warn(ar->ab, "pdev %d successfully recovered\n", 7129 ar->pdev->pdev_id); 7130 ar->state = ATH12K_STATE_ON; 7131 ieee80211_wake_queues(hw); 7132 7133 if (ab->is_reset) { 7134 recovery_count = atomic_inc_return(&ab->recovery_count); 7135 ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n", 7136 recovery_count); 7137 /* When there are multiple radios in an SOC, 7138 * the recovery has to be done for each radio 7139 */ 7140 if (recovery_count == ab->num_radios) { 7141 atomic_dec(&ab->reset_count); 7142 complete(&ab->reset_complete); 7143 ab->is_reset = false; 7144 atomic_set(&ab->fail_cont_count, 0); 7145 ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); 7146 } 7147 } 7148 7149 list_for_each_entry(arvif, &ar->arvifs, list) { 7150 ath12k_dbg(ab, ATH12K_DBG_BOOT, 7151 "reconfig cipher %d up %d vdev type %d\n", 7152 arvif->key_cipher, 7153 arvif->is_up, 7154 arvif->vdev_type); 7155 /* After trigger disconnect, then upper layer will 7156 * trigger connect again, then the PN number of 7157 * upper layer will be reset to keep up with AP 7158 * side, hence PN number mismatch will not happen. 
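 * Only plain station vdevs that were up are force-disconnected below; other interface types are left for mac80211 to reconfigure.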
7159 */ 7160 if (arvif->is_up && 7161 arvif->vdev_type == WMI_VDEV_TYPE_STA && 7162 arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { 7163 ieee80211_hw_restart_disconnect(arvif->vif); 7164 ath12k_dbg(ab, ATH12K_DBG_BOOT, 7165 "restart disconnect\n"); 7166 } 7167 } 7168 } 7169 7170 mutex_unlock(&ar->conf_mutex); 7171 } 7172 7173 static void 7174 ath12k_mac_update_bss_chan_survey(struct ath12k *ar, 7175 struct ieee80211_channel *channel) 7176 { 7177 int ret; 7178 enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; 7179 7180 lockdep_assert_held(&ar->conf_mutex); 7181 7182 if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || 7183 ar->rx_channel != channel) 7184 return; 7185 7186 if (ar->scan.state != ATH12K_SCAN_IDLE) { 7187 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 7188 "ignoring bss chan info req while scanning..\n"); 7189 return; 7190 } 7191 7192 reinit_completion(&ar->bss_survey_done); 7193 7194 ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type); 7195 if (ret) { 7196 ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n"); 7197 return; 7198 } 7199 7200 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 7201 if (ret == 0) 7202 ath12k_warn(ar->ab, "bss channel survey timed out\n"); 7203 } 7204 7205 static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, 7206 struct survey_info *survey) 7207 { 7208 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 7209 struct ath12k *ar; 7210 struct ieee80211_supported_band *sband; 7211 struct survey_info *ar_survey; 7212 int ret = 0; 7213 7214 if (idx >= ATH12K_NUM_CHANS) 7215 return -ENOENT; 7216 7217 ar = ath12k_ah_to_ar(ah); 7218 7219 ar_survey = &ar->survey[idx]; 7220 7221 mutex_lock(&ar->conf_mutex); 7222 7223 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 7224 if (sband && idx >= sband->n_channels) { 7225 idx -= sband->n_channels; 7226 sband = NULL; 7227 } 7228 7229 if (!sband) 7230 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 7231 7232 if (!sband || idx >= sband->n_channels) { 7233 ret = -ENOENT; 7234 goto exit; 7235 } 7236 7237 ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 7238 7239 spin_lock_bh(&ar->data_lock); 7240 memcpy(survey, ar_survey, sizeof(*survey)); 7241 spin_unlock_bh(&ar->data_lock); 7242 7243 survey->channel = &sband->channels[idx]; 7244 7245 if (ar->rx_channel == survey->channel) 7246 survey->filled |= SURVEY_INFO_IN_USE; 7247 7248 exit: 7249 mutex_unlock(&ar->conf_mutex); 7250 7251 return ret; 7252 } 7253 7254 static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, 7255 struct ieee80211_vif *vif, 7256 struct ieee80211_sta *sta, 7257 struct station_info *sinfo) 7258 { 7259 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 7260 7261 sinfo->rx_duration = arsta->rx_duration; 7262 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 7263 7264 sinfo->tx_duration = arsta->tx_duration; 7265 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 7266 7267 if (!arsta->txrate.legacy && !arsta->txrate.nss) 7268 return; 7269 7270 if (arsta->txrate.legacy) { 7271 sinfo->txrate.legacy = arsta->txrate.legacy; 7272 } else { 7273 sinfo->txrate.mcs = arsta->txrate.mcs; 7274 sinfo->txrate.nss = arsta->txrate.nss; 7275 sinfo->txrate.bw = arsta->txrate.bw; 7276 sinfo->txrate.he_gi = arsta->txrate.he_gi; 7277 sinfo->txrate.he_dcm = arsta->txrate.he_dcm; 7278 sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; 7279 } 7280 sinfo->txrate.flags = arsta->txrate.flags; 7281 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 7282 7283 /* TODO: Use real NF 
instead of default one. */ 7284 sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; 7285 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 7286 } 7287 7288 static const struct ieee80211_ops ath12k_ops = { 7289 .tx = ath12k_mac_op_tx, 7290 .wake_tx_queue = ieee80211_handle_wake_tx_queue, 7291 .start = ath12k_mac_op_start, 7292 .stop = ath12k_mac_op_stop, 7293 .reconfig_complete = ath12k_mac_op_reconfig_complete, 7294 .add_interface = ath12k_mac_op_add_interface, 7295 .remove_interface = ath12k_mac_op_remove_interface, 7296 .update_vif_offload = ath12k_mac_op_update_vif_offload, 7297 .config = ath12k_mac_op_config, 7298 .bss_info_changed = ath12k_mac_op_bss_info_changed, 7299 .configure_filter = ath12k_mac_op_configure_filter, 7300 .hw_scan = ath12k_mac_op_hw_scan, 7301 .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, 7302 .set_key = ath12k_mac_op_set_key, 7303 .sta_state = ath12k_mac_op_sta_state, 7304 .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, 7305 .sta_rc_update = ath12k_mac_op_sta_rc_update, 7306 .conf_tx = ath12k_mac_op_conf_tx, 7307 .set_antenna = ath12k_mac_op_set_antenna, 7308 .get_antenna = ath12k_mac_op_get_antenna, 7309 .ampdu_action = ath12k_mac_op_ampdu_action, 7310 .add_chanctx = ath12k_mac_op_add_chanctx, 7311 .remove_chanctx = ath12k_mac_op_remove_chanctx, 7312 .change_chanctx = ath12k_mac_op_change_chanctx, 7313 .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx, 7314 .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx, 7315 .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx, 7316 .set_rts_threshold = ath12k_mac_op_set_rts_threshold, 7317 .set_frag_threshold = ath12k_mac_op_set_frag_threshold, 7318 .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask, 7319 .get_survey = ath12k_mac_op_get_survey, 7320 .flush = ath12k_mac_op_flush, 7321 .sta_statistics = ath12k_mac_op_sta_statistics, 7322 }; 7323 7324 static void ath12k_mac_update_ch_list(struct ath12k *ar, 7325 struct ieee80211_supported_band *band, 7326 u32 freq_low, u32 freq_high) 7327 { 7328 int i; 7329 7330 if (!(freq_low && freq_high)) 7331 return; 7332 7333 for (i = 0; i < band->n_channels; i++) { 7334 if (band->channels[i].center_freq < freq_low || 7335 band->channels[i].center_freq > freq_high) 7336 band->channels[i].flags |= IEEE80211_CHAN_DISABLED; 7337 } 7338 } 7339 7340 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) 7341 { 7342 struct ath12k_pdev *pdev = ar->pdev; 7343 struct ath12k_pdev_cap *pdev_cap = &pdev->cap; 7344 7345 if (band == WMI_HOST_WLAN_2G_CAP) 7346 return pdev_cap->band[NL80211_BAND_2GHZ].phy_id; 7347 7348 if (band == WMI_HOST_WLAN_5G_CAP) 7349 return pdev_cap->band[NL80211_BAND_5GHZ].phy_id; 7350 7351 ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band); 7352 7353 return 0; 7354 } 7355 7356 static int ath12k_mac_setup_channels_rates(struct ath12k *ar, 7357 u32 supported_bands, 7358 struct ieee80211_supported_band *bands[]) 7359 { 7360 struct ieee80211_supported_band *band; 7361 struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; 7362 void *channels; 7363 u32 phy_id; 7364 7365 BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) + 7366 ARRAY_SIZE(ath12k_5ghz_channels) + 7367 ARRAY_SIZE(ath12k_6ghz_channels)) != 7368 ATH12K_NUM_CHANS); 7369 7370 reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; 7371 7372 if (supported_bands & WMI_HOST_WLAN_2G_CAP) { 7373 channels = kmemdup(ath12k_2ghz_channels, 7374 sizeof(ath12k_2ghz_channels), 7375 GFP_KERNEL); 7376 if (!channels) 7377 return -ENOMEM; 7378 7379 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 7380 band->band = 
NL80211_BAND_2GHZ; 7381 band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels); 7382 band->channels = channels; 7383 band->n_bitrates = ath12k_g_rates_size; 7384 band->bitrates = ath12k_g_rates; 7385 bands[NL80211_BAND_2GHZ] = band; 7386 7387 if (ar->ab->hw_params->single_pdev_only) { 7388 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); 7389 reg_cap = &ar->ab->hal_reg_cap[phy_id]; 7390 } 7391 ath12k_mac_update_ch_list(ar, band, 7392 reg_cap->low_2ghz_chan, 7393 reg_cap->high_2ghz_chan); 7394 } 7395 7396 if (supported_bands & WMI_HOST_WLAN_5G_CAP) { 7397 if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) { 7398 channels = kmemdup(ath12k_6ghz_channels, 7399 sizeof(ath12k_6ghz_channels), GFP_KERNEL); 7400 if (!channels) { 7401 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 7402 return -ENOMEM; 7403 } 7404 7405 ar->supports_6ghz = true; 7406 band = &ar->mac.sbands[NL80211_BAND_6GHZ]; 7407 band->band = NL80211_BAND_6GHZ; 7408 band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels); 7409 band->channels = channels; 7410 band->n_bitrates = ath12k_a_rates_size; 7411 band->bitrates = ath12k_a_rates; 7412 bands[NL80211_BAND_6GHZ] = band; 7413 ath12k_mac_update_ch_list(ar, band, 7414 reg_cap->low_5ghz_chan, 7415 reg_cap->high_5ghz_chan); 7416 } 7417 7418 if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) { 7419 channels = kmemdup(ath12k_5ghz_channels, 7420 sizeof(ath12k_5ghz_channels), 7421 GFP_KERNEL); 7422 if (!channels) { 7423 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 7424 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 7425 return -ENOMEM; 7426 } 7427 7428 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 7429 band->band = NL80211_BAND_5GHZ; 7430 band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels); 7431 band->channels = channels; 7432 band->n_bitrates = ath12k_a_rates_size; 7433 band->bitrates = ath12k_a_rates; 7434 bands[NL80211_BAND_5GHZ] = band; 7435 7436 if (ar->ab->hw_params->single_pdev_only) { 7437 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); 7438 reg_cap = &ar->ab->hal_reg_cap[phy_id]; 7439 } 7440 7441 ath12k_mac_update_ch_list(ar, band, 7442 reg_cap->low_5ghz_chan, 7443 reg_cap->high_5ghz_chan); 7444 } 7445 } 7446 7447 return 0; 7448 } 7449 7450 static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) 7451 { 7452 struct ath12k *ar = ath12k_ah_to_ar(ah); 7453 u16 interface_modes = U16_MAX; 7454 7455 interface_modes &= ar->ab->hw_params->interface_modes; 7456 7457 return interface_modes == U16_MAX ? 
0 : interface_modes; 7458 } 7459 7460 static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah, 7461 enum nl80211_iftype type) 7462 { 7463 struct ath12k *ar = ath12k_ah_to_ar(ah); 7464 u16 interface_modes, mode; 7465 bool is_enable = true; 7466 7467 mode = BIT(type); 7468 7469 interface_modes = ar->ab->hw_params->interface_modes; 7470 if (!(interface_modes & mode)) 7471 is_enable = false; 7472 7473 return is_enable; 7474 } 7475 7476 static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) 7477 { 7478 struct wiphy *wiphy = ah->hw->wiphy; 7479 struct ieee80211_iface_combination *combinations; 7480 struct ieee80211_iface_limit *limits; 7481 int n_limits, max_interfaces; 7482 bool ap, mesh; 7483 7484 ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); 7485 7486 mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && 7487 ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); 7488 7489 combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); 7490 if (!combinations) 7491 return -ENOMEM; 7492 7493 if (ap || mesh) { 7494 n_limits = 2; 7495 max_interfaces = 16; 7496 } else { 7497 n_limits = 1; 7498 max_interfaces = 1; 7499 } 7500 7501 limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); 7502 if (!limits) { 7503 kfree(combinations); 7504 return -ENOMEM; 7505 } 7506 7507 limits[0].max = 1; 7508 limits[0].types |= BIT(NL80211_IFTYPE_STATION); 7509 7510 if (ap) { 7511 limits[1].max = max_interfaces; 7512 limits[1].types |= BIT(NL80211_IFTYPE_AP); 7513 } 7514 7515 if (mesh) 7516 limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); 7517 7518 combinations[0].limits = limits; 7519 combinations[0].n_limits = n_limits; 7520 combinations[0].max_interfaces = max_interfaces; 7521 combinations[0].num_different_channels = 1; 7522 combinations[0].beacon_int_infra_match = true; 7523 combinations[0].beacon_int_min_gcd = 100; 7524 combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7525 BIT(NL80211_CHAN_WIDTH_20) | 7526 BIT(NL80211_CHAN_WIDTH_40) | 7527 BIT(NL80211_CHAN_WIDTH_80); 7528 7529 wiphy->iface_combinations = combinations; 7530 wiphy->n_iface_combinations = 1; 7531 7532 return 0; 7533 } 7534 7535 static const u8 ath12k_if_types_ext_capa[] = { 7536 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 7537 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 7538 }; 7539 7540 static const u8 ath12k_if_types_ext_capa_sta[] = { 7541 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 7542 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 7543 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, 7544 }; 7545 7546 static const u8 ath12k_if_types_ext_capa_ap[] = { 7547 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 7548 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 7549 [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, 7550 }; 7551 7552 static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { 7553 { 7554 .extended_capabilities = ath12k_if_types_ext_capa, 7555 .extended_capabilities_mask = ath12k_if_types_ext_capa, 7556 .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa), 7557 }, { 7558 .iftype = NL80211_IFTYPE_STATION, 7559 .extended_capabilities = ath12k_if_types_ext_capa_sta, 7560 .extended_capabilities_mask = ath12k_if_types_ext_capa_sta, 7561 .extended_capabilities_len = 7562 sizeof(ath12k_if_types_ext_capa_sta), 7563 }, { 7564 .iftype = NL80211_IFTYPE_AP, 7565 .extended_capabilities = ath12k_if_types_ext_capa_ap, 7566 .extended_capabilities_mask = ath12k_if_types_ext_capa_ap, 7567 .extended_capabilities_len = 7568 sizeof(ath12k_if_types_ext_capa_ap), 7569 }, 7570 }; 7571 7572 static void 
ath12k_mac_cleanup_unregister(struct ath12k *ar) 7573 { 7574 idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar); 7575 idr_destroy(&ar->txmgmt_idr); 7576 7577 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 7578 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); 7579 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 7580 } 7581 7582 static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) 7583 { 7584 struct ieee80211_hw *hw = ah->hw; 7585 struct wiphy *wiphy = hw->wiphy; 7586 struct ath12k *ar = ath12k_ah_to_ar(ah); 7587 7588 cancel_work_sync(&ar->regd_update_work); 7589 7590 ieee80211_unregister_hw(hw); 7591 7592 ath12k_mac_cleanup_unregister(ar); 7593 7594 kfree(wiphy->iface_combinations[0].limits); 7595 kfree(wiphy->iface_combinations); 7596 7597 SET_IEEE80211_DEV(hw, NULL); 7598 } 7599 7600 static int ath12k_mac_setup_register(struct ath12k *ar, 7601 u32 *ht_cap, 7602 struct ieee80211_supported_band *bands[]) 7603 { 7604 struct ath12k_pdev_cap *cap = &ar->pdev->cap; 7605 int ret; 7606 7607 init_waitqueue_head(&ar->txmgmt_empty_waitq); 7608 idr_init(&ar->txmgmt_idr); 7609 spin_lock_init(&ar->txmgmt_idr_lock); 7610 7611 ath12k_pdev_caps_update(ar); 7612 7613 ret = ath12k_mac_setup_channels_rates(ar, 7614 cap->supported_bands, 7615 bands); 7616 if (ret) 7617 return ret; 7618 7619 ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap); 7620 ath12k_mac_setup_sband_iftype_data(ar, cap); 7621 7622 ar->max_num_stations = TARGET_NUM_STATIONS; 7623 ar->max_num_peers = TARGET_NUM_PEERS_PDEV; 7624 7625 return 0; 7626 } 7627 7628 static int ath12k_mac_hw_register(struct ath12k_hw *ah) 7629 { 7630 struct ieee80211_hw *hw = ah->hw; 7631 struct wiphy *wiphy = hw->wiphy; 7632 struct ath12k *ar = ath12k_ah_to_ar(ah); 7633 struct ath12k_base *ab = ar->ab; 7634 struct ath12k_pdev *pdev; 7635 struct ath12k_pdev_cap *cap; 7636 static const u32 cipher_suites[] = { 7637 WLAN_CIPHER_SUITE_TKIP, 7638 WLAN_CIPHER_SUITE_CCMP, 7639 WLAN_CIPHER_SUITE_AES_CMAC, 7640 WLAN_CIPHER_SUITE_BIP_CMAC_256, 7641 WLAN_CIPHER_SUITE_BIP_GMAC_128, 7642 WLAN_CIPHER_SUITE_BIP_GMAC_256, 7643 WLAN_CIPHER_SUITE_GCMP, 7644 WLAN_CIPHER_SUITE_GCMP_256, 7645 WLAN_CIPHER_SUITE_CCMP_256, 7646 }; 7647 int ret; 7648 u32 ht_cap = 0; 7649 7650 pdev = ar->pdev; 7651 7652 if (ab->pdevs_macaddr_valid) 7653 ether_addr_copy(ar->mac_addr, pdev->mac_addr); 7654 else 7655 ether_addr_copy(ar->mac_addr, ab->mac_addr); 7656 7657 ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands); 7658 if (ret) 7659 goto out; 7660 7661 wiphy->max_ap_assoc_sta = ar->max_num_stations; 7662 7663 cap = &pdev->cap; 7664 7665 wiphy->available_antennas_rx = cap->rx_chain_mask; 7666 wiphy->available_antennas_tx = cap->tx_chain_mask; 7667 7668 SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr); 7669 SET_IEEE80211_DEV(hw, ab->dev); 7670 7671 ret = ath12k_mac_setup_iface_combinations(ah); 7672 if (ret) { 7673 ath12k_err(ab, "failed to setup interface combinations: %d\n", ret); 7674 goto err_cleanup_unregister; 7675 } 7676 7677 wiphy->interface_modes = ath12k_mac_get_ifmodes(ah); 7678 7679 if (wiphy->bands[NL80211_BAND_2GHZ] && 7680 wiphy->bands[NL80211_BAND_5GHZ] && 7681 wiphy->bands[NL80211_BAND_6GHZ]) 7682 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 7683 7684 ieee80211_hw_set(hw, SIGNAL_DBM); 7685 ieee80211_hw_set(hw, SUPPORTS_PS); 7686 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 7687 ieee80211_hw_set(hw, MFP_CAPABLE); 7688 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 7689 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 7690 ieee80211_hw_set(hw, AP_LINK_PS); 7691 
ieee80211_hw_set(hw, SPECTRUM_MGMT); 7692 ieee80211_hw_set(hw, CONNECTION_MONITOR); 7693 ieee80211_hw_set(hw, SUPPORTS_PER_STA_GTK); 7694 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 7695 ieee80211_hw_set(hw, QUEUE_CONTROL); 7696 ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); 7697 ieee80211_hw_set(hw, REPORTS_LOW_ACK); 7698 7699 if (ht_cap & WMI_HT_CAP_ENABLED) { 7700 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 7701 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 7702 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 7703 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 7704 ieee80211_hw_set(hw, USES_RSS); 7705 } 7706 7707 wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 7708 wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 7709 7710 /* TODO: Check if HT capability advertised from firmware is different 7711 * for each band for a dual band capable radio. It will be tricky to 7712 * handle it when the HT capability differs for each band. 7713 */ 7714 if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) 7715 wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 7716 7717 wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 7718 wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 7719 7720 hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL; 7721 7722 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 7723 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 7724 wiphy->max_remain_on_channel_duration = 5000; 7725 7726 wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 7727 wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 7728 NL80211_FEATURE_AP_SCAN; 7729 7730 hw->queues = ATH12K_HW_MAX_QUEUES; 7731 wiphy->tx_queue_len = ATH12K_QUEUE_LEN; 7732 hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1; 7733 hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT; 7734 7735 hw->vif_data_size = sizeof(struct ath12k_vif); 7736 hw->sta_data_size = sizeof(struct ath12k_sta); 7737 7738 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); 7739 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR); 7740 7741 wiphy->cipher_suites = cipher_suites; 7742 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 7743 7744 wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; 7745 wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); 7746 7747 if (ar->supports_6ghz) { 7748 wiphy_ext_feature_set(wiphy, 7749 NL80211_EXT_FEATURE_FILS_DISCOVERY); 7750 wiphy_ext_feature_set(wiphy, 7751 NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP); 7752 } 7753 7754 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT); 7755 7756 ath12k_reg_init(hw); 7757 7758 if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) { 7759 hw->netdev_features = NETIF_F_HW_CSUM; 7760 ieee80211_hw_set(hw, SW_CRYPTO_CONTROL); 7761 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 7762 } 7763 7764 ret = ieee80211_register_hw(hw); 7765 if (ret) { 7766 ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); 7767 goto err_free_if_combs; 7768 } 7769 7770 if (!ab->hw_params->supports_monitor) 7771 /* There's a race between calling ieee80211_register_hw() 7772 * and here where monitor mode is advertised for a little 7773 * while. But that window is so short that in practice it makes 7774 * no difference in real life. 
static void ath12k_mac_setup(struct ath12k *ar)
{
        struct ath12k_base *ab = ar->ab;
        struct ath12k_pdev *pdev = ar->pdev;
        u8 pdev_idx = ar->pdev_idx;

        ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);

        ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
        /* FIXME: wmi[0] is already initialized during attach,
         * Should we do this again?
         */
        ath12k_wmi_pdev_attach(ab, pdev_idx);

        ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
        ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
        ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
        ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);

        spin_lock_init(&ar->data_lock);
        INIT_LIST_HEAD(&ar->arvifs);
        INIT_LIST_HEAD(&ar->ppdu_stats_info);
        mutex_init(&ar->conf_mutex);
        init_completion(&ar->vdev_setup_done);
        init_completion(&ar->vdev_delete_done);
        init_completion(&ar->peer_assoc_done);
        init_completion(&ar->peer_delete_done);
        init_completion(&ar->install_key_done);
        init_completion(&ar->bss_survey_done);
        init_completion(&ar->scan.started);
        init_completion(&ar->scan.completed);

        INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
        INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);

        INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
        skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
        clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}

int ath12k_mac_register(struct ath12k_base *ab)
{
        struct ath12k_hw *ah;
        int i;
        int ret;

        if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
                return 0;

        /* Initialize channel counters frequency value in hertz */
        ab->cc_freq_hz = 320000;
        ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

        for (i = 0; i < ab->num_hw; i++) {
                ah = ab->ah[i];

                ret = ath12k_mac_hw_register(ah);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (i = i - 1; i >= 0; i--) {
                ah = ab->ah[i];
                if (!ah)
                        continue;

                ath12k_mac_hw_unregister(ah);
        }

        return ret;
}

void ath12k_mac_unregister(struct ath12k_base *ab)
{
        struct ath12k_hw *ah;
        int i;

        for (i = ab->num_hw - 1; i >= 0; i--) {
                ah = ab->ah[i];
                if (!ah)
                        continue;

                ath12k_mac_hw_unregister(ah);
        }
}

static void ath12k_mac_hw_destroy(struct ath12k_hw *ah)
{
        ieee80211_free_hw(ah->hw);
}
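/* Allocate one ieee80211_hw with an embedded ath12k_hw and bind the given
 * pdevs (radios) to it. ath12k_mac_allocate() currently maps a single radio
 * per hardware instance (radio_per_hw = 1).
 */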
static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
                                                struct ath12k_pdev_map *pdev_map,
                                                u8 num_pdev_map)
{
        struct ieee80211_hw *hw;
        struct ath12k *ar;
        struct ath12k_pdev *pdev;
        struct ath12k_hw *ah;
        int i;
        u8 pdev_idx;

        hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map),
                                &ath12k_ops);
        if (!hw)
                return NULL;

        ah = ath12k_hw_to_ah(hw);
        ah->hw = hw;
        ah->num_radio = num_pdev_map;

        for (i = 0; i < num_pdev_map; i++) {
                ab = pdev_map[i].ab;
                pdev_idx = pdev_map[i].pdev_idx;
                pdev = &ab->pdevs[pdev_idx];

                ar = ath12k_ah_to_ar(ah);
                ar->ah = ah;
                ar->ab = ab;
                ar->hw_link_id = i;
                ar->pdev = pdev;
                ar->pdev_idx = pdev_idx;
                pdev->ar = ar;

                ath12k_mac_setup(ar);
        }

        return ah;
}

void ath12k_mac_destroy(struct ath12k_base *ab)
{
        struct ath12k_pdev *pdev;
        int i;

        for (i = 0; i < ab->num_radios; i++) {
                pdev = &ab->pdevs[i];
                if (!pdev->ar)
                        continue;

                pdev->ar = NULL;
        }

        for (i = 0; i < ab->num_hw; i++) {
                if (!ab->ah[i])
                        continue;

                ath12k_mac_hw_destroy(ab->ah[i]);
                ab->ah[i] = NULL;
        }
}

int ath12k_mac_allocate(struct ath12k_base *ab)
{
        struct ath12k_hw *ah;
        struct ath12k_pdev_map pdev_map[MAX_RADIOS];
        int ret, i, j;
        u8 radio_per_hw;

        if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
                return 0;

        ab->num_hw = ab->num_radios;
        radio_per_hw = 1;

        for (i = 0; i < ab->num_hw; i++) {
                for (j = 0; j < radio_per_hw; j++) {
                        pdev_map[j].ab = ab;
                        pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
                }

                ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
                if (!ah) {
                        ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
                                    i);
                        ret = -ENOMEM;
                        goto err;
                }

                ab->ah[i] = ah;
        }

        ath12k_dp_pdev_pre_alloc(ab);

        return 0;

err:
        for (i = i - 1; i >= 0; i--) {
                if (!ab->ah[i])
                        continue;

                ath12k_mac_hw_destroy(ab->ah[i]);
                ab->ah[i] = NULL;
        }

        return ret;
}
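/* Typical call order from the driver core (a sketch inferred from the
 * functions in this file; the actual call sites live outside mac.c):
 *
 *      ath12k_mac_allocate(ab);        - allocate ath12k_hw/ath12k instances
 *      ath12k_mac_register(ab);        - register each ath12k_hw with mac80211
 *      ...
 *      ath12k_mac_unregister(ab);      - unregister from mac80211
 *      ath12k_mac_destroy(ab);         - free the ieee80211_hw instances
 */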