// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include "mac.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "dp_tx.h"
#include "dp_rx.h"
#include "peer.h"
#include "debugfs.h"

#define CHAN2G(_channel, _freq, _flags) { \
	.band = NL80211_BAND_2GHZ, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

#define CHAN5G(_channel, _freq, _flags) { \
	.band = NL80211_BAND_5GHZ, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

#define CHAN6G(_channel, _freq, _flags) { \
	.band = NL80211_BAND_6GHZ, \
	.hw_value = (_channel), \
	.center_freq = (_freq), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

static const struct ieee80211_channel ath12k_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};

static const struct ieee80211_channel ath12k_5ghz_channels[] = {
	CHAN5G(36, 5180, 0),
	CHAN5G(40, 5200, 0),
	CHAN5G(44, 5220, 0),
	CHAN5G(48, 5240, 0),
	CHAN5G(52, 5260, 0),
	CHAN5G(56, 5280, 0),
	CHAN5G(60, 5300, 0),
	CHAN5G(64, 5320, 0),
	CHAN5G(100, 5500, 0),
	CHAN5G(104, 5520, 0),
	CHAN5G(108, 5540, 0),
	CHAN5G(112, 5560, 0),
	CHAN5G(116, 5580, 0),
	CHAN5G(120, 5600, 0),
	CHAN5G(124, 5620, 0),
	CHAN5G(128, 5640, 0),
	CHAN5G(132, 5660, 0),
	CHAN5G(136, 5680, 0),
	CHAN5G(140, 5700, 0),
	CHAN5G(144, 5720, 0),
	CHAN5G(149, 5745, 0),
	CHAN5G(153, 5765, 0),
	CHAN5G(157, 5785, 0),
	CHAN5G(161, 5805, 0),
	CHAN5G(165, 5825, 0),
	CHAN5G(169, 5845, 0),
	CHAN5G(173, 5865, 0),
};

static const struct ieee80211_channel ath12k_6ghz_channels[] = {
	CHAN6G(1, 5955, 0),
	CHAN6G(5, 5975, 0),
	CHAN6G(9, 5995, 0),
	CHAN6G(13, 6015, 0),
	CHAN6G(17, 6035, 0),
	CHAN6G(21, 6055, 0),
	CHAN6G(25, 6075, 0),
	CHAN6G(29, 6095, 0),
	CHAN6G(33, 6115, 0),
	CHAN6G(37, 6135, 0),
	CHAN6G(41, 6155, 0),
	CHAN6G(45, 6175, 0),
	CHAN6G(49, 6195, 0),
	CHAN6G(53, 6215, 0),
	CHAN6G(57, 6235, 0),
	CHAN6G(61, 6255, 0),
	CHAN6G(65, 6275, 0),
	CHAN6G(69, 6295, 0),
	CHAN6G(73, 6315, 0),
	CHAN6G(77, 6335, 0),
	CHAN6G(81, 6355, 0),
	CHAN6G(85, 6375, 0),
	CHAN6G(89, 6395, 0),
	CHAN6G(93, 6415, 0),
	CHAN6G(97, 6435, 0),
	CHAN6G(101, 6455, 0),
	CHAN6G(105, 6475, 0),
	CHAN6G(109, 6495, 0),
	CHAN6G(113, 6515, 0),
	CHAN6G(117, 6535, 0),
	CHAN6G(121, 6555, 0),
	CHAN6G(125, 6575, 0),
	CHAN6G(129, 6595, 0),
	CHAN6G(133, 6615, 0),
	CHAN6G(137, 6635, 0),
	CHAN6G(141, 6655, 0),
	CHAN6G(145, 6675, 0),
	CHAN6G(149, 6695, 0),
	CHAN6G(153, 6715, 0),
	CHAN6G(157, 6735, 0),
	CHAN6G(161, 6755, 0),
	CHAN6G(165, 6775, 0),
	CHAN6G(169, 6795, 0),
	CHAN6G(173, 6815, 0),
	CHAN6G(177, 6835, 0),
	CHAN6G(181, 6855, 0),
	CHAN6G(185, 6875, 0),
	CHAN6G(189, 6895, 0),
	CHAN6G(193, 6915, 0),
	CHAN6G(197, 6935, 0),
	CHAN6G(201, 6955, 0),
	CHAN6G(205, 6975, 0),
	CHAN6G(209, 6995, 0),
	CHAN6G(213, 7015, 0),
	CHAN6G(217, 7035, 0),
	CHAN6G(221, 7055, 0),
	CHAN6G(225, 7075, 0),
	CHAN6G(229, 7095, 0),
	CHAN6G(233, 7115, 0),
};

static struct ieee80211_rate ath12k_legacy_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH12K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH12K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH12K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M },
};

static const int
ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
	[NL80211_BAND_2GHZ] = {
		[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20_2G,
		[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20_2G,
		[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40_2G,
		[NL80211_CHAN_WIDTH_80] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_320] = MODE_UNKNOWN,
	},
	[NL80211_BAND_5GHZ] = {
		[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
		[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
		[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
		[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
		[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
		[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
		[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
	},
	[NL80211_BAND_6GHZ] = {
		[NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
		[NL80211_CHAN_WIDTH_20_NOHT] = MODE_11BE_EHT20,
		[NL80211_CHAN_WIDTH_20] = MODE_11BE_EHT20,
		[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
		[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
		[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
		[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
		[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
	},

};

const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
		     HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
		     HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
	.pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
			     HTT_RX_FP_CTRL_FILTER_FLASG3
};

#define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4
#define ath12k_g_rates ath12k_legacy_rates
#define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates))
#define ath12k_a_rates (ath12k_legacy_rates + 4)
#define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4)

#define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */

static const u32 ath12k_smps_map[] = {
	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};

static int ath12k_start_vdev_delay(struct ath12k *ar,
				   struct ath12k_vif *arvif);
static void ath12k_mac_stop(struct ath12k *ar);
static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif);
static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif);

static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
	switch (mode) {
	case MODE_11A:
		return "11a";
	case MODE_11G:
		return "11g";
	case MODE_11B:
		return "11b";
	case MODE_11GONLY:
		return "11gonly";
	case MODE_11NA_HT20:
		return "11na-ht20";
	case MODE_11NG_HT20:
		return "11ng-ht20";
	case MODE_11NA_HT40:
		return "11na-ht40";
	case MODE_11NG_HT40:
		return "11ng-ht40";
	case MODE_11AC_VHT20:
		return "11ac-vht20";
	case MODE_11AC_VHT40:
		return "11ac-vht40";
	case MODE_11AC_VHT80:
		return "11ac-vht80";
	case MODE_11AC_VHT160:
		return "11ac-vht160";
	case MODE_11AC_VHT80_80:
		return "11ac-vht80+80";
	case MODE_11AC_VHT20_2G:
		return "11ac-vht20-2g";
	case MODE_11AC_VHT40_2G:
		return "11ac-vht40-2g";
	case MODE_11AC_VHT80_2G:
		return "11ac-vht80-2g";
	case MODE_11AX_HE20:
		return "11ax-he20";
	case MODE_11AX_HE40:
		return "11ax-he40";
	case MODE_11AX_HE80:
		return "11ax-he80";
	case MODE_11AX_HE80_80:
		return "11ax-he80+80";
	case MODE_11AX_HE160:
		return "11ax-he160";
	case MODE_11AX_HE20_2G:
		return "11ax-he20-2g";
	case MODE_11AX_HE40_2G:
		return "11ax-he40-2g";
	case MODE_11AX_HE80_2G:
		return "11ax-he80-2g";
	case MODE_11BE_EHT20:
		return "11be-eht20";
	case MODE_11BE_EHT40:
		return "11be-eht40";
	case MODE_11BE_EHT80:
		return "11be-eht80";
	case MODE_11BE_EHT80_80:
		return "11be-eht80+80";
	case MODE_11BE_EHT160:
		return "11be-eht160";
	case MODE_11BE_EHT160_160:
		return "11be-eht160+160";
	case MODE_11BE_EHT320:
		return "11be-eht320";
	case MODE_11BE_EHT20_2G:
		return "11be-eht20-2g";
	case MODE_11BE_EHT40_2G:
		return "11be-eht40-2g";
	case MODE_UNKNOWN:
		/* skip */
		break;

	/* no default handler to allow compiler to check that the
	 * enum is fully handled
	 */
	}

	return "<unknown>";
}

enum rate_info_bw
ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
{
	u8 ret = RATE_INFO_BW_20;

	switch (bw) {
	case ATH12K_BW_20:
		ret = RATE_INFO_BW_20;
		break;
	case ATH12K_BW_40:
		ret = RATE_INFO_BW_40;
		break;
	case ATH12K_BW_80:
		ret = RATE_INFO_BW_80;
		break;
	case ATH12K_BW_160:
		ret = RATE_INFO_BW_160;
		break;
	case ATH12K_BW_320:
		ret = RATE_INFO_BW_320;
		break;
	}

	return ret;
}

enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw)
{
	switch (bw) {
	case RATE_INFO_BW_20:
		return ATH12K_BW_20;
	case RATE_INFO_BW_40:
		return ATH12K_BW_40;
	case RATE_INFO_BW_80:
		return ATH12K_BW_80;
	case RATE_INFO_BW_160:
		return ATH12K_BW_160;
	case RATE_INFO_BW_320:
		return ATH12K_BW_320;
	default:
		return ATH12K_BW_20;
	}
}

int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
					  u16 *rate)
{
	/* As default, it is OFDM rates */
	int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
	int max_rates_idx = ath12k_g_rates_size;

	if (preamble == WMI_RATE_PREAMBLE_CCK) {
		hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK;
		i = 0;
		max_rates_idx = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
	}

	while (i < max_rates_idx) {
		if (hw_rc == ath12k_legacy_rates[i].hw_value) {
			*rateidx = i;
			*rate = ath12k_legacy_rates[i].bitrate;
			return 0;
		}
		i++;
	}

	return -EINVAL;
}

u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
			     u32 bitrate)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;

	return 0;
}

static u32
ath12k_mac_max_ht_nss(const u8 *ht_mcs_mask)
{
	int nss;

	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
		if (ht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

static u32
ath12k_mac_max_vht_nss(const u16 *vht_mcs_mask)
{
	int nss;

	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
		if (vht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
{
	/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		 * 1 microsecond
		 */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}

static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
			       struct cfg80211_chan_def *def)
{
	struct ieee80211_chanctx_conf *conf;

	rcu_read_lock();
	conf = rcu_dereference(vif->bss_conf.chanctx_conf);
	if (!conf) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*def = conf->def;
	rcu_read_unlock();

	return 0;
}

static bool ath12k_mac_bitrate_is_cck(int bitrate)
{
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		return true;
	}

	return false;
}

u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			     u8 hw_rate, bool cck)
{
	const struct ieee80211_rate *rate;
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		rate = &sband->bitrates[i];

		if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck)
			continue;

		if (rate->hw_value == hw_rate)
			return i;
		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
			 rate->hw_value_short == hw_rate)
			return i;
	}

	return 0;
}

static u8 ath12k_mac_bitrate_to_rate(int bitrate)
{
	return DIV_ROUND_UP(bitrate, 5) |
	       (ath12k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}

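/* Iterator callback for ieee80211_iterate_active_interfaces_atomic(): records
 * the arvif whose (ar, vdev_id) pair matches the search key carried in
 * struct ath12k_vif_iter.
 */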
static void ath12k_get_arvif_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct ath12k_vif_iter *arvif_iter = data;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);

	if (arvif->vdev_id == arvif_iter->vdev_id &&
	    arvif->ar == arvif_iter->ar)
		arvif_iter->arvif = arvif;
}

struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
{
	struct ath12k_vif_iter arvif_iter = {};
	u32 flags;

	arvif_iter.vdev_id = vdev_id;
	arvif_iter.ar = ar;

	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
	ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
						   flags,
						   ath12k_get_arvif_iter,
						   &arvif_iter);
	if (!arvif_iter.arvif) {
		ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
		return NULL;
	}

	return arvif_iter.arvif;
}

struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
						   u32 vdev_id)
{
	int i;
	struct ath12k_pdev *pdev;
	struct ath12k_vif *arvif;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar &&
		    (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
			arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
			if (arvif)
				return arvif;
		}
	}

	return NULL;
}

struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id)
{
	int i;
	struct ath12k_pdev *pdev;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);
		if (pdev && pdev->ar) {
			if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
				return pdev->ar;
		}
	}

	return NULL;
}

struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
{
	int i;
	struct ath12k_pdev *pdev;

	if (ab->hw_params->single_pdev_only) {
		pdev = rcu_dereference(ab->pdevs_active[0]);
		return pdev ? pdev->ar : NULL;
	}

	if (WARN_ON(pdev_id > ab->num_radios))
		return NULL;

	for (i = 0; i < ab->num_radios; i++) {
		pdev = rcu_dereference(ab->pdevs_active[i]);

		if (pdev && pdev->pdev_id == pdev_id)
			return (pdev->ar ? pdev->ar : NULL);
	}

	return NULL;
}

static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
						struct ieee80211_channel *channel)
{
	struct ath12k_hw *ah = hw->priv;
	struct ath12k *ar;
	int i;

	ar = ah->radio;

	if (ah->num_radio == 1)
		return ar;

	for_each_ar(ah, ar, i) {
		if (channel->center_freq >= ar->freq_low &&
		    channel->center_freq <= ar->freq_high)
			return ar;
	}
	return NULL;
}

static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
					   struct ieee80211_chanctx_conf *ctx)
{
	if (!ctx)
		return NULL;

	return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
}

static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);

	/* If there is one pdev within ah, then we return
	 * ar directly.
	 */
	if (ah->num_radio == 1)
		return ah->radio;

	if (arvif->is_created)
		return arvif->ar;

	return NULL;
}

static void ath12k_pdev_caps_update(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;

	ar->max_tx_power = ab->target_caps.hw_max_tx_power;

	/* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
	 * But since the received value in svcrdy is same as hw_max_tx_power,
	 * we can set ar->min_tx_power to 0 currently until
	 * this is fixed in firmware
	 */
	ar->min_tx_power = 0;

	ar->txpower_limit_2g = ar->max_tx_power;
	ar->txpower_limit_5g = ar->max_tx_power;
	ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
}

static int ath12k_mac_txpower_recalc(struct ath12k *ar)
{
	struct ath12k_pdev *pdev = ar->pdev;
	struct ath12k_vif *arvif;
	int ret, txpower = -1;
	u32 param;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list) {
		if (arvif->txpower <= 0)
			continue;

		if (txpower == -1)
			txpower = arvif->txpower;
		else
			txpower = min(txpower, arvif->txpower);
	}

	if (txpower == -1)
		return 0;

	/* txpwr is set as 2 units per dBm in FW*/
	txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
			ar->max_tx_power) * 2;

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
		   txpower / 2);

	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
	    ar->txpower_limit_2g != txpower) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
		ret = ath12k_wmi_pdev_set_param(ar, param,
						txpower, ar->pdev->pdev_id);
		if (ret)
			goto fail;
		ar->txpower_limit_2g = txpower;
	}

	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
	    ar->txpower_limit_5g != txpower) {
		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
		ret = ath12k_wmi_pdev_set_param(ar, param,
						txpower, ar->pdev->pdev_id);
		if (ret)
			goto fail;
		ar->txpower_limit_5g = txpower;
	}

	return 0;

fail:
	ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
		    txpower / 2, param, ret);
	return ret;
}

static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	u32 vdev_param, rts_cts;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;

	/* Enable RTS/CTS protection for sw retries (when legacy stations
	 * are in BSS) or by default only for second rate series.
	 * TODO: Check if we need to enable CTS 2 Self in any case
	 */
	rts_cts = WMI_USE_RTS_CTS;

	if (arvif->num_legacy_stations > 0)
		rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
	else
		rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;

	/* Need not send duplicate param value to firmware */
	if (arvif->rtscts_prot_mode == rts_cts)
		return 0;

	arvif->rtscts_prot_mode = rts_cts;

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
		   arvif->vdev_id, rts_cts);

	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
					    vdev_param, rts_cts);
	if (ret)
		ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
			    arvif->vdev_id, ret);

	return ret;
}

static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	u32 param;
	int ret;

	ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
					ATH12K_KICKOUT_THRESHOLD,
					ar->pdev->pdev_id);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
					    ATH12K_KEEPALIVE_MIN_IDLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
					    ATH12K_KEEPALIVE_MAX_IDLE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
					    ATH12K_KEEPALIVE_MAX_UNRESPONSIVE);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
{
	struct ath12k_peer *peer, *tmp;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ab->base_lock);
	list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
		ath12k_dp_rx_peer_tid_cleanup(ar, peer);
		list_del(&peer->list);
		kfree(peer);
	}
	spin_unlock_bh(&ab->base_lock);

	ar->num_peers = 0;
	ar->num_stations = 0;
}

static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
		return -ESHUTDOWN;

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev setup timeout %d\n",
		   ATH12K_VDEV_SETUP_TIMEOUT_HZ);

	if (!wait_for_completion_timeout(&ar->vdev_setup_done,
					 ATH12K_VDEV_SETUP_TIMEOUT_HZ))
		return -ETIMEDOUT;

	return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
}

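/* Bring an already started monitor vdev up in firmware. A monitor vdev has no
 * BSS, so aid 0 and the radio's own MAC address are used for the vdev up
 * command.
 */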
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
	int ret;

	ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
		   vdev_id);
	return 0;
}

static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
					 struct cfg80211_chan_def *chandef)
{
	struct ieee80211_channel *channel;
	struct wmi_vdev_start_req_arg arg = {};
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	channel = chandef->chan;
	arg.vdev_id = vdev_id;
	arg.freq = channel->center_freq;
	arg.band_center_freq1 = chandef->center_freq1;
	arg.band_center_freq2 = chandef->center_freq2;
	arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
	arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.min_power = 0;
	arg.max_power = channel->max_power;
	arg.max_reg_power = channel->max_reg_power;
	arg.max_antenna_gain = channel->max_antenna_gain;

	arg.pref_tx_streams = ar->num_tx_chains;
	arg.pref_rx_streams = ar->num_rx_chains;
	arg.punct_bitmap = 0xFFFFFFFF;

	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);

	reinit_completion(&ar->vdev_setup_done);
	reinit_completion(&ar->vdev_delete_done);

	ret = ath12k_wmi_vdev_start(ar, &arg, false);
	if (ret) {
		ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath12k_mac_vdev_setup_sync(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
		   vdev_id);
	return 0;

vdev_stop:
	ret = ath12k_wmi_vdev_stop(ar, vdev_id);
	if (ret)
		ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
			    vdev_id, ret);
	return ret;
}

static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath12k_mac_vdev_setup_sync(ar);
	if (ret)
		ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
{
	struct ath12k_pdev *pdev = ar->pdev;
	struct ath12k_wmi_vdev_create_arg arg = {};
	int bit, ret;
	u8 tmp_addr[6];
	u16 nss;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->monitor_vdev_created)
		return 0;

	if (ar->ab->free_vdev_map == 0) {
		ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

	bit = __ffs64(ar->ab->free_vdev_map);

	ar->monitor_vdev_id = bit;

	arg.if_id = ar->monitor_vdev_id;
	arg.type = WMI_VDEV_TYPE_MONITOR;
	arg.subtype = WMI_VDEV_SUBTYPE_NONE;
	arg.pdev_id = pdev->pdev_id;
	arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
		arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
		arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
	}

	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
		arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
		arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
	}

	ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
	if (ret) {
		ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
			    ar->monitor_vdev_id, ret);
		ar->monitor_vdev_id = -1;
		return ret;
	}

	nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
	ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
					    WMI_VDEV_PARAM_NSS, nss);
	if (ret) {
		ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
			    ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
		return ret;
	}

	ret = ath12k_mac_txpower_recalc(ar);
	if (ret)
		return ret;

	ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
	ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
	ar->num_created_vdevs++;
	ar->monitor_vdev_created = true;
	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
{
	int ret;
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (!ar->monitor_vdev_created)
		return 0;

	reinit_completion(&ar->vdev_delete_done);

	ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
	if (ret) {
		ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
						ATH12K_VDEV_DELETE_TIMEOUT_HZ);
	if (time_left == 0) {
		ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
	} else {
		ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
		ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
			   ar->monitor_vdev_id);
		ar->num_created_vdevs--;
		ar->monitor_vdev_id = -1;
		ar->monitor_vdev_created = false;
	}

	return ret;
}

static void
ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
				struct ieee80211_chanctx_conf *conf,
				void *data)
{
	struct cfg80211_chan_def **def = data;

	*def = &conf->def;
}

static int ath12k_mac_monitor_start(struct ath12k *ar)
{
	struct cfg80211_chan_def *chandef = NULL;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->monitor_started)
		return 0;

	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
					    ath12k_mac_get_any_chandef_iter,
					    &chandef);
	if (!chandef)
		return 0;

	ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
	if (ret) {
		ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
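		/* Starting failed: delete the monitor vdev that was created
		 * earlier so a later attempt can recreate it cleanly.
		 */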
		ath12k_mac_monitor_vdev_delete(ar);
		return ret;
	}

	ar->monitor_started = true;
	ar->num_started_vdevs++;
	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);

	return ret;
}

static int ath12k_mac_monitor_stop(struct ath12k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	if (!ar->monitor_started)
		return 0;

	ret = ath12k_mac_monitor_vdev_stop(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
		return ret;
	}

	ar->monitor_started = false;
	ar->num_started_vdevs--;
	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
	return ret;
}

static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret) {
		ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		goto err;
	}

	ret = ath12k_mac_vdev_setup_sync(ar);
	if (ret) {
		ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		goto err;
	}

	WARN_ON(ar->num_started_vdevs == 0);

	ar->num_started_vdevs--;
	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
		   arvif->vif->addr, arvif->vdev_id);

	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
		clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
			   arvif->vdev_id);
	}

	return 0;
err:
	return ret;
}

static int ath12k_mac_config(struct ath12k *ar, u32 changed)
{
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	struct ieee80211_conf *conf = &hw->conf;
	int ret = 0;

	mutex_lock(&ar->conf_mutex);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
		if (ar->monitor_conf_enabled) {
			if (ar->monitor_vdev_created)
				goto exit;
			ret = ath12k_mac_monitor_vdev_create(ar);
			if (ret)
				goto exit;
			ret = ath12k_mac_monitor_start(ar);
			if (ret)
				goto err_mon_del;
		} else {
			if (!ar->monitor_vdev_created)
				goto exit;
			ret = ath12k_mac_monitor_stop(ar);
			if (ret)
				goto exit;
			ath12k_mac_monitor_vdev_delete(ar);
		}
	}

exit:
	mutex_unlock(&ar->conf_mutex);
	return ret;

err_mon_del:
	ath12k_mac_monitor_vdev_delete(ar);
	mutex_unlock(&ar->conf_mutex);
	return ret;
}

static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
	struct ath12k *ar;
	int ret;

	ar = ath12k_ah_to_ar(ah, 0);

	ret = ath12k_mac_config(ar, changed);
	if (ret)
		ath12k_warn(ar->ab, "failed to update config pdev idx %d: %d\n",
			    ar->pdev_idx, ret);

	return ret;
}

static int ath12k_mac_setup_bcn_p2p_ie(struct ath12k_vif *arvif,
				       struct sk_buff *bcn)
{
	struct ath12k *ar = arvif->ar;
	struct ieee80211_mgmt *mgmt;
	const u8 *p2p_ie;
	int ret;

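	/* Locate the P2P vendor IE within the beacon template and hand it to
	 * firmware, which then takes care of inserting it into transmitted
	 * beacons.
	 */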
	mgmt = (void *)bcn->data;
	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
					 mgmt->u.beacon.variable,
					 bcn->len - (mgmt->u.beacon.variable -
						     bcn->data));
	if (!p2p_ie) {
		ath12k_warn(ar->ab, "no P2P ie found in beacon\n");
		return -ENOENT;
	}

	ret = ath12k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
	if (ret) {
		ath12k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
					u8 oui_type, size_t ie_offset)
{
	const u8 *next, *end;
	size_t len;
	u8 *ie;

	if (WARN_ON(skb->len < ie_offset))
		return -EINVAL;

	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					   skb->data + ie_offset,
					   skb->len - ie_offset);
	if (!ie)
		return -ENOENT;

	len = ie[1] + 2;
	end = skb->data + skb->len;
	next = ie + len;

	if (WARN_ON(next > end))
		return -EINVAL;

	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);

	return 0;
}

static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *bcn;
	struct ieee80211_mgmt *mgmt;
	u8 *ies;
	int ret;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
		return 0;

	bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
	if (!bcn) {
		ath12k_warn(ab, "failed to get beacon template from mac80211\n");
		return -EPERM;
	}

	ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
	ies += sizeof(mgmt->u.beacon);

	if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
		arvif->rsnie_present = true;

	if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
				    WLAN_OUI_TYPE_MICROSOFT_WPA,
				    ies, (skb_tail_pointer(bcn) - ies)))
		arvif->wpaie_present = true;

	if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
		ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
		if (ret) {
			ath12k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
				    ret);
			goto free_bcn_skb;
		}

		/* P2P IE is inserted by firmware automatically (as
		 * configured above) so remove it from the base beacon
		 * template to avoid duplicate P2P IEs in beacon frames.
		 */
		ret = ath12k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
						  WLAN_OUI_TYPE_WFA_P2P,
						  offsetof(struct ieee80211_mgmt,
							   u.beacon.variable));
		if (ret) {
			ath12k_warn(ab, "failed to remove P2P vendor ie: %d\n",
				    ret);
			goto free_bcn_skb;
		}
	}

	ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);

	if (ret)
		ath12k_warn(ab, "failed to submit beacon template command: %d\n",
			    ret);

free_bcn_skb:
	kfree_skb(bcn);
	return ret;
}

static void ath12k_control_beaconing(struct ath12k_vif *arvif,
				     struct ieee80211_bss_conf *info)
{
	struct ath12k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->enable_beacon) {
		ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret)
			ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
				    arvif->vdev_id, ret);

		arvif->is_up = false;
		return;
	}

	/* Install the beacon template to the FW */
	ret = ath12k_mac_setup_bcn_tmpl(arvif);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
			    ret);
		return;
	}

	arvif->aid = 0;

	ether_addr_copy(arvif->bssid, info->bssid);

	ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
			    arvif->vdev_id, ret);
		return;
	}

	arvif->is_up = true;

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}

static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct sk_buff *skb = data;
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
		return;

	cancel_delayed_work(&arvif->connection_loss_work);
}

void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
{
	ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath12k_mac_handle_beacon_iter,
						   skb);
}

static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
					       struct ieee80211_vif *vif)
{
	u32 *vdev_id = data;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct ath12k *ar = arvif->ar;
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);

	if (arvif->vdev_id != *vdev_id)
		return;

	if (!arvif->is_up)
		return;

	ieee80211_beacon_loss(vif);

	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
	 * (done by mac80211) succeeds but beacons do not resume then it
	 * doesn't make sense to continue operation. Queue connection loss work
	 * which can be cancelled when beacon is received.
	 */
	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
				     ATH12K_CONNECTION_LOSS_HZ);
}

void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
{
	ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath12k_mac_handle_beacon_miss_iter,
						   &vdev_id);
}

static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
	struct ath12k_vif *arvif = container_of(work, struct ath12k_vif,
						connection_loss_work.work);
	struct ieee80211_vif *vif = arvif->vif;

	if (!arvif->is_up)
		return;

	ieee80211_connection_loss(vif);
}

static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	u32 aid;

	lockdep_assert_held(&ar->conf_mutex);

	if (vif->type == NL80211_IFTYPE_STATION)
		aid = vif->cfg.aid;
	else
		aid = sta->aid;

	ether_addr_copy(arg->peer_mac, sta->addr);
	arg->vdev_id = arvif->vdev_id;
	arg->peer_associd = aid;
	arg->auth_flag = true;
	/* TODO: STA WAR in ath10k for listen interval required? */
	arg->peer_listen_intval = hw->conf.listen_interval;
	arg->peer_nss = 1;
	arg->peer_caps = vif->bss_conf.assoc_capability;
}

static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ieee80211_bss_conf *info = &vif->bss_conf;
	struct cfg80211_chan_def def;
	struct cfg80211_bss *bss;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	const u8 *rsnie = NULL;
	const u8 *wpaie = NULL;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
		return;

	bss = cfg80211_get_bss(hw->wiphy, def.chan, info->bssid, NULL, 0,
			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);

	if (arvif->rsnie_present || arvif->wpaie_present) {
		arg->need_ptk_4_way = true;
		if (arvif->wpaie_present)
			arg->need_gtk_2_way = true;
	} else if (bss) {
		const struct cfg80211_bss_ies *ies;

		rcu_read_lock();
		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);

		ies = rcu_dereference(bss->ies);

		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
						WLAN_OUI_TYPE_MICROSOFT_WPA,
						ies->data,
						ies->len);
		rcu_read_unlock();
		cfg80211_put_bss(hw->wiphy, bss);
	}

	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
	if (rsnie || wpaie) {
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "%s: rsn ie found\n", __func__);
		arg->need_ptk_4_way = true;
	}

	if (wpaie) {
		ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
			   "%s: wpa ie found\n", __func__);
		arg->need_gtk_2_way = true;
	}

	if (sta->mfp) {
		/* TODO: Need to check if FW supports PMF? */
		arg->is_pmf_enabled = true;
	}

	/* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
}

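/* Build the peer's legacy (CCK/OFDM) rate set: intersect the rates the STA
 * advertises on this band with the user-configured bitrate mask and convert
 * each selected rate with ath12k_mac_bitrate_to_rate() (500 kbps units,
 * BIT(7) marking CCK rates).
 */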
static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
	struct cfg80211_chan_def def;
	const struct ieee80211_supported_band *sband;
	const struct ieee80211_rate *rates;
	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
	enum nl80211_band band;
	u32 ratemask;
	u8 rate;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
		return;

	band = def.chan->band;
	sband = hw->wiphy->bands[band];
	ratemask = sta->deflink.supp_rates[band];
	ratemask &= arvif->bitrate_mask.control[band].legacy;
	rates = sband->bitrates;

	rateset->num_rates = 0;

	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
		if (!(ratemask & 1))
			continue;

		rate = ath12k_mac_bitrate_to_rate(rates->bitrate);
		rateset->rates[rateset->num_rates] = rate;
		rateset->num_rates++;
	}
}

static bool
ath12k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
{
	int nss;

	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
		if (ht_mcs_mask[nss])
			return false;

	return true;
}

static bool
ath12k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
{
	int nss;

	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
		if (vht_mcs_mask[nss])
			return false;

	return true;
}

static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct cfg80211_chan_def def;
	enum nl80211_band band;
	const u8 *ht_mcs_mask;
	int i, n;
	u8 max_nss;
	u32 stbc;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
		return;

	if (!ht_cap->ht_supported)
		return;

	band = def.chan->band;
	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;

	if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask))
		return;

	arg->ht_flag = true;

	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				    ht_cap->ampdu_factor)) - 1;

	arg->peer_mpdu_density =
		ath12k_parse_mpdudensity(ht_cap->ampdu_density);

	arg->peer_ht_caps = ht_cap->cap;
	arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;

	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
		arg->ldpc_flag = true;

	if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
		arg->bw_40 = true;
		arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
	}

	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
		if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
				   IEEE80211_HT_CAP_SGI_40))
			arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
	}

	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
		arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
		arg->stbc_flag = true;
	}

	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
		stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
		arg->peer_rate_caps |= stbc;
		arg->stbc_flag = true;
	}

	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
		arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
	else if (ht_cap->mcs.rx_mask[1])
		arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;

	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
			max_nss = (i / 8) + 1;
			arg->peer_ht_rates.rates[n++] = i;
		}

	/* This is a workaround for HT-enabled STAs which break the spec
	 * and have no HT capabilities RX mask (no HT RX MCS map).
	 *
	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
	 *
	 * Firmware asserts if such situation occurs.
	 */
	if (n == 0) {
		arg->peer_ht_rates.num_rates = 8;
		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
			arg->peer_ht_rates.rates[i] = i;
	} else {
		arg->peer_ht_rates.num_rates = n;
		arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
	}

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
		   arg->peer_mac,
		   arg->peer_ht_rates.num_rates,
		   arg->peer_nss);
}

static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
	switch ((mcs_map >> (2 * nss)) & 0x3) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
	}
	return 0;
}

static u16
ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
{
	int idx_limit;
	int nss;
	u16 mcs_map;
	u16 mcs;

	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
		mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
			  vht_mcs_limit[nss];

		if (mcs_map)
			idx_limit = fls(mcs_map) - 1;
		else
			idx_limit = -1;

		switch (idx_limit) {
		case 0:
		case 1:
		case 2:
		case 3:
		case 4:
		case 5:
		case 6:
		case 7:
			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
			break;
		case 8:
			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
			break;
		case 9:
			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
			break;
		default:
			WARN_ON(1);
			fallthrough;
		case -1:
			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
			break;
		}

		tx_mcs_set &= ~(0x3 << (nss * 2));
		tx_mcs_set |= mcs << (nss * 2);
	}

	return tx_mcs_set;
}

static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct ath12k_wmi_peer_assoc_arg *arg)
{
	const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
	struct cfg80211_chan_def def;
	enum nl80211_band band;
	const u16 *vht_mcs_mask;
	u16 tx_mcs_map;
	u8 ampdu_factor;
	u8 max_nss, vht_mcs;
	int i;

	if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
		return;

	if (!vht_cap->vht_supported)
		return;

	band = def.chan->band;
	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;

	if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask))
		return;

	arg->vht_flag = true;

	/* TODO: similar flags required? */
	arg->vht_capable = true;

	if (def.chan->band == NL80211_BAND_2GHZ)
		arg->vht_ng_flag = true;

	arg->peer_vht_caps = vht_cap->cap;

	ampdu_factor = (vht_cap->cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;

	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
	 * zero in VHT IE. Using it would result in degraded throughput.
	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
	 * it if VHT max_mpdu is smaller.
	 */
	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
					 ampdu_factor)) - 1);

	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
		arg->bw_80 = true;

	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
		arg->bw_160 = true;

	/* Calculate peer NSS capability from VHT capabilities if STA
	 * supports VHT.
	 */
	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
			  (2 * i) & 3;

		if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
		    vht_mcs_mask[i])
			max_nss = i + 1;
	}
	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
	arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
	arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
	arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);

	tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
	arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask);

	/* In QCN9274 platform, VHT MCS rate 10 and 11 is enabled by default.
	 * VHT MCS rate 10 and 11 is not supported in 11ac standard.
	 * so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
	 */
	arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
	arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;

	if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
	    IEEE80211_VHT_MCS_NOT_SUPPORTED)
		arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;

	/* TODO: Check */
	arg->tx_max_mcs_nss = 0xFF;

	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);

	/* TODO: rxnss_override */
}

static void ath12k_peer_assoc_h_he(struct ath12k *ar,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ath12k_wmi_peer_assoc_arg *arg)
{
	const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
	int i;
	u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
	u16 mcs_160_map, mcs_80_map;
	bool support_160;
	u16 v;

	if (!he_cap->has_he)
		return;

	arg->he_flag = true;

	support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
			 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);

	/* Supported HE-MCS and NSS Set of peer he_cap is intersection with self he_cap */
	mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
	mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);

	if (support_160) {
		for (i = 7; i >= 0; i--) {
			u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;

			if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
				rx_mcs_160 = i + 1;
				break;
			}
		}
	}

	for (i = 7; i >= 0; i--) {
		u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;

		if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
			rx_mcs_80 = i + 1;
			break;
		}
	}

	if (support_160)
		max_nss = min(rx_mcs_80, rx_mcs_160);
	else
		max_nss = rx_mcs_80;

	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);

	memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
	       sizeof(he_cap->he_cap_elem.mac_cap_info));
	memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
	       sizeof(he_cap->he_cap_elem.phy_cap_info));
	arg->peer_he_ops = vif->bss_conf.he_oper.params;

	/* the top most byte is used to indicate BSS color info */
	arg->peer_he_ops &= 0xffffff;

	/* As per section 26.6.1 IEEE Std 802.11ax-2022, if the Max AMPDU
	 * Exponent Extension in HE cap is zero, use the arg->peer_max_mpdu
	 * as calculated while parsing VHT caps(if VHT caps is present)
	 * or HT caps (if VHT caps is not present).
	 *
	 * For non-zero value of Max AMPDU Exponent Extension in HE MAC caps,
	 * if a HE STA sends VHT cap and HE cap IE in assoc request then, use
	 * MAX_AMPDU_LEN_FACTOR as 20 to calculate max_ampdu length.
	 * If a HE STA that does not send VHT cap, but HE and HT cap in assoc
	 * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
	 * length.
	 */
	ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
			IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
			IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;

	if (ampdu_factor) {
		if (sta->deflink.vht_cap.vht_supported)
			arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
						    ampdu_factor)) - 1;
		else if (sta->deflink.ht_cap.ht_supported)
			arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
						    ampdu_factor)) - 1;
	}

	if (he_cap->he_cap_elem.phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
		int bit = 7;
		int nss, ru;

		arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
					  IEEE80211_PPE_THRES_NSS_MASK;
		arg->peer_ppet.ru_bit_mask =
			(he_cap->ppe_thres[0] &
			 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
			IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;

		for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
			for (ru = 0; ru < 4; ru++) {
				u32 val = 0;
				int i;

				if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
					continue;
				for (i = 0; i < 6; i++) {
					val >>= 1;
					val |= ((he_cap->ppe_thres[bit / 8] >>
						 (bit % 8)) & 0x1) << 5;
					bit++;
				}
				arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
						val << (ru * 6);
			}
		}
	}

	if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
		arg->twt_responder = true;
	if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
		arg->twt_requester = true;

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (he_cap->he_cap_elem.phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
			v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
			arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;

			v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
			arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;

			arg->peer_he_mcs_count++;
		}
		v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;

		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;

		arg->peer_he_mcs_count++;
		fallthrough;

	default:
		v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;

		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;

		arg->peer_he_mcs_count++;
		break;
	}
}

static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
				     struct ath12k_wmi_peer_assoc_arg *arg)
{
	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
	int smps;

	if (!ht_cap->ht_supported)
		return;

	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;

	switch (smps) {
	case WLAN_HT_CAP_SM_PS_STATIC:
		arg->static_mimops_flag = true;
		break;
	case WLAN_HT_CAP_SM_PS_DYNAMIC:
		arg->dynamic_mimops_flag = true;
		break;
	case WLAN_HT_CAP_SM_PS_DISABLED:
		arg->spatial_mux_flag = true;
		break;
	default:
		break;
	}
}

static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta,
				    struct ath12k_wmi_peer_assoc_arg *arg)
{
	struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);

ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2064 2065 switch (arvif->vdev_type) { 2066 case WMI_VDEV_TYPE_AP: 2067 if (sta->wme) { 2068 /* TODO: Check WME vs QoS */ 2069 arg->is_wme_set = true; 2070 arg->qos_flag = true; 2071 } 2072 2073 if (sta->wme && sta->uapsd_queues) { 2074 /* TODO: Check WME vs QoS */ 2075 arg->is_wme_set = true; 2076 arg->apsd_flag = true; 2077 arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG; 2078 } 2079 break; 2080 case WMI_VDEV_TYPE_STA: 2081 if (sta->wme) { 2082 arg->is_wme_set = true; 2083 arg->qos_flag = true; 2084 } 2085 break; 2086 default: 2087 break; 2088 } 2089 2090 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n", 2091 sta->addr, arg->qos_flag); 2092 } 2093 2094 static int ath12k_peer_assoc_qos_ap(struct ath12k *ar, 2095 struct ath12k_vif *arvif, 2096 struct ieee80211_sta *sta) 2097 { 2098 struct ath12k_wmi_ap_ps_arg arg; 2099 u32 max_sp; 2100 u32 uapsd; 2101 int ret; 2102 2103 lockdep_assert_held(&ar->conf_mutex); 2104 2105 arg.vdev_id = arvif->vdev_id; 2106 2107 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2108 sta->uapsd_queues, sta->max_sp); 2109 2110 uapsd = 0; 2111 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2112 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2113 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2114 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2115 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2116 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2117 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2118 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2119 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2120 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2121 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2122 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2123 2124 max_sp = 0; 2125 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2126 max_sp = sta->max_sp; 2127 2128 arg.param = WMI_AP_PS_PEER_PARAM_UAPSD; 2129 arg.value = uapsd; 2130 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2131 if (ret) 2132 goto err; 2133 2134 arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP; 2135 arg.value = max_sp; 2136 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2137 if (ret) 2138 goto err; 2139 2140 /* TODO: revisit during testing */ 2141 arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE; 2142 arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; 2143 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2144 if (ret) 2145 goto err; 2146 2147 arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD; 2148 arg.value = DISABLE_SIFS_RESPONSE_TRIGGER; 2149 ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg); 2150 if (ret) 2151 goto err; 2152 2153 return 0; 2154 2155 err: 2156 ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n", 2157 arg.param, arvif->vdev_id, ret); 2158 return ret; 2159 } 2160 2161 static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2162 { 2163 return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >> 2164 ATH12K_MAC_FIRST_OFDM_RATE_IDX; 2165 } 2166 2167 static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar, 2168 struct ieee80211_sta *sta) 2169 { 2170 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2171 switch (sta->deflink.vht_cap.cap & 2172 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2173 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2174 return MODE_11AC_VHT160; 2175 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2176 return MODE_11AC_VHT80_80; 2177 default: 2178 /* not sure if this is a valid case? 
*/ 2179 return MODE_11AC_VHT160; 2180 } 2181 } 2182 2183 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2184 return MODE_11AC_VHT80; 2185 2186 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2187 return MODE_11AC_VHT40; 2188 2189 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2190 return MODE_11AC_VHT20; 2191 2192 return MODE_UNKNOWN; 2193 } 2194 2195 static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar, 2196 struct ieee80211_sta *sta) 2197 { 2198 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2199 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2200 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 2201 return MODE_11AX_HE160; 2202 else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2203 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 2204 return MODE_11AX_HE80_80; 2205 /* not sure if this is a valid case? */ 2206 return MODE_11AX_HE160; 2207 } 2208 2209 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2210 return MODE_11AX_HE80; 2211 2212 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2213 return MODE_11AX_HE40; 2214 2215 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2216 return MODE_11AX_HE20; 2217 2218 return MODE_UNKNOWN; 2219 } 2220 2221 static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar, 2222 struct ieee80211_sta *sta) 2223 { 2224 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) 2225 if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] & 2226 IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) 2227 return MODE_11BE_EHT320; 2228 2229 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) { 2230 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2231 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 2232 return MODE_11BE_EHT160; 2233 2234 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] & 2235 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 2236 return MODE_11BE_EHT80_80; 2237 2238 ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n", 2239 sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]); 2240 2241 return MODE_11BE_EHT160; 2242 } 2243 2244 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2245 return MODE_11BE_EHT80; 2246 2247 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2248 return MODE_11BE_EHT40; 2249 2250 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) 2251 return MODE_11BE_EHT20; 2252 2253 return MODE_UNKNOWN; 2254 } 2255 2256 static void ath12k_peer_assoc_h_phymode(struct ath12k *ar, 2257 struct ieee80211_vif *vif, 2258 struct ieee80211_sta *sta, 2259 struct ath12k_wmi_peer_assoc_arg *arg) 2260 { 2261 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2262 struct cfg80211_chan_def def; 2263 enum nl80211_band band; 2264 const u8 *ht_mcs_mask; 2265 const u16 *vht_mcs_mask; 2266 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2267 2268 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 2269 return; 2270 2271 band = def.chan->band; 2272 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2273 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2274 2275 switch (band) { 2276 case NL80211_BAND_2GHZ: 2277 if (sta->deflink.eht_cap.has_eht) { 2278 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2279 phymode = MODE_11BE_EHT40_2G; 2280 else 2281 phymode = MODE_11BE_EHT20_2G; 2282 } else if (sta->deflink.he_cap.has_he) { 2283 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) 2284 phymode = MODE_11AX_HE80_2G; 2285 else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2286 phymode = MODE_11AX_HE40_2G; 2287 else 
2288 phymode = MODE_11AX_HE20_2G; 2289 } else if (sta->deflink.vht_cap.vht_supported && 2290 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2291 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2292 phymode = MODE_11AC_VHT40; 2293 else 2294 phymode = MODE_11AC_VHT20; 2295 } else if (sta->deflink.ht_cap.ht_supported && 2296 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2297 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) 2298 phymode = MODE_11NG_HT40; 2299 else 2300 phymode = MODE_11NG_HT20; 2301 } else if (ath12k_mac_sta_has_ofdm_only(sta)) { 2302 phymode = MODE_11G; 2303 } else { 2304 phymode = MODE_11B; 2305 } 2306 break; 2307 case NL80211_BAND_5GHZ: 2308 case NL80211_BAND_6GHZ: 2309 /* Check EHT first */ 2310 if (sta->deflink.eht_cap.has_eht) { 2311 phymode = ath12k_mac_get_phymode_eht(ar, sta); 2312 } else if (sta->deflink.he_cap.has_he) { 2313 phymode = ath12k_mac_get_phymode_he(ar, sta); 2314 } else if (sta->deflink.vht_cap.vht_supported && 2315 !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2316 phymode = ath12k_mac_get_phymode_vht(ar, sta); 2317 } else if (sta->deflink.ht_cap.ht_supported && 2318 !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2319 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) 2320 phymode = MODE_11NA_HT40; 2321 else 2322 phymode = MODE_11NA_HT20; 2323 } else { 2324 phymode = MODE_11A; 2325 } 2326 break; 2327 default: 2328 break; 2329 } 2330 2331 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n", 2332 sta->addr, ath12k_mac_phymode_str(phymode)); 2333 2334 arg->peer_phymode = phymode; 2335 WARN_ON(phymode == MODE_UNKNOWN); 2336 } 2337 2338 static void ath12k_mac_set_eht_mcs(u8 rx_tx_mcs7, u8 rx_tx_mcs9, 2339 u8 rx_tx_mcs11, u8 rx_tx_mcs13, 2340 u32 *rx_mcs, u32 *tx_mcs) 2341 { 2342 *rx_mcs = 0; 2343 u32p_replace_bits(rx_mcs, 2344 u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_RX), 2345 WMI_EHT_MCS_NSS_0_7); 2346 u32p_replace_bits(rx_mcs, 2347 u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_RX), 2348 WMI_EHT_MCS_NSS_8_9); 2349 u32p_replace_bits(rx_mcs, 2350 u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_RX), 2351 WMI_EHT_MCS_NSS_10_11); 2352 u32p_replace_bits(rx_mcs, 2353 u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_RX), 2354 WMI_EHT_MCS_NSS_12_13); 2355 2356 *tx_mcs = 0; 2357 u32p_replace_bits(tx_mcs, 2358 u8_get_bits(rx_tx_mcs7, IEEE80211_EHT_MCS_NSS_TX), 2359 WMI_EHT_MCS_NSS_0_7); 2360 u32p_replace_bits(tx_mcs, 2361 u8_get_bits(rx_tx_mcs9, IEEE80211_EHT_MCS_NSS_TX), 2362 WMI_EHT_MCS_NSS_8_9); 2363 u32p_replace_bits(tx_mcs, 2364 u8_get_bits(rx_tx_mcs11, IEEE80211_EHT_MCS_NSS_TX), 2365 WMI_EHT_MCS_NSS_10_11); 2366 u32p_replace_bits(tx_mcs, 2367 u8_get_bits(rx_tx_mcs13, IEEE80211_EHT_MCS_NSS_TX), 2368 WMI_EHT_MCS_NSS_12_13); 2369 } 2370 2371 static void ath12k_mac_set_eht_ppe_threshold(const u8 *ppe_thres, 2372 struct ath12k_wmi_ppe_threshold_arg *ppet) 2373 { 2374 u32 bit_pos = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE, val; 2375 u8 nss, ru, i; 2376 u8 ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 2377 2378 ppet->numss_m1 = u8_get_bits(ppe_thres[0], IEEE80211_EHT_PPE_THRES_NSS_MASK); 2379 ppet->ru_bit_mask = u16_get_bits(get_unaligned_le16(ppe_thres), 2380 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2381 2382 for (nss = 0; nss <= ppet->numss_m1; nss++) { 2383 for (ru = 0; 2384 ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 2385 ru++) { 2386 if ((ppet->ru_bit_mask & BIT(ru)) == 0) 2387 continue; 2388 2389 val = 0; 2390 for (i = 0; i < ppet_bit_len_per_ru; i++) { 2391 val |= (((ppe_thres[bit_pos 
/ 8] >> 2392 (bit_pos % 8)) & 0x1) << i); 2393 bit_pos++; 2394 } 2395 ppet->ppet16_ppet8_ru3_ru0[nss] |= 2396 (val << (ru * ppet_bit_len_per_ru)); 2397 } 2398 } 2399 } 2400 2401 static void ath12k_peer_assoc_h_eht(struct ath12k *ar, 2402 struct ieee80211_vif *vif, 2403 struct ieee80211_sta *sta, 2404 struct ath12k_wmi_peer_assoc_arg *arg) 2405 { 2406 const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap; 2407 const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; 2408 const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20; 2409 const struct ieee80211_eht_mcs_nss_supp_bw *bw; 2410 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2411 u32 *rx_mcs, *tx_mcs; 2412 2413 if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht) 2414 return; 2415 2416 arg->eht_flag = true; 2417 2418 if ((eht_cap->eht_cap_elem.phy_cap_info[5] & 2419 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) && 2420 eht_cap->eht_ppe_thres[0] != 0) 2421 ath12k_mac_set_eht_ppe_threshold(eht_cap->eht_ppe_thres, 2422 &arg->peer_eht_ppet); 2423 2424 memcpy(arg->peer_eht_cap_mac, eht_cap->eht_cap_elem.mac_cap_info, 2425 sizeof(eht_cap->eht_cap_elem.mac_cap_info)); 2426 memcpy(arg->peer_eht_cap_phy, eht_cap->eht_cap_elem.phy_cap_info, 2427 sizeof(eht_cap->eht_cap_elem.phy_cap_info)); 2428 2429 rx_mcs = arg->peer_eht_rx_mcs_set; 2430 tx_mcs = arg->peer_eht_tx_mcs_set; 2431 2432 switch (sta->deflink.bandwidth) { 2433 case IEEE80211_STA_RX_BW_320: 2434 bw = &eht_cap->eht_mcs_nss_supp.bw._320; 2435 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2436 bw->rx_tx_mcs9_max_nss, 2437 bw->rx_tx_mcs11_max_nss, 2438 bw->rx_tx_mcs13_max_nss, 2439 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320], 2440 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_320]); 2441 arg->peer_eht_mcs_count++; 2442 fallthrough; 2443 case IEEE80211_STA_RX_BW_160: 2444 bw = &eht_cap->eht_mcs_nss_supp.bw._160; 2445 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2446 bw->rx_tx_mcs9_max_nss, 2447 bw->rx_tx_mcs11_max_nss, 2448 bw->rx_tx_mcs13_max_nss, 2449 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160], 2450 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_160]); 2451 arg->peer_eht_mcs_count++; 2452 fallthrough; 2453 default: 2454 if ((he_cap->he_cap_elem.phy_cap_info[0] & 2455 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 2456 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | 2457 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | 2458 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) { 2459 bw_20 = &eht_cap->eht_mcs_nss_supp.only_20mhz; 2460 2461 ath12k_mac_set_eht_mcs(bw_20->rx_tx_mcs7_max_nss, 2462 bw_20->rx_tx_mcs9_max_nss, 2463 bw_20->rx_tx_mcs11_max_nss, 2464 bw_20->rx_tx_mcs13_max_nss, 2465 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], 2466 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); 2467 } else { 2468 bw = &eht_cap->eht_mcs_nss_supp.bw._80; 2469 ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss, 2470 bw->rx_tx_mcs9_max_nss, 2471 bw->rx_tx_mcs11_max_nss, 2472 bw->rx_tx_mcs13_max_nss, 2473 &rx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80], 2474 &tx_mcs[WMI_EHTCAP_TXRX_MCS_NSS_IDX_80]); 2475 } 2476 2477 arg->peer_eht_mcs_count++; 2478 break; 2479 } 2480 2481 arg->punct_bitmap = ~arvif->punct_bitmap; 2482 } 2483 2484 static void ath12k_peer_assoc_prepare(struct ath12k *ar, 2485 struct ieee80211_vif *vif, 2486 struct ieee80211_sta *sta, 2487 struct ath12k_wmi_peer_assoc_arg *arg, 2488 bool reassoc) 2489 { 2490 lockdep_assert_held(&ar->conf_mutex); 2491 2492 memset(arg, 0, sizeof(*arg)); 2493 2494 reinit_completion(&ar->peer_assoc_done); 2495 2496 arg->peer_new_assoc = 
!reassoc; 2497 ath12k_peer_assoc_h_basic(ar, vif, sta, arg); 2498 ath12k_peer_assoc_h_crypto(ar, vif, sta, arg); 2499 ath12k_peer_assoc_h_rates(ar, vif, sta, arg); 2500 ath12k_peer_assoc_h_ht(ar, vif, sta, arg); 2501 ath12k_peer_assoc_h_vht(ar, vif, sta, arg); 2502 ath12k_peer_assoc_h_he(ar, vif, sta, arg); 2503 ath12k_peer_assoc_h_eht(ar, vif, sta, arg); 2504 ath12k_peer_assoc_h_qos(ar, vif, sta, arg); 2505 ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); 2506 ath12k_peer_assoc_h_smps(sta, arg); 2507 2508 /* TODO: amsdu_disable req? */ 2509 } 2510 2511 static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, 2512 const u8 *addr, 2513 const struct ieee80211_sta_ht_cap *ht_cap) 2514 { 2515 int smps; 2516 2517 if (!ht_cap->ht_supported) 2518 return 0; 2519 2520 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; 2521 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2522 2523 if (smps >= ARRAY_SIZE(ath12k_smps_map)) 2524 return -EINVAL; 2525 2526 return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, 2527 WMI_PEER_MIMO_PS_STATE, 2528 ath12k_smps_map[smps]); 2529 } 2530 2531 static void ath12k_bss_assoc(struct ath12k *ar, 2532 struct ath12k_vif *arvif, 2533 struct ieee80211_bss_conf *bss_conf) 2534 { 2535 struct ieee80211_vif *vif = arvif->vif; 2536 struct ath12k_wmi_peer_assoc_arg peer_arg; 2537 struct ieee80211_sta *ap_sta; 2538 struct ath12k_peer *peer; 2539 bool is_auth = false; 2540 int ret; 2541 2542 lockdep_assert_held(&ar->conf_mutex); 2543 2544 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2545 arvif->vdev_id, arvif->bssid, arvif->aid); 2546 2547 rcu_read_lock(); 2548 2549 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2550 if (!ap_sta) { 2551 ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n", 2552 bss_conf->bssid, arvif->vdev_id); 2553 rcu_read_unlock(); 2554 return; 2555 } 2556 2557 ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false); 2558 2559 rcu_read_unlock(); 2560 2561 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 2562 if (ret) { 2563 ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n", 2564 bss_conf->bssid, arvif->vdev_id, ret); 2565 return; 2566 } 2567 2568 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { 2569 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 2570 bss_conf->bssid, arvif->vdev_id); 2571 return; 2572 } 2573 2574 ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, 2575 &ap_sta->deflink.ht_cap); 2576 if (ret) { 2577 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 2578 arvif->vdev_id, ret); 2579 return; 2580 } 2581 2582 WARN_ON(arvif->is_up); 2583 2584 arvif->aid = vif->cfg.aid; 2585 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2586 2587 ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2588 if (ret) { 2589 ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n", 2590 arvif->vdev_id, ret); 2591 return; 2592 } 2593 2594 arvif->is_up = true; 2595 2596 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2597 "mac vdev %d up (associated) bssid %pM aid %d\n", 2598 arvif->vdev_id, bss_conf->bssid, vif->cfg.aid); 2599 2600 spin_lock_bh(&ar->ab->base_lock); 2601 2602 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid); 2603 if (peer && peer->is_authorized) 2604 is_auth = true; 2605 2606 spin_unlock_bh(&ar->ab->base_lock); 2607 2608 /* Authorize BSS Peer */ 2609 if (is_auth) { 2610 ret = ath12k_wmi_set_peer_param(ar, arvif->bssid, 2611 arvif->vdev_id, 2612 WMI_PEER_AUTHORIZE, 2613 1); 2614 if 
(ret) 2615 ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret); 2616 } 2617 2618 ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, 2619 &bss_conf->he_obss_pd); 2620 if (ret) 2621 ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n", 2622 arvif->vdev_id, ret); 2623 } 2624 2625 static void ath12k_bss_disassoc(struct ath12k *ar, 2626 struct ath12k_vif *arvif) 2627 { 2628 int ret; 2629 2630 lockdep_assert_held(&ar->conf_mutex); 2631 2632 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2633 arvif->vdev_id, arvif->bssid); 2634 2635 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); 2636 if (ret) 2637 ath12k_warn(ar->ab, "failed to down vdev %i: %d\n", 2638 arvif->vdev_id, ret); 2639 2640 arvif->is_up = false; 2641 2642 cancel_delayed_work(&arvif->connection_loss_work); 2643 } 2644 2645 static u32 ath12k_mac_get_rate_hw_value(int bitrate) 2646 { 2647 u32 preamble; 2648 u16 hw_value; 2649 int rate; 2650 size_t i; 2651 2652 if (ath12k_mac_bitrate_is_cck(bitrate)) 2653 preamble = WMI_RATE_PREAMBLE_CCK; 2654 else 2655 preamble = WMI_RATE_PREAMBLE_OFDM; 2656 2657 for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) { 2658 if (ath12k_legacy_rates[i].bitrate != bitrate) 2659 continue; 2660 2661 hw_value = ath12k_legacy_rates[i].hw_value; 2662 rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble); 2663 2664 return rate; 2665 } 2666 2667 return -EINVAL; 2668 } 2669 2670 static void ath12k_recalculate_mgmt_rate(struct ath12k *ar, 2671 struct ieee80211_vif *vif, 2672 struct cfg80211_chan_def *def) 2673 { 2674 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 2675 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2676 const struct ieee80211_supported_band *sband; 2677 u8 basic_rate_idx; 2678 int hw_rate_code; 2679 u32 vdev_param; 2680 u16 bitrate; 2681 int ret; 2682 2683 lockdep_assert_held(&ar->conf_mutex); 2684 2685 sband = hw->wiphy->bands[def->chan->band]; 2686 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 2687 bitrate = sband->bitrates[basic_rate_idx].bitrate; 2688 2689 hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate); 2690 if (hw_rate_code < 0) { 2691 ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate); 2692 return; 2693 } 2694 2695 vdev_param = WMI_VDEV_PARAM_MGMT_RATE; 2696 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, 2697 hw_rate_code); 2698 if (ret) 2699 ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret); 2700 2701 vdev_param = WMI_VDEV_PARAM_BEACON_RATE; 2702 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param, 2703 hw_rate_code); 2704 if (ret) 2705 ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret); 2706 } 2707 2708 static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif, 2709 struct ieee80211_bss_conf *info) 2710 { 2711 struct ath12k *ar = arvif->ar; 2712 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 2713 struct sk_buff *tmpl; 2714 int ret; 2715 u32 interval; 2716 bool unsol_bcast_probe_resp_enabled = false; 2717 2718 if (info->fils_discovery.max_interval) { 2719 interval = info->fils_discovery.max_interval; 2720 2721 tmpl = ieee80211_get_fils_discovery_tmpl(hw, arvif->vif); 2722 if (tmpl) 2723 ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id, 2724 tmpl); 2725 } else if (info->unsol_bcast_probe_resp_interval) { 2726 unsol_bcast_probe_resp_enabled = 1; 2727 interval = info->unsol_bcast_probe_resp_interval; 2728 2729 tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, 2730 arvif->vif); 2731 if (tmpl) 2732 ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id, 
2733 tmpl); 2734 } else { /* Disable */ 2735 return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false); 2736 } 2737 2738 if (!tmpl) { 2739 ath12k_warn(ar->ab, 2740 "mac vdev %i failed to retrieve %s template\n", 2741 arvif->vdev_id, (unsol_bcast_probe_resp_enabled ? 2742 "unsolicited broadcast probe response" : 2743 "FILS discovery")); 2744 return -EPERM; 2745 } 2746 kfree_skb(tmpl); 2747 2748 if (!ret) 2749 ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval, 2750 unsol_bcast_probe_resp_enabled); 2751 2752 return ret; 2753 } 2754 2755 static void ath12k_mac_vif_setup_ps(struct ath12k_vif *arvif) 2756 { 2757 struct ath12k *ar = arvif->ar; 2758 struct ieee80211_vif *vif = arvif->vif; 2759 struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf; 2760 enum wmi_sta_powersave_param param; 2761 enum wmi_sta_ps_mode psmode; 2762 int ret; 2763 int timeout; 2764 bool enable_ps; 2765 2766 lockdep_assert_held(&ar->conf_mutex); 2767 2768 if (vif->type != NL80211_IFTYPE_STATION) 2769 return; 2770 2771 enable_ps = arvif->ps; 2772 if (enable_ps) { 2773 psmode = WMI_STA_PS_MODE_ENABLED; 2774 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 2775 2776 timeout = conf->dynamic_ps_timeout; 2777 if (timeout == 0) { 2778 /* firmware doesn't like 0 */ 2779 timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000; 2780 } 2781 2782 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 2783 timeout); 2784 if (ret) { 2785 ath12k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n", 2786 arvif->vdev_id, ret); 2787 return; 2788 } 2789 } else { 2790 psmode = WMI_STA_PS_MODE_DISABLED; 2791 } 2792 2793 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d psmode %s\n", 2794 arvif->vdev_id, psmode ? "enable" : "disable"); 2795 2796 ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode); 2797 if (ret) 2798 ath12k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n", 2799 psmode, arvif->vdev_id, ret); 2800 } 2801 2802 static void ath12k_mac_bss_info_changed(struct ath12k *ar, 2803 struct ath12k_vif *arvif, 2804 struct ieee80211_bss_conf *info, 2805 u64 changed) 2806 { 2807 struct ieee80211_vif *vif = arvif->vif; 2808 struct ieee80211_vif_cfg *vif_cfg = &vif->cfg; 2809 struct cfg80211_chan_def def; 2810 u32 param_id, param_value; 2811 enum nl80211_band band; 2812 u32 vdev_param; 2813 int mcast_rate; 2814 u32 preamble; 2815 u16 hw_value; 2816 u16 bitrate; 2817 int ret; 2818 u8 rateidx; 2819 u32 rate; 2820 2821 lockdep_assert_held(&ar->conf_mutex); 2822 2823 if (changed & BSS_CHANGED_BEACON_INT) { 2824 arvif->beacon_interval = info->beacon_int; 2825 2826 param_id = WMI_VDEV_PARAM_BEACON_INTERVAL; 2827 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2828 param_id, 2829 arvif->beacon_interval); 2830 if (ret) 2831 ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n", 2832 arvif->vdev_id); 2833 else 2834 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2835 "Beacon interval: %d set for VDEV: %d\n", 2836 arvif->beacon_interval, arvif->vdev_id); 2837 } 2838 2839 if (changed & BSS_CHANGED_BEACON) { 2840 param_id = WMI_PDEV_PARAM_BEACON_TX_MODE; 2841 param_value = WMI_BEACON_BURST_MODE; 2842 ret = ath12k_wmi_pdev_set_param(ar, param_id, 2843 param_value, ar->pdev->pdev_id); 2844 if (ret) 2845 ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n", 2846 arvif->vdev_id); 2847 else 2848 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2849 "Set burst beacon mode for VDEV: %d\n", 2850 arvif->vdev_id); 2851 2852 ret = ath12k_mac_setup_bcn_tmpl(arvif); 2853 if (ret) 2854 ath12k_warn(ar->ab, 
"failed to update bcn template: %d\n", 2855 ret); 2856 } 2857 2858 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 2859 arvif->dtim_period = info->dtim_period; 2860 2861 param_id = WMI_VDEV_PARAM_DTIM_PERIOD; 2862 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2863 param_id, 2864 arvif->dtim_period); 2865 2866 if (ret) 2867 ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n", 2868 arvif->vdev_id, ret); 2869 else 2870 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2871 "DTIM period: %d set for VDEV: %d\n", 2872 arvif->dtim_period, arvif->vdev_id); 2873 } 2874 2875 if (changed & BSS_CHANGED_SSID && 2876 vif->type == NL80211_IFTYPE_AP) { 2877 arvif->u.ap.ssid_len = vif->cfg.ssid_len; 2878 if (vif->cfg.ssid_len) 2879 memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len); 2880 arvif->u.ap.hidden_ssid = info->hidden_ssid; 2881 } 2882 2883 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 2884 ether_addr_copy(arvif->bssid, info->bssid); 2885 2886 if (changed & BSS_CHANGED_BEACON_ENABLED) { 2887 ath12k_control_beaconing(arvif, info); 2888 2889 if (arvif->is_up && vif->bss_conf.he_support && 2890 vif->bss_conf.he_oper.params) { 2891 /* TODO: Extend to support 1024 BA Bitmap size */ 2892 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2893 WMI_VDEV_PARAM_BA_MODE, 2894 WMI_BA_MODE_BUFFER_SIZE_256); 2895 if (ret) 2896 ath12k_warn(ar->ab, 2897 "failed to set BA BUFFER SIZE 256 for vdev: %d\n", 2898 arvif->vdev_id); 2899 2900 param_id = WMI_VDEV_PARAM_HEOPS_0_31; 2901 param_value = vif->bss_conf.he_oper.params; 2902 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2903 param_id, param_value); 2904 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2905 "he oper param: %x set for VDEV: %d\n", 2906 param_value, arvif->vdev_id); 2907 2908 if (ret) 2909 ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n", 2910 param_value, arvif->vdev_id, ret); 2911 } 2912 } 2913 2914 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 2915 u32 cts_prot; 2916 2917 cts_prot = !!(info->use_cts_prot); 2918 param_id = WMI_VDEV_PARAM_PROTECTION_MODE; 2919 2920 if (arvif->is_started) { 2921 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2922 param_id, cts_prot); 2923 if (ret) 2924 ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n", 2925 arvif->vdev_id); 2926 else 2927 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n", 2928 cts_prot, arvif->vdev_id); 2929 } else { 2930 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n"); 2931 } 2932 } 2933 2934 if (changed & BSS_CHANGED_ERP_SLOT) { 2935 u32 slottime; 2936 2937 if (info->use_short_slot) 2938 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 2939 2940 else 2941 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 2942 2943 param_id = WMI_VDEV_PARAM_SLOT_TIME; 2944 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2945 param_id, slottime); 2946 if (ret) 2947 ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n", 2948 arvif->vdev_id); 2949 else 2950 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2951 "Set slottime: %d for VDEV: %d\n", 2952 slottime, arvif->vdev_id); 2953 } 2954 2955 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 2956 u32 preamble; 2957 2958 if (info->use_short_preamble) 2959 preamble = WMI_VDEV_PREAMBLE_SHORT; 2960 else 2961 preamble = WMI_VDEV_PREAMBLE_LONG; 2962 2963 param_id = WMI_VDEV_PARAM_PREAMBLE; 2964 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 2965 param_id, preamble); 2966 if (ret) 2967 ath12k_warn(ar->ab, "Failed to set 
preamble for VDEV: %d\n", 2968 arvif->vdev_id); 2969 else 2970 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 2971 "Set preamble: %d for VDEV: %d\n", 2972 preamble, arvif->vdev_id); 2973 } 2974 2975 if (changed & BSS_CHANGED_ASSOC) { 2976 if (vif->cfg.assoc) 2977 ath12k_bss_assoc(ar, arvif, info); 2978 else 2979 ath12k_bss_disassoc(ar, arvif); 2980 } 2981 2982 if (changed & BSS_CHANGED_TXPOWER) { 2983 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n", 2984 arvif->vdev_id, info->txpower); 2985 2986 arvif->txpower = info->txpower; 2987 ath12k_mac_txpower_recalc(ar); 2988 } 2989 2990 if (changed & BSS_CHANGED_MCAST_RATE && 2991 !ath12k_mac_vif_chan(arvif->vif, &def)) { 2992 band = def.chan->band; 2993 mcast_rate = vif->bss_conf.mcast_rate[band]; 2994 2995 if (mcast_rate > 0) 2996 rateidx = mcast_rate - 1; 2997 else 2998 rateidx = ffs(vif->bss_conf.basic_rates) - 1; 2999 3000 if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) 3001 rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; 3002 3003 bitrate = ath12k_legacy_rates[rateidx].bitrate; 3004 hw_value = ath12k_legacy_rates[rateidx].hw_value; 3005 3006 if (ath12k_mac_bitrate_is_cck(bitrate)) 3007 preamble = WMI_RATE_PREAMBLE_CCK; 3008 else 3009 preamble = WMI_RATE_PREAMBLE_OFDM; 3010 3011 rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble); 3012 3013 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 3014 "mac vdev %d mcast_rate %x\n", 3015 arvif->vdev_id, rate); 3016 3017 vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE; 3018 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 3019 vdev_param, rate); 3020 if (ret) 3021 ath12k_warn(ar->ab, 3022 "failed to set mcast rate on vdev %i: %d\n", 3023 arvif->vdev_id, ret); 3024 3025 vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE; 3026 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 3027 vdev_param, rate); 3028 if (ret) 3029 ath12k_warn(ar->ab, 3030 "failed to set bcast rate on vdev %i: %d\n", 3031 arvif->vdev_id, ret); 3032 } 3033 3034 if (changed & BSS_CHANGED_BASIC_RATES && 3035 !ath12k_mac_vif_chan(arvif->vif, &def)) 3036 ath12k_recalculate_mgmt_rate(ar, vif, &def); 3037 3038 if (changed & BSS_CHANGED_TWT) { 3039 if (info->twt_requester || info->twt_responder) 3040 ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id); 3041 else 3042 ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id); 3043 } 3044 3045 if (changed & BSS_CHANGED_HE_OBSS_PD) 3046 ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id, 3047 &info->he_obss_pd); 3048 3049 if (changed & BSS_CHANGED_HE_BSS_COLOR) { 3050 if (vif->type == NL80211_IFTYPE_AP) { 3051 ret = ath12k_wmi_obss_color_cfg_cmd(ar, 3052 arvif->vdev_id, 3053 info->he_bss_color.color, 3054 ATH12K_BSS_COLOR_AP_PERIODS, 3055 info->he_bss_color.enabled); 3056 if (ret) 3057 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", 3058 arvif->vdev_id, ret); 3059 } else if (vif->type == NL80211_IFTYPE_STATION) { 3060 ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar, 3061 arvif->vdev_id, 3062 1); 3063 if (ret) 3064 ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n", 3065 arvif->vdev_id, ret); 3066 ret = ath12k_wmi_obss_color_cfg_cmd(ar, 3067 arvif->vdev_id, 3068 0, 3069 ATH12K_BSS_COLOR_STA_PERIODS, 3070 1); 3071 if (ret) 3072 ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n", 3073 arvif->vdev_id, ret); 3074 } 3075 } 3076 3077 ath12k_mac_fils_discovery(arvif, info); 3078 3079 if (changed & BSS_CHANGED_PS && 3080 ar->ab->hw_params->supports_sta_ps) { 3081 arvif->ps = vif_cfg->ps; 3082 ath12k_mac_vif_setup_ps(arvif); 3083 } 3084 } 
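/*
 * Some mac80211 callbacks below (e.g. bss_info_changed and set_key) can be
 * invoked before the vif has been bound to a specific radio, i.e. while
 * ath12k_get_ar_by_vif() still returns NULL. In that case the requested
 * update is stashed in a lazily allocated ath12k_vif_cache and applied
 * later, when the vdev is eventually created on the radio chosen for this
 * vif; ath12k_arvif_put_cache() releases the stash again.
 */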
3085 3086 static struct ath12k_vif_cache *ath12k_arvif_get_cache(struct ath12k_vif *arvif) 3087 { 3088 if (!arvif->cache) 3089 arvif->cache = kzalloc(sizeof(*arvif->cache), GFP_KERNEL); 3090 3091 return arvif->cache; 3092 } 3093 3094 static void ath12k_arvif_put_cache(struct ath12k_vif *arvif) 3095 { 3096 kfree(arvif->cache); 3097 arvif->cache = NULL; 3098 } 3099 3100 static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, 3101 struct ieee80211_vif *vif, 3102 struct ieee80211_bss_conf *info, 3103 u64 changed) 3104 { 3105 struct ath12k *ar; 3106 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3107 struct ath12k_vif_cache *cache; 3108 3109 ar = ath12k_get_ar_by_vif(hw, vif); 3110 3111 /* if the vdev is not created on a certain radio, 3112 * cache the info to be updated later on vdev creation 3113 */ 3114 3115 if (!ar) { 3116 cache = ath12k_arvif_get_cache(arvif); 3117 if (!cache) 3118 return; 3119 arvif->cache->bss_conf_changed |= changed; 3120 return; 3121 } 3122 3123 mutex_lock(&ar->conf_mutex); 3124 3125 ath12k_mac_bss_info_changed(ar, arvif, info, changed); 3126 3127 mutex_unlock(&ar->conf_mutex); 3128 } 3129 3130 static struct ath12k* 3131 ath12k_mac_select_scan_device(struct ieee80211_hw *hw, 3132 struct ieee80211_vif *vif, 3133 struct ieee80211_scan_request *req) 3134 { 3135 struct ath12k_hw *ah = hw->priv; 3136 enum nl80211_band band; 3137 struct ath12k *ar; 3138 int i; 3139 3140 if (ah->num_radio == 1) 3141 return ah->radio; 3142 3143 /* Currently mac80211 supports splitting scan requests into 3144 * multiple scan requests per band. 3145 * Loop through first channel and determine the scan radio 3146 * TODO: There could be 5 GHz low/high channels in that case 3147 * split the hw request and perform multiple scans 3148 */ 3149 3150 if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ) 3151 band = NL80211_BAND_2GHZ; 3152 else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ) 3153 band = NL80211_BAND_5GHZ; 3154 else 3155 band = NL80211_BAND_6GHZ; 3156 3157 for_each_ar(ah, ar, i) { 3158 /* TODO 5 GHz low high split changes */ 3159 if (ar->mac.sbands[band].channels) 3160 return ar; 3161 } 3162 3163 return NULL; 3164 } 3165 3166 void __ath12k_mac_scan_finish(struct ath12k *ar) 3167 { 3168 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 3169 3170 lockdep_assert_held(&ar->data_lock); 3171 3172 switch (ar->scan.state) { 3173 case ATH12K_SCAN_IDLE: 3174 break; 3175 case ATH12K_SCAN_RUNNING: 3176 case ATH12K_SCAN_ABORTING: 3177 if (ar->scan.is_roc && ar->scan.roc_notify) 3178 ieee80211_remain_on_channel_expired(hw); 3179 fallthrough; 3180 case ATH12K_SCAN_STARTING: 3181 if (!ar->scan.is_roc) { 3182 struct cfg80211_scan_info info = { 3183 .aborted = ((ar->scan.state == 3184 ATH12K_SCAN_ABORTING) || 3185 (ar->scan.state == 3186 ATH12K_SCAN_STARTING)), 3187 }; 3188 3189 ieee80211_scan_completed(hw, &info); 3190 } 3191 3192 ar->scan.state = ATH12K_SCAN_IDLE; 3193 ar->scan_channel = NULL; 3194 ar->scan.roc_freq = 0; 3195 cancel_delayed_work(&ar->scan.timeout); 3196 complete(&ar->scan.completed); 3197 break; 3198 } 3199 } 3200 3201 void ath12k_mac_scan_finish(struct ath12k *ar) 3202 { 3203 spin_lock_bh(&ar->data_lock); 3204 __ath12k_mac_scan_finish(ar); 3205 spin_unlock_bh(&ar->data_lock); 3206 } 3207 3208 static int ath12k_scan_stop(struct ath12k *ar) 3209 { 3210 struct ath12k_wmi_scan_cancel_arg arg = { 3211 .req_type = WLAN_SCAN_CANCEL_SINGLE, 3212 .scan_id = ATH12K_SCAN_ID, 3213 }; 3214 int ret; 3215 3216 lockdep_assert_held(&ar->conf_mutex); 3217 3218 /* TODO: 
Fill other STOP Params */ 3219 arg.pdev_id = ar->pdev->pdev_id; 3220 3221 ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg); 3222 if (ret) { 3223 ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret); 3224 goto out; 3225 } 3226 3227 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 3228 if (ret == 0) { 3229 ath12k_warn(ar->ab, 3230 "failed to receive scan abort comple: timed out\n"); 3231 ret = -ETIMEDOUT; 3232 } else if (ret > 0) { 3233 ret = 0; 3234 } 3235 3236 out: 3237 /* Scan state should be updated upon scan completion but in case 3238 * firmware fails to deliver the event (for whatever reason) it is 3239 * desired to clean up scan state anyway. Firmware may have just 3240 * dropped the scan completion event delivery due to transport pipe 3241 * being overflown with data and/or it can recover on its own before 3242 * next scan request is submitted. 3243 */ 3244 spin_lock_bh(&ar->data_lock); 3245 if (ar->scan.state != ATH12K_SCAN_IDLE) 3246 __ath12k_mac_scan_finish(ar); 3247 spin_unlock_bh(&ar->data_lock); 3248 3249 return ret; 3250 } 3251 3252 static void ath12k_scan_abort(struct ath12k *ar) 3253 { 3254 int ret; 3255 3256 lockdep_assert_held(&ar->conf_mutex); 3257 3258 spin_lock_bh(&ar->data_lock); 3259 3260 switch (ar->scan.state) { 3261 case ATH12K_SCAN_IDLE: 3262 /* This can happen if timeout worker kicked in and called 3263 * abortion while scan completion was being processed. 3264 */ 3265 break; 3266 case ATH12K_SCAN_STARTING: 3267 case ATH12K_SCAN_ABORTING: 3268 ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n", 3269 ar->scan.state); 3270 break; 3271 case ATH12K_SCAN_RUNNING: 3272 ar->scan.state = ATH12K_SCAN_ABORTING; 3273 spin_unlock_bh(&ar->data_lock); 3274 3275 ret = ath12k_scan_stop(ar); 3276 if (ret) 3277 ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret); 3278 3279 spin_lock_bh(&ar->data_lock); 3280 break; 3281 } 3282 3283 spin_unlock_bh(&ar->data_lock); 3284 } 3285 3286 static void ath12k_scan_timeout_work(struct work_struct *work) 3287 { 3288 struct ath12k *ar = container_of(work, struct ath12k, 3289 scan.timeout.work); 3290 3291 mutex_lock(&ar->conf_mutex); 3292 ath12k_scan_abort(ar); 3293 mutex_unlock(&ar->conf_mutex); 3294 } 3295 3296 static int ath12k_start_scan(struct ath12k *ar, 3297 struct ath12k_wmi_scan_req_arg *arg) 3298 { 3299 int ret; 3300 3301 lockdep_assert_held(&ar->conf_mutex); 3302 3303 ret = ath12k_wmi_send_scan_start_cmd(ar, arg); 3304 if (ret) 3305 return ret; 3306 3307 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 3308 if (ret == 0) { 3309 ret = ath12k_scan_stop(ar); 3310 if (ret) 3311 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret); 3312 3313 return -ETIMEDOUT; 3314 } 3315 3316 /* If we failed to start the scan, return error code at 3317 * this point. This is probably due to some issue in the 3318 * firmware, but no need to wedge the driver due to that... 
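 * (Concretely: the scan.started completion fired, but if ar->scan.state has
 * already dropped back to ATH12K_SCAN_IDLE by the time it is re-checked
 * below, the start is treated as failed and -EINVAL is returned.)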
3319 */ 3320 spin_lock_bh(&ar->data_lock); 3321 if (ar->scan.state == ATH12K_SCAN_IDLE) { 3322 spin_unlock_bh(&ar->data_lock); 3323 return -EINVAL; 3324 } 3325 spin_unlock_bh(&ar->data_lock); 3326 3327 return 0; 3328 } 3329 3330 static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, 3331 struct ieee80211_vif *vif, 3332 struct ieee80211_scan_request *hw_req) 3333 { 3334 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 3335 struct ath12k *ar, *prev_ar; 3336 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3337 struct cfg80211_scan_request *req = &hw_req->req; 3338 struct ath12k_wmi_scan_req_arg arg = {}; 3339 int ret; 3340 int i; 3341 bool create = true; 3342 3343 if (ah->num_radio == 1) { 3344 WARN_ON(!arvif->is_created); 3345 ar = ath12k_ah_to_ar(ah, 0); 3346 goto scan; 3347 } 3348 3349 /* Since the targeted scan device could depend on the frequency 3350 * requested in the hw_req, select the corresponding radio 3351 */ 3352 ar = ath12k_mac_select_scan_device(hw, vif, hw_req); 3353 if (!ar) 3354 return -EINVAL; 3355 3356 /* If the vif is already assigned to a specific vdev of an ar, 3357 * check whether its already started, vdev which is started 3358 * are not allowed to switch to a new radio. 3359 * If the vdev is not started, but was earlier created on a 3360 * different ar, delete that vdev and create a new one. We don't 3361 * delete at the scan stop as an optimization to avoid redundant 3362 * delete-create vdev's for the same ar, in case the request is 3363 * always on the same band for the vif 3364 */ 3365 if (arvif->is_created) { 3366 if (WARN_ON(!arvif->ar)) 3367 return -EINVAL; 3368 3369 if (ar != arvif->ar && arvif->is_started) 3370 return -EINVAL; 3371 3372 if (ar != arvif->ar) { 3373 /* backup the previously used ar ptr, since the vdev delete 3374 * would assign the arvif->ar to NULL after the call 3375 */ 3376 prev_ar = arvif->ar; 3377 mutex_lock(&prev_ar->conf_mutex); 3378 ret = ath12k_mac_vdev_delete(prev_ar, vif); 3379 mutex_unlock(&prev_ar->conf_mutex); 3380 if (ret) 3381 ath12k_warn(prev_ar->ab, 3382 "unable to delete scan vdev %d\n", ret); 3383 } else { 3384 create = false; 3385 } 3386 } 3387 if (create) { 3388 mutex_lock(&ar->conf_mutex); 3389 ret = ath12k_mac_vdev_create(ar, vif); 3390 mutex_unlock(&ar->conf_mutex); 3391 if (ret) { 3392 ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret); 3393 return -EINVAL; 3394 } 3395 } 3396 scan: 3397 mutex_lock(&ar->conf_mutex); 3398 3399 spin_lock_bh(&ar->data_lock); 3400 switch (ar->scan.state) { 3401 case ATH12K_SCAN_IDLE: 3402 reinit_completion(&ar->scan.started); 3403 reinit_completion(&ar->scan.completed); 3404 ar->scan.state = ATH12K_SCAN_STARTING; 3405 ar->scan.is_roc = false; 3406 ar->scan.vdev_id = arvif->vdev_id; 3407 ret = 0; 3408 break; 3409 case ATH12K_SCAN_STARTING: 3410 case ATH12K_SCAN_RUNNING: 3411 case ATH12K_SCAN_ABORTING: 3412 ret = -EBUSY; 3413 break; 3414 } 3415 spin_unlock_bh(&ar->data_lock); 3416 3417 if (ret) 3418 goto exit; 3419 3420 ath12k_wmi_start_scan_init(ar, &arg); 3421 arg.vdev_id = arvif->vdev_id; 3422 arg.scan_id = ATH12K_SCAN_ID; 3423 3424 if (req->ie_len) { 3425 arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL); 3426 if (!arg.extraie.ptr) { 3427 ret = -ENOMEM; 3428 goto exit; 3429 } 3430 arg.extraie.len = req->ie_len; 3431 } 3432 3433 if (req->n_ssids) { 3434 arg.num_ssids = req->n_ssids; 3435 for (i = 0; i < arg.num_ssids; i++) 3436 arg.ssid[i] = req->ssids[i]; 3437 } else { 3438 arg.scan_f_passive = 1; 3439 } 3440 3441 if (req->n_channels) { 3442 arg.num_chan = 
req->n_channels; 3443 arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), 3444 GFP_KERNEL); 3445 3446 if (!arg.chan_list) { 3447 ret = -ENOMEM; 3448 goto exit; 3449 } 3450 3451 for (i = 0; i < arg.num_chan; i++) 3452 arg.chan_list[i] = req->channels[i]->center_freq; 3453 } 3454 3455 ret = ath12k_start_scan(ar, &arg); 3456 if (ret) { 3457 ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret); 3458 spin_lock_bh(&ar->data_lock); 3459 ar->scan.state = ATH12K_SCAN_IDLE; 3460 spin_unlock_bh(&ar->data_lock); 3461 } 3462 3463 /* Add a margin to account for event/command processing */ 3464 ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout, 3465 msecs_to_jiffies(arg.max_scan_time + 3466 ATH12K_MAC_SCAN_TIMEOUT_MSECS)); 3467 3468 exit: 3469 kfree(arg.chan_list); 3470 3471 if (req->ie_len) 3472 kfree(arg.extraie.ptr); 3473 3474 mutex_unlock(&ar->conf_mutex); 3475 3476 return ret; 3477 } 3478 3479 static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw, 3480 struct ieee80211_vif *vif) 3481 { 3482 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3483 struct ath12k *ar; 3484 3485 if (!arvif->is_created) 3486 return; 3487 3488 ar = arvif->ar; 3489 3490 mutex_lock(&ar->conf_mutex); 3491 ath12k_scan_abort(ar); 3492 mutex_unlock(&ar->conf_mutex); 3493 3494 cancel_delayed_work_sync(&ar->scan.timeout); 3495 } 3496 3497 static int ath12k_install_key(struct ath12k_vif *arvif, 3498 struct ieee80211_key_conf *key, 3499 enum set_key_cmd cmd, 3500 const u8 *macaddr, u32 flags) 3501 { 3502 int ret; 3503 struct ath12k *ar = arvif->ar; 3504 struct wmi_vdev_install_key_arg arg = { 3505 .vdev_id = arvif->vdev_id, 3506 .key_idx = key->keyidx, 3507 .key_len = key->keylen, 3508 .key_data = key->key, 3509 .key_flags = flags, 3510 .macaddr = macaddr, 3511 }; 3512 3513 lockdep_assert_held(&arvif->ar->conf_mutex); 3514 3515 reinit_completion(&ar->install_key_done); 3516 3517 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) 3518 return 0; 3519 3520 if (cmd == DISABLE_KEY) { 3521 /* TODO: Check if FW expects value other than NONE for del */ 3522 /* arg.key_cipher = WMI_CIPHER_NONE; */ 3523 arg.key_len = 0; 3524 arg.key_data = NULL; 3525 goto install; 3526 } 3527 3528 switch (key->cipher) { 3529 case WLAN_CIPHER_SUITE_CCMP: 3530 arg.key_cipher = WMI_CIPHER_AES_CCM; 3531 /* TODO: Re-check if flag is valid */ 3532 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 3533 break; 3534 case WLAN_CIPHER_SUITE_TKIP: 3535 arg.key_cipher = WMI_CIPHER_TKIP; 3536 arg.key_txmic_len = 8; 3537 arg.key_rxmic_len = 8; 3538 break; 3539 case WLAN_CIPHER_SUITE_CCMP_256: 3540 arg.key_cipher = WMI_CIPHER_AES_CCM; 3541 break; 3542 case WLAN_CIPHER_SUITE_GCMP: 3543 case WLAN_CIPHER_SUITE_GCMP_256: 3544 arg.key_cipher = WMI_CIPHER_AES_GCM; 3545 break; 3546 default: 3547 ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); 3548 return -EOPNOTSUPP; 3549 } 3550 3551 if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) 3552 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV | 3553 IEEE80211_KEY_FLAG_RESERVE_TAILROOM; 3554 3555 install: 3556 ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg); 3557 3558 if (ret) 3559 return ret; 3560 3561 if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ)) 3562 return -ETIMEDOUT; 3563 3564 if (ether_addr_equal(macaddr, arvif->vif->addr)) 3565 arvif->key_cipher = key->cipher; 3566 3567 return ar->install_key_status ? 
-EINVAL : 0; 3568 } 3569 3570 static int ath12k_clear_peer_keys(struct ath12k_vif *arvif, 3571 const u8 *addr) 3572 { 3573 struct ath12k *ar = arvif->ar; 3574 struct ath12k_base *ab = ar->ab; 3575 struct ath12k_peer *peer; 3576 int first_errno = 0; 3577 int ret; 3578 int i; 3579 u32 flags = 0; 3580 3581 lockdep_assert_held(&ar->conf_mutex); 3582 3583 spin_lock_bh(&ab->base_lock); 3584 peer = ath12k_peer_find(ab, arvif->vdev_id, addr); 3585 spin_unlock_bh(&ab->base_lock); 3586 3587 if (!peer) 3588 return -ENOENT; 3589 3590 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 3591 if (!peer->keys[i]) 3592 continue; 3593 3594 /* key flags are not required to delete the key */ 3595 ret = ath12k_install_key(arvif, peer->keys[i], 3596 DISABLE_KEY, addr, flags); 3597 if (ret < 0 && first_errno == 0) 3598 first_errno = ret; 3599 3600 if (ret < 0) 3601 ath12k_warn(ab, "failed to remove peer key %d: %d\n", 3602 i, ret); 3603 3604 spin_lock_bh(&ab->base_lock); 3605 peer->keys[i] = NULL; 3606 spin_unlock_bh(&ab->base_lock); 3607 } 3608 3609 return first_errno; 3610 } 3611 3612 static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd, 3613 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 3614 struct ieee80211_key_conf *key) 3615 { 3616 struct ath12k_base *ab = ar->ab; 3617 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3618 struct ath12k_peer *peer; 3619 struct ath12k_sta *arsta; 3620 const u8 *peer_addr; 3621 int ret = 0; 3622 u32 flags = 0; 3623 3624 lockdep_assert_held(&ar->conf_mutex); 3625 3626 if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags)) 3627 return 1; 3628 3629 if (sta) 3630 peer_addr = sta->addr; 3631 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 3632 peer_addr = vif->bss_conf.bssid; 3633 else 3634 peer_addr = vif->addr; 3635 3636 key->hw_key_idx = key->keyidx; 3637 3638 /* the peer should not disappear in mid-way (unless FW goes awry) since 3639 * we already hold conf_mutex. we just make sure its there now. 
3640 */ 3641 spin_lock_bh(&ab->base_lock); 3642 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 3643 spin_unlock_bh(&ab->base_lock); 3644 3645 if (!peer) { 3646 if (cmd == SET_KEY) { 3647 ath12k_warn(ab, "cannot install key for non-existent peer %pM\n", 3648 peer_addr); 3649 ret = -EOPNOTSUPP; 3650 goto exit; 3651 } else { 3652 /* if the peer doesn't exist there is no key to disable 3653 * anymore 3654 */ 3655 goto exit; 3656 } 3657 } 3658 3659 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3660 flags |= WMI_KEY_PAIRWISE; 3661 else 3662 flags |= WMI_KEY_GROUP; 3663 3664 ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags); 3665 if (ret) { 3666 ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret); 3667 goto exit; 3668 } 3669 3670 ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key); 3671 if (ret) { 3672 ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret); 3673 goto exit; 3674 } 3675 3676 spin_lock_bh(&ab->base_lock); 3677 peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr); 3678 if (peer && cmd == SET_KEY) { 3679 peer->keys[key->keyidx] = key; 3680 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3681 peer->ucast_keyidx = key->keyidx; 3682 peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher); 3683 } else { 3684 peer->mcast_keyidx = key->keyidx; 3685 peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher); 3686 } 3687 } else if (peer && cmd == DISABLE_KEY) { 3688 peer->keys[key->keyidx] = NULL; 3689 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3690 peer->ucast_keyidx = 0; 3691 else 3692 peer->mcast_keyidx = 0; 3693 } else if (!peer) 3694 /* impossible unless FW goes crazy */ 3695 ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr); 3696 3697 if (sta) { 3698 arsta = ath12k_sta_to_arsta(sta); 3699 3700 switch (key->cipher) { 3701 case WLAN_CIPHER_SUITE_TKIP: 3702 case WLAN_CIPHER_SUITE_CCMP: 3703 case WLAN_CIPHER_SUITE_CCMP_256: 3704 case WLAN_CIPHER_SUITE_GCMP: 3705 case WLAN_CIPHER_SUITE_GCMP_256: 3706 if (cmd == SET_KEY) 3707 arsta->pn_type = HAL_PN_TYPE_WPA; 3708 else 3709 arsta->pn_type = HAL_PN_TYPE_NONE; 3710 break; 3711 default: 3712 arsta->pn_type = HAL_PN_TYPE_NONE; 3713 break; 3714 } 3715 } 3716 3717 spin_unlock_bh(&ab->base_lock); 3718 3719 exit: 3720 return ret; 3721 } 3722 3723 static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3724 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 3725 struct ieee80211_key_conf *key) 3726 { 3727 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3728 struct ath12k_vif_cache *cache; 3729 struct ath12k *ar; 3730 int ret; 3731 3732 /* BIP needs to be done in software */ 3733 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3734 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3735 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 3736 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 3737 return 1; 3738 3739 if (key->keyidx > WMI_MAX_KEY_INDEX) 3740 return -ENOSPC; 3741 3742 ar = ath12k_get_ar_by_vif(hw, vif); 3743 if (!ar) { 3744 /* ar is expected to be valid when sta ptr is available */ 3745 if (sta) { 3746 WARN_ON_ONCE(1); 3747 return -EINVAL; 3748 } 3749 3750 cache = ath12k_arvif_get_cache(arvif); 3751 if (!cache) 3752 return -ENOSPC; 3753 cache->key_conf.cmd = cmd; 3754 cache->key_conf.key = key; 3755 cache->key_conf.changed = true; 3756 return 0; 3757 } 3758 3759 mutex_lock(&ar->conf_mutex); 3760 ret = ath12k_mac_set_key(ar, cmd, vif, sta, key); 3761 mutex_unlock(&ar->conf_mutex); 3762 return ret; 3763 } 3764 3765 static int 3766 
ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar, 3767 enum nl80211_band band, 3768 const struct cfg80211_bitrate_mask *mask) 3769 { 3770 int num_rates = 0; 3771 int i; 3772 3773 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 3774 num_rates += hweight16(mask->control[band].vht_mcs[i]); 3775 3776 return num_rates; 3777 } 3778 3779 static int 3780 ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif, 3781 struct ieee80211_sta *sta, 3782 const struct cfg80211_bitrate_mask *mask, 3783 enum nl80211_band band) 3784 { 3785 struct ath12k *ar = arvif->ar; 3786 u8 vht_rate, nss; 3787 u32 rate_code; 3788 int ret, i; 3789 3790 lockdep_assert_held(&ar->conf_mutex); 3791 3792 nss = 0; 3793 3794 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 3795 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 3796 nss = i + 1; 3797 vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1; 3798 } 3799 } 3800 3801 if (!nss) { 3802 ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM", 3803 sta->addr); 3804 return -EINVAL; 3805 } 3806 3807 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 3808 "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates", 3809 sta->addr); 3810 3811 rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1, 3812 WMI_RATE_PREAMBLE_VHT); 3813 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 3814 arvif->vdev_id, 3815 WMI_PEER_PARAM_FIXED_RATE, 3816 rate_code); 3817 if (ret) 3818 ath12k_warn(ar->ab, 3819 "failed to update STA %pM Fixed Rate %d: %d\n", 3820 sta->addr, rate_code, ret); 3821 3822 return ret; 3823 } 3824 3825 static int ath12k_station_assoc(struct ath12k *ar, 3826 struct ieee80211_vif *vif, 3827 struct ieee80211_sta *sta, 3828 bool reassoc) 3829 { 3830 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3831 struct ath12k_wmi_peer_assoc_arg peer_arg; 3832 int ret; 3833 struct cfg80211_chan_def def; 3834 enum nl80211_band band; 3835 struct cfg80211_bitrate_mask *mask; 3836 u8 num_vht_rates; 3837 3838 lockdep_assert_held(&ar->conf_mutex); 3839 3840 if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) 3841 return -EPERM; 3842 3843 band = def.chan->band; 3844 mask = &arvif->bitrate_mask; 3845 3846 ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); 3847 3848 ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 3849 if (ret) { 3850 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 3851 sta->addr, arvif->vdev_id, ret); 3852 return ret; 3853 } 3854 3855 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) { 3856 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 3857 sta->addr, arvif->vdev_id); 3858 return -ETIMEDOUT; 3859 } 3860 3861 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask); 3862 3863 /* If single VHT rate is configured (by set_bitrate_mask()), 3864 * peer_assoc will disable VHT. This is now enabled by a peer specific 3865 * fixed param. 3866 * Note that all other rates and NSS will be disabled for this peer. 3867 */ 3868 if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) { 3869 ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask, 3870 band); 3871 if (ret) 3872 return ret; 3873 } 3874 3875 /* Re-assoc is run only to update supported rates for given station. It 3876 * doesn't make much sense to reconfigure the peer completely. 
3877 */ 3878 if (reassoc) 3879 return 0; 3880 3881 ret = ath12k_setup_peer_smps(ar, arvif, sta->addr, 3882 &sta->deflink.ht_cap); 3883 if (ret) { 3884 ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", 3885 arvif->vdev_id, ret); 3886 return ret; 3887 } 3888 3889 if (!sta->wme) { 3890 arvif->num_legacy_stations++; 3891 ret = ath12k_recalc_rtscts_prot(arvif); 3892 if (ret) 3893 return ret; 3894 } 3895 3896 if (sta->wme && sta->uapsd_queues) { 3897 ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta); 3898 if (ret) { 3899 ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n", 3900 sta->addr, arvif->vdev_id, ret); 3901 return ret; 3902 } 3903 } 3904 3905 return 0; 3906 } 3907 3908 static int ath12k_station_disassoc(struct ath12k *ar, 3909 struct ieee80211_vif *vif, 3910 struct ieee80211_sta *sta) 3911 { 3912 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 3913 int ret; 3914 3915 lockdep_assert_held(&ar->conf_mutex); 3916 3917 if (!sta->wme) { 3918 arvif->num_legacy_stations--; 3919 ret = ath12k_recalc_rtscts_prot(arvif); 3920 if (ret) 3921 return ret; 3922 } 3923 3924 ret = ath12k_clear_peer_keys(arvif, sta->addr); 3925 if (ret) { 3926 ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n", 3927 arvif->vdev_id, ret); 3928 return ret; 3929 } 3930 return 0; 3931 } 3932 3933 static void ath12k_sta_rc_update_wk(struct work_struct *wk) 3934 { 3935 struct ath12k *ar; 3936 struct ath12k_vif *arvif; 3937 struct ath12k_sta *arsta; 3938 struct ieee80211_sta *sta; 3939 struct cfg80211_chan_def def; 3940 enum nl80211_band band; 3941 const u8 *ht_mcs_mask; 3942 const u16 *vht_mcs_mask; 3943 u32 changed, bw, nss, smps, bw_prev; 3944 int err, num_vht_rates; 3945 const struct cfg80211_bitrate_mask *mask; 3946 struct ath12k_wmi_peer_assoc_arg peer_arg; 3947 enum wmi_phy_mode peer_phymode; 3948 3949 arsta = container_of(wk, struct ath12k_sta, update_wk); 3950 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 3951 arvif = arsta->arvif; 3952 ar = arvif->ar; 3953 3954 if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def))) 3955 return; 3956 3957 band = def.chan->band; 3958 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 3959 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 3960 3961 spin_lock_bh(&ar->data_lock); 3962 3963 changed = arsta->changed; 3964 arsta->changed = 0; 3965 3966 bw = arsta->bw; 3967 bw_prev = arsta->bw_prev; 3968 nss = arsta->nss; 3969 smps = arsta->smps; 3970 3971 spin_unlock_bh(&ar->data_lock); 3972 3973 mutex_lock(&ar->conf_mutex); 3974 3975 nss = max_t(u32, 1, nss); 3976 nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask), 3977 ath12k_mac_max_vht_nss(vht_mcs_mask))); 3978 3979 if (changed & IEEE80211_RC_BW_CHANGED) { 3980 ath12k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg); 3981 peer_phymode = peer_arg.peer_phymode; 3982 3983 if (bw > bw_prev) { 3984 /* Phymode shows maximum supported channel width, if we 3985 * upgrade bandwidth then due to sanity check of firmware, 3986 * we have to send WMI_PEER_PHYMODE followed by 3987 * WMI_PEER_CHWIDTH 3988 */ 3989 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n", 3990 sta->addr, bw, bw_prev); 3991 err = ath12k_wmi_set_peer_param(ar, sta->addr, 3992 arvif->vdev_id, WMI_PEER_PHYMODE, 3993 peer_phymode); 3994 if (err) { 3995 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n", 3996 sta->addr, peer_phymode, err); 3997 goto err_rc_bw_changed; 3998 } 3999 err = ath12k_wmi_set_peer_param(ar, sta->addr, 4000 
arvif->vdev_id, WMI_PEER_CHWIDTH,
4001 bw);
4002 if (err)
4003 ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
4004 sta->addr, bw, err);
4005 } else {
4006 /* When we downgrade bandwidth this will conflict with phymode
4007 * and can trigger a firmware crash. In this case we send
4008 * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
4009 */
4010 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
4011 sta->addr, bw, bw_prev);
4012 err = ath12k_wmi_set_peer_param(ar, sta->addr,
4013 arvif->vdev_id, WMI_PEER_CHWIDTH,
4014 bw);
4015 if (err) {
4016 ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
4017 sta->addr, bw, err);
4018 goto err_rc_bw_changed;
4019 }
4020 err = ath12k_wmi_set_peer_param(ar, sta->addr,
4021 arvif->vdev_id, WMI_PEER_PHYMODE,
4022 peer_phymode);
4023 if (err)
4024 ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
4025 sta->addr, peer_phymode, err);
4026 }
4027 }
4028
4029 if (changed & IEEE80211_RC_NSS_CHANGED) {
4030 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
4031 sta->addr, nss);
4032
4033 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
4034 WMI_PEER_NSS, nss);
4035 if (err)
4036 ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
4037 sta->addr, nss, err);
4038 }
4039
4040 if (changed & IEEE80211_RC_SMPS_CHANGED) {
4041 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
4042 sta->addr, smps);
4043
4044 err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
4045 WMI_PEER_MIMO_PS_STATE, smps);
4046 if (err)
4047 ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
4048 sta->addr, smps, err);
4049 }
4050
4051 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
4052 mask = &arvif->bitrate_mask;
4053 num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
4054 mask);
4055
4056 /* Peer_assoc_prepare will reject vht rates in
4057 * bitrate_mask if it's not available in range format and
4058 * sets vht tx_rateset as unsupported. So multiple VHT MCS
4059 * setting (e.g. MCS 4,5,6) per peer is not supported here.
4060 * But, Single rate in VHT mask can be set as per-peer
4061 * fixed rate. But even if any HT rates are configured in
4062 * the bitrate mask, device will not switch to those rates
4063 * when per-peer Fixed rate is set.
4064 * TODO: Check RATEMASK_CMDID to support auto rates selection
4065 * across HT/VHT and for multiple VHT MCS support.
4066 */
4067 if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
4068 ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
4069 band);
4070 } else {
4071 /* If the peer is non-VHT or no fixed VHT rate
4072 * is provided in the new bitrate mask we set the
4073 * other rates using peer_assoc command.
4074 */ 4075 ath12k_peer_assoc_prepare(ar, arvif->vif, sta, 4076 &peer_arg, true); 4077 4078 err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); 4079 if (err) 4080 ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", 4081 sta->addr, arvif->vdev_id, err); 4082 4083 if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) 4084 ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n", 4085 sta->addr, arvif->vdev_id); 4086 } 4087 } 4088 err_rc_bw_changed: 4089 mutex_unlock(&ar->conf_mutex); 4090 } 4091 4092 static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif, 4093 struct ieee80211_sta *sta) 4094 { 4095 struct ath12k *ar = arvif->ar; 4096 4097 lockdep_assert_held(&ar->conf_mutex); 4098 4099 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 4100 return 0; 4101 4102 if (ar->num_stations >= ar->max_num_stations) 4103 return -ENOBUFS; 4104 4105 ar->num_stations++; 4106 4107 return 0; 4108 } 4109 4110 static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif, 4111 struct ieee80211_sta *sta) 4112 { 4113 struct ath12k *ar = arvif->ar; 4114 4115 lockdep_assert_held(&ar->conf_mutex); 4116 4117 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 4118 return; 4119 4120 ar->num_stations--; 4121 } 4122 4123 static int ath12k_mac_station_add(struct ath12k *ar, 4124 struct ieee80211_vif *vif, 4125 struct ieee80211_sta *sta) 4126 { 4127 struct ath12k_base *ab = ar->ab; 4128 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4129 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 4130 struct ath12k_wmi_peer_create_arg peer_param; 4131 int ret; 4132 4133 lockdep_assert_held(&ar->conf_mutex); 4134 4135 ret = ath12k_mac_inc_num_stations(arvif, sta); 4136 if (ret) { 4137 ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n", 4138 ar->max_num_stations); 4139 goto exit; 4140 } 4141 4142 arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL); 4143 if (!arsta->rx_stats) { 4144 ret = -ENOMEM; 4145 goto dec_num_station; 4146 } 4147 4148 peer_param.vdev_id = arvif->vdev_id; 4149 peer_param.peer_addr = sta->addr; 4150 peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 4151 4152 ret = ath12k_peer_create(ar, arvif, sta, &peer_param); 4153 if (ret) { 4154 ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n", 4155 sta->addr, arvif->vdev_id); 4156 goto free_peer; 4157 } 4158 4159 ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n", 4160 sta->addr, arvif->vdev_id); 4161 4162 if (ieee80211_vif_is_mesh(vif)) { 4163 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 4164 arvif->vdev_id, 4165 WMI_PEER_USE_4ADDR, 1); 4166 if (ret) { 4167 ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n", 4168 sta->addr, ret); 4169 goto free_peer; 4170 } 4171 } 4172 4173 ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr); 4174 if (ret) { 4175 ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n", 4176 sta->addr, arvif->vdev_id, ret); 4177 goto free_peer; 4178 } 4179 4180 if (ab->hw_params->vdev_start_delay && 4181 !arvif->is_started && 4182 arvif->vdev_type != WMI_VDEV_TYPE_AP) { 4183 ret = ath12k_start_vdev_delay(ar, arvif); 4184 if (ret) { 4185 ath12k_warn(ab, "failed to delay vdev start: %d\n", ret); 4186 goto free_peer; 4187 } 4188 } 4189 4190 return 0; 4191 4192 free_peer: 4193 ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); 4194 dec_num_station: 4195 ath12k_mac_dec_num_stations(arvif, sta); 4196 exit: 4197 return ret; 4198 } 4199 4200 static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct 
ath12k *ar, 4201 struct ieee80211_sta *sta) 4202 { 4203 u32 bw = WMI_PEER_CHWIDTH_20MHZ; 4204 4205 switch (sta->deflink.bandwidth) { 4206 case IEEE80211_STA_RX_BW_20: 4207 bw = WMI_PEER_CHWIDTH_20MHZ; 4208 break; 4209 case IEEE80211_STA_RX_BW_40: 4210 bw = WMI_PEER_CHWIDTH_40MHZ; 4211 break; 4212 case IEEE80211_STA_RX_BW_80: 4213 bw = WMI_PEER_CHWIDTH_80MHZ; 4214 break; 4215 case IEEE80211_STA_RX_BW_160: 4216 bw = WMI_PEER_CHWIDTH_160MHZ; 4217 break; 4218 case IEEE80211_STA_RX_BW_320: 4219 bw = WMI_PEER_CHWIDTH_320MHZ; 4220 break; 4221 default: 4222 ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n", 4223 sta->deflink.bandwidth, sta->addr); 4224 bw = WMI_PEER_CHWIDTH_20MHZ; 4225 break; 4226 } 4227 4228 return bw; 4229 } 4230 4231 static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw, 4232 struct ieee80211_vif *vif, 4233 struct ieee80211_sta *sta, 4234 enum ieee80211_sta_state old_state, 4235 enum ieee80211_sta_state new_state) 4236 { 4237 struct ath12k *ar; 4238 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4239 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 4240 struct ath12k_peer *peer; 4241 int ret = 0; 4242 4243 /* cancel must be done outside the mutex to avoid deadlock */ 4244 if ((old_state == IEEE80211_STA_NONE && 4245 new_state == IEEE80211_STA_NOTEXIST)) 4246 cancel_work_sync(&arsta->update_wk); 4247 4248 ar = ath12k_get_ar_by_vif(hw, vif); 4249 if (!ar) { 4250 WARN_ON_ONCE(1); 4251 return -EINVAL; 4252 } 4253 4254 mutex_lock(&ar->conf_mutex); 4255 4256 if (old_state == IEEE80211_STA_NOTEXIST && 4257 new_state == IEEE80211_STA_NONE) { 4258 memset(arsta, 0, sizeof(*arsta)); 4259 arsta->arvif = arvif; 4260 INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk); 4261 4262 ret = ath12k_mac_station_add(ar, vif, sta); 4263 if (ret) 4264 ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n", 4265 sta->addr, arvif->vdev_id); 4266 } else if ((old_state == IEEE80211_STA_NONE && 4267 new_state == IEEE80211_STA_NOTEXIST)) { 4268 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 4269 ath12k_bss_disassoc(ar, arvif); 4270 ret = ath12k_mac_vdev_stop(arvif); 4271 if (ret) 4272 ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n", 4273 arvif->vdev_id, ret); 4274 } 4275 ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr); 4276 4277 ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr); 4278 if (ret) 4279 ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n", 4280 sta->addr, arvif->vdev_id); 4281 else 4282 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n", 4283 sta->addr, arvif->vdev_id); 4284 4285 ath12k_mac_dec_num_stations(arvif, sta); 4286 spin_lock_bh(&ar->ab->base_lock); 4287 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 4288 if (peer && peer->sta == sta) { 4289 ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n", 4290 vif->addr, arvif->vdev_id); 4291 peer->sta = NULL; 4292 list_del(&peer->list); 4293 kfree(peer); 4294 ar->num_peers--; 4295 } 4296 spin_unlock_bh(&ar->ab->base_lock); 4297 4298 kfree(arsta->rx_stats); 4299 arsta->rx_stats = NULL; 4300 } else if (old_state == IEEE80211_STA_AUTH && 4301 new_state == IEEE80211_STA_ASSOC && 4302 (vif->type == NL80211_IFTYPE_AP || 4303 vif->type == NL80211_IFTYPE_MESH_POINT || 4304 vif->type == NL80211_IFTYPE_ADHOC)) { 4305 ret = ath12k_station_assoc(ar, vif, sta, false); 4306 if (ret) 4307 ath12k_warn(ar->ab, "Failed to associate station: %pM\n", 4308 sta->addr); 4309 4310 spin_lock_bh(&ar->data_lock); 4311 4312 arsta->bw = 
ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 4313 arsta->bw_prev = sta->deflink.bandwidth; 4314 4315 spin_unlock_bh(&ar->data_lock); 4316 } else if (old_state == IEEE80211_STA_ASSOC && 4317 new_state == IEEE80211_STA_AUTHORIZED) { 4318 spin_lock_bh(&ar->ab->base_lock); 4319 4320 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 4321 if (peer) 4322 peer->is_authorized = true; 4323 4324 spin_unlock_bh(&ar->ab->base_lock); 4325 4326 if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) { 4327 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 4328 arvif->vdev_id, 4329 WMI_PEER_AUTHORIZE, 4330 1); 4331 if (ret) 4332 ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n", 4333 sta->addr, arvif->vdev_id, ret); 4334 } 4335 } else if (old_state == IEEE80211_STA_AUTHORIZED && 4336 new_state == IEEE80211_STA_ASSOC) { 4337 spin_lock_bh(&ar->ab->base_lock); 4338 4339 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 4340 if (peer) 4341 peer->is_authorized = false; 4342 4343 spin_unlock_bh(&ar->ab->base_lock); 4344 } else if (old_state == IEEE80211_STA_ASSOC && 4345 new_state == IEEE80211_STA_AUTH && 4346 (vif->type == NL80211_IFTYPE_AP || 4347 vif->type == NL80211_IFTYPE_MESH_POINT || 4348 vif->type == NL80211_IFTYPE_ADHOC)) { 4349 ret = ath12k_station_disassoc(ar, vif, sta); 4350 if (ret) 4351 ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n", 4352 sta->addr); 4353 } 4354 4355 mutex_unlock(&ar->conf_mutex); 4356 4357 return ret; 4358 } 4359 4360 static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw, 4361 struct ieee80211_vif *vif, 4362 struct ieee80211_sta *sta) 4363 { 4364 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 4365 struct ath12k *ar; 4366 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4367 int ret; 4368 s16 txpwr; 4369 4370 if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) { 4371 txpwr = 0; 4372 } else { 4373 txpwr = sta->deflink.txpwr.power; 4374 if (!txpwr) 4375 return -EINVAL; 4376 } 4377 4378 if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL) 4379 return -EINVAL; 4380 4381 ar = ath12k_ah_to_ar(ah, 0); 4382 4383 mutex_lock(&ar->conf_mutex); 4384 4385 ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id, 4386 WMI_PEER_USE_FIXED_PWR, txpwr); 4387 if (ret) { 4388 ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n", 4389 ret); 4390 goto out; 4391 } 4392 4393 out: 4394 mutex_unlock(&ar->conf_mutex); 4395 return ret; 4396 } 4397 4398 static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw, 4399 struct ieee80211_vif *vif, 4400 struct ieee80211_sta *sta, 4401 u32 changed) 4402 { 4403 struct ath12k *ar; 4404 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 4405 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4406 struct ath12k_peer *peer; 4407 u32 bw, smps; 4408 4409 ar = ath12k_get_ar_by_vif(hw, vif); 4410 if (!ar) { 4411 WARN_ON_ONCE(1); 4412 return; 4413 } 4414 4415 spin_lock_bh(&ar->ab->base_lock); 4416 4417 peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr); 4418 if (!peer) { 4419 spin_unlock_bh(&ar->ab->base_lock); 4420 ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n", 4421 sta->addr, arvif->vdev_id); 4422 return; 4423 } 4424 4425 spin_unlock_bh(&ar->ab->base_lock); 4426 4427 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 4428 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 4429 sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss, 4430 sta->deflink.smps_mode); 4431 4432 spin_lock_bh(&ar->data_lock); 4433 4434 if 
(changed & IEEE80211_RC_BW_CHANGED) { 4435 bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta); 4436 arsta->bw_prev = arsta->bw; 4437 arsta->bw = bw; 4438 } 4439 4440 if (changed & IEEE80211_RC_NSS_CHANGED) 4441 arsta->nss = sta->deflink.rx_nss; 4442 4443 if (changed & IEEE80211_RC_SMPS_CHANGED) { 4444 smps = WMI_PEER_SMPS_PS_NONE; 4445 4446 switch (sta->deflink.smps_mode) { 4447 case IEEE80211_SMPS_AUTOMATIC: 4448 case IEEE80211_SMPS_OFF: 4449 smps = WMI_PEER_SMPS_PS_NONE; 4450 break; 4451 case IEEE80211_SMPS_STATIC: 4452 smps = WMI_PEER_SMPS_STATIC; 4453 break; 4454 case IEEE80211_SMPS_DYNAMIC: 4455 smps = WMI_PEER_SMPS_DYNAMIC; 4456 break; 4457 default: 4458 ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n", 4459 sta->deflink.smps_mode, sta->addr); 4460 smps = WMI_PEER_SMPS_PS_NONE; 4461 break; 4462 } 4463 4464 arsta->smps = smps; 4465 } 4466 4467 arsta->changed |= changed; 4468 4469 spin_unlock_bh(&ar->data_lock); 4470 4471 ieee80211_queue_work(hw, &arsta->update_wk); 4472 } 4473 4474 static int ath12k_conf_tx_uapsd(struct ath12k_vif *arvif, 4475 u16 ac, bool enable) 4476 { 4477 struct ath12k *ar = arvif->ar; 4478 u32 value; 4479 int ret; 4480 4481 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 4482 return 0; 4483 4484 switch (ac) { 4485 case IEEE80211_AC_VO: 4486 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 4487 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 4488 break; 4489 case IEEE80211_AC_VI: 4490 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 4491 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 4492 break; 4493 case IEEE80211_AC_BE: 4494 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 4495 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 4496 break; 4497 case IEEE80211_AC_BK: 4498 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 4499 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 4500 break; 4501 } 4502 4503 if (enable) 4504 arvif->u.sta.uapsd |= value; 4505 else 4506 arvif->u.sta.uapsd &= ~value; 4507 4508 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 4509 WMI_STA_PS_PARAM_UAPSD, 4510 arvif->u.sta.uapsd); 4511 if (ret) { 4512 ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret); 4513 goto exit; 4514 } 4515 4516 if (arvif->u.sta.uapsd) 4517 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 4518 else 4519 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 4520 4521 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 4522 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 4523 value); 4524 if (ret) 4525 ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret); 4526 4527 exit: 4528 return ret; 4529 } 4530 4531 static int ath12k_mac_conf_tx(struct ath12k_vif *arvif, 4532 unsigned int link_id, u16 ac, 4533 const struct ieee80211_tx_queue_params *params) 4534 { 4535 struct wmi_wmm_params_arg *p = NULL; 4536 struct ath12k *ar = arvif->ar; 4537 struct ath12k_base *ab = ar->ab; 4538 int ret; 4539 4540 lockdep_assert_held(&ar->conf_mutex); 4541 4542 switch (ac) { 4543 case IEEE80211_AC_VO: 4544 p = &arvif->wmm_params.ac_vo; 4545 break; 4546 case IEEE80211_AC_VI: 4547 p = &arvif->wmm_params.ac_vi; 4548 break; 4549 case IEEE80211_AC_BE: 4550 p = &arvif->wmm_params.ac_be; 4551 break; 4552 case IEEE80211_AC_BK: 4553 p = &arvif->wmm_params.ac_bk; 4554 break; 4555 } 4556 4557 if (WARN_ON(!p)) { 4558 ret = -EINVAL; 4559 goto exit; 4560 } 4561 4562 p->cwmin = params->cw_min; 4563 p->cwmax = params->cw_max; 4564 p->aifs = params->aifs; 4565 p->txop = params->txop; 4566 4567 ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id, 4568 &arvif->wmm_params); 4569 if (ret) { 4570 ath12k_warn(ab, "pdev idx %d failed to set wmm params: %d\n", 4571 ar->pdev_idx, ret); 4572 goto exit; 
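/* Descriptive note: the WMM parameters for this AC were merged into the
 * vdev-wide arvif->wmm_params block above; the per-AC U-APSD
 * delivery/trigger bits are programmed next via ath12k_conf_tx_uapsd().
 */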
4573 } 4574 4575 ret = ath12k_conf_tx_uapsd(arvif, ac, params->uapsd); 4576 if (ret) 4577 ath12k_warn(ab, "pdev idx %d failed to set sta uapsd: %d\n", 4578 ar->pdev_idx, ret); 4579 4580 exit: 4581 return ret; 4582 } 4583 4584 static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw, 4585 struct ieee80211_vif *vif, 4586 unsigned int link_id, u16 ac, 4587 const struct ieee80211_tx_queue_params *params) 4588 { 4589 struct ath12k *ar; 4590 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 4591 struct ath12k_vif_cache *cache = arvif->cache; 4592 int ret; 4593 4594 ar = ath12k_get_ar_by_vif(hw, vif); 4595 if (!ar) { 4596 /* cache the info and apply after vdev is created */ 4597 cache = ath12k_arvif_get_cache(arvif); 4598 if (!cache) 4599 return -ENOSPC; 4600 cache->tx_conf.changed = true; 4601 cache->tx_conf.ac = ac; 4602 cache->tx_conf.tx_queue_params = *params; 4603 return 0; 4604 } 4605 4606 mutex_lock(&ar->conf_mutex); 4607 ret = ath12k_mac_conf_tx(arvif, link_id, ac, params); 4608 mutex_unlock(&ar->conf_mutex); 4609 4610 return ret; 4611 } 4612 4613 static struct ieee80211_sta_ht_cap 4614 ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask) 4615 { 4616 int i; 4617 struct ieee80211_sta_ht_cap ht_cap = {0}; 4618 u32 ar_vht_cap = ar->pdev->cap.vht_cap; 4619 4620 if (!(ar_ht_cap & WMI_HT_CAP_ENABLED)) 4621 return ht_cap; 4622 4623 ht_cap.ht_supported = 1; 4624 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4625 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; 4626 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4627 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4628 ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; 4629 4630 if (ar_ht_cap & WMI_HT_CAP_HT20_SGI) 4631 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4632 4633 if (ar_ht_cap & WMI_HT_CAP_HT40_SGI) 4634 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4635 4636 if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) { 4637 u32 smps; 4638 4639 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4640 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4641 4642 ht_cap.cap |= smps; 4643 } 4644 4645 if (ar_ht_cap & WMI_HT_CAP_TX_STBC) 4646 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4647 4648 if (ar_ht_cap & WMI_HT_CAP_RX_STBC) { 4649 u32 stbc; 4650 4651 stbc = ar_ht_cap; 4652 stbc &= WMI_HT_CAP_RX_STBC; 4653 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4654 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4655 stbc &= IEEE80211_HT_CAP_RX_STBC; 4656 4657 ht_cap.cap |= stbc; 4658 } 4659 4660 if (ar_ht_cap & WMI_HT_CAP_RX_LDPC) 4661 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4662 4663 if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT) 4664 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4665 4666 if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4667 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4668 4669 for (i = 0; i < ar->num_rx_chains; i++) { 4670 if (rate_cap_rx_chainmask & BIT(i)) 4671 ht_cap.mcs.rx_mask[i] = 0xFF; 4672 } 4673 4674 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4675 4676 return ht_cap; 4677 } 4678 4679 static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif) 4680 { 4681 u32 value = 0; 4682 struct ath12k *ar = arvif->ar; 4683 int nsts; 4684 int sound_dim; 4685 u32 vht_cap = ar->pdev->cap.vht_cap; 4686 u32 vdev_param = WMI_VDEV_PARAM_TXBF; 4687 4688 if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) { 4689 nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4690 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4691 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4692 } 4693 4694 if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) { 
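/* SU beamformer: extract the sounding dimensions field from the VHT caps
 * and clamp it to (num_tx_chains - 1) before packing it into the WMI TXBF
 * vdev parameter, e.g. with 4 TX chains at most 3 sounding dimensions are
 * reported to firmware.
 */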
4695 sound_dim = vht_cap &
4696 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4697 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4698 if (sound_dim > (ar->num_tx_chains - 1))
4699 sound_dim = ar->num_tx_chains - 1;
4700 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4701 }
4702
4703 if (!value)
4704 return 0;
4705
4706 if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
4707 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4708
4709 if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
4710 arvif->vdev_type == WMI_VDEV_TYPE_AP)
4711 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
4712 }
4713
4714 if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
4715 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4716
4717 if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
4718 arvif->vdev_type == WMI_VDEV_TYPE_STA)
4719 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
4720 }
4721
4722 return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
4723 vdev_param, value);
4724 }
4725
4726 static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
4727 {
4728 bool subfer, subfee;
4729 int sound_dim = 0;
4730
4731 subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
4732 subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
4733
4734 if (ar->num_tx_chains < 2) {
4735 *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
4736 subfer = false;
4737 }
4738
4739 /* If SU Beamformer is not set, then disable MU Beamformer Capability */
4740 if (!subfer)
4741 *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
4742
4743 /* If SU Beamformee is not set, then disable MU Beamformee Capability */
4744 if (!subfee)
4745 *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
4746
4747 sound_dim = u32_get_bits(*vht_cap,
4748 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
4749 *vht_cap = u32_replace_bits(*vht_cap, 0,
4750 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
4751
4752 /* TODO: Need to check invalid STS and Sound_dim values set by FW?
*/ 4753 4754 /* Enable Sounding Dimension Field only if SU BF is enabled */ 4755 if (subfer) { 4756 if (sound_dim > (ar->num_tx_chains - 1)) 4757 sound_dim = ar->num_tx_chains - 1; 4758 4759 *vht_cap = u32_replace_bits(*vht_cap, sound_dim, 4760 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK); 4761 } 4762 4763 /* Use the STS advertised by FW unless SU Beamformee is not supported*/ 4764 if (!subfee) 4765 *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK); 4766 } 4767 4768 static struct ieee80211_sta_vht_cap 4769 ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask, 4770 u32 rate_cap_rx_chainmask) 4771 { 4772 struct ieee80211_sta_vht_cap vht_cap = {0}; 4773 u16 txmcs_map, rxmcs_map; 4774 int i; 4775 4776 vht_cap.vht_supported = 1; 4777 vht_cap.cap = ar->pdev->cap.vht_cap; 4778 4779 ath12k_set_vht_txbf_cap(ar, &vht_cap.cap); 4780 4781 /* TODO: Enable back VHT160 mode once association issues are fixed */ 4782 /* Disabling VHT160 and VHT80+80 modes */ 4783 vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; 4784 vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160; 4785 4786 rxmcs_map = 0; 4787 txmcs_map = 0; 4788 for (i = 0; i < 8; i++) { 4789 if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i)) 4790 txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4791 else 4792 txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4793 4794 if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i)) 4795 rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4796 else 4797 rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4798 } 4799 4800 if (rate_cap_tx_chainmask <= 1) 4801 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4802 4803 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map); 4804 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map); 4805 4806 return vht_cap; 4807 } 4808 4809 static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar, 4810 struct ath12k_pdev_cap *cap, 4811 u32 *ht_cap_info) 4812 { 4813 struct ieee80211_supported_band *band; 4814 u32 rate_cap_tx_chainmask; 4815 u32 rate_cap_rx_chainmask; 4816 u32 ht_cap; 4817 4818 rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift; 4819 rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift; 4820 4821 if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { 4822 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4823 ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info; 4824 if (ht_cap_info) 4825 *ht_cap_info = ht_cap; 4826 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, 4827 rate_cap_rx_chainmask); 4828 } 4829 4830 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && 4831 (ar->ab->hw_params->single_pdev_only || 4832 !ar->supports_6ghz)) { 4833 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4834 ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info; 4835 if (ht_cap_info) 4836 *ht_cap_info = ht_cap; 4837 band->ht_cap = ath12k_create_ht_cap(ar, ht_cap, 4838 rate_cap_rx_chainmask); 4839 band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask, 4840 rate_cap_rx_chainmask); 4841 } 4842 } 4843 4844 static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant) 4845 { 4846 /* TODO: Check the request chainmask against the supported 4847 * chainmask table which is advertised in extented_service_ready event 4848 */ 4849 4850 return 0; 4851 } 4852 4853 static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, 4854 u8 *he_ppet) 4855 { 4856 int nss, ru; 4857 u8 bit = 7; 4858 4859 he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK; 4860 he_ppet[0] |= (fw_ppet->ru_bit_mask 
<< 4861 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) & 4862 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK; 4863 for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { 4864 for (ru = 0; ru < 4; ru++) { 4865 u8 val; 4866 int i; 4867 4868 if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) 4869 continue; 4870 val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) & 4871 0x3f; 4872 val = ((val >> 3) & 0x7) | ((val & 0x7) << 3); 4873 for (i = 5; i >= 0; i--) { 4874 he_ppet[bit / 8] |= 4875 ((val >> i) & 0x1) << ((bit % 8)); 4876 bit++; 4877 } 4878 } 4879 } 4880 } 4881 4882 static void 4883 ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem) 4884 { 4885 u8 m; 4886 4887 m = IEEE80211_HE_MAC_CAP0_TWT_RES | 4888 IEEE80211_HE_MAC_CAP0_TWT_REQ; 4889 he_cap_elem->mac_cap_info[0] &= ~m; 4890 4891 m = IEEE80211_HE_MAC_CAP2_TRS | 4892 IEEE80211_HE_MAC_CAP2_BCAST_TWT | 4893 IEEE80211_HE_MAC_CAP2_MU_CASCADING; 4894 he_cap_elem->mac_cap_info[2] &= ~m; 4895 4896 m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED | 4897 IEEE80211_HE_MAC_CAP2_BCAST_TWT | 4898 IEEE80211_HE_MAC_CAP2_MU_CASCADING; 4899 he_cap_elem->mac_cap_info[3] &= ~m; 4900 4901 m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG | 4902 IEEE80211_HE_MAC_CAP4_BQR; 4903 he_cap_elem->mac_cap_info[4] &= ~m; 4904 4905 m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION | 4906 IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU | 4907 IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING | 4908 IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX; 4909 he_cap_elem->mac_cap_info[5] &= ~m; 4910 4911 m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | 4912 IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO; 4913 he_cap_elem->phy_cap_info[2] &= ~m; 4914 4915 m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU | 4916 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK | 4917 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK; 4918 he_cap_elem->phy_cap_info[3] &= ~m; 4919 4920 m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER; 4921 he_cap_elem->phy_cap_info[4] &= ~m; 4922 4923 m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK; 4924 he_cap_elem->phy_cap_info[5] &= ~m; 4925 4926 m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | 4927 IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB | 4928 IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | 4929 IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO; 4930 he_cap_elem->phy_cap_info[6] &= ~m; 4931 4932 m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR | 4933 IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP | 4934 IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ | 4935 IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ; 4936 he_cap_elem->phy_cap_info[7] &= ~m; 4937 4938 m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | 4939 IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | 4940 IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | 4941 IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; 4942 he_cap_elem->phy_cap_info[8] &= ~m; 4943 4944 m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | 4945 IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | 4946 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | 4947 IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | 4948 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | 4949 IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; 4950 he_cap_elem->phy_cap_info[9] &= ~m; 4951 } 4952 4953 static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap, 4954 struct ath12k_band_cap *bcap) 4955 { 4956 u8 val; 4957 4958 bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE; 4959 if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 4960 bcap->he_6ghz_capa |= 4961 
u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC, 4962 IEEE80211_HE_6GHZ_CAP_SM_PS); 4963 else 4964 bcap->he_6ghz_capa |= 4965 u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED, 4966 IEEE80211_HE_6GHZ_CAP_SM_PS); 4967 val = u32_get_bits(pcap->vht_cap, 4968 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK); 4969 bcap->he_6ghz_capa |= 4970 u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); 4971 val = u32_get_bits(pcap->vht_cap, 4972 IEEE80211_VHT_CAP_MAX_MPDU_MASK); 4973 bcap->he_6ghz_capa |= 4974 u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN); 4975 if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN) 4976 bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS; 4977 if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN) 4978 bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS; 4979 4980 return cpu_to_le16(bcap->he_6ghz_capa); 4981 } 4982 4983 static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap, 4984 int iftype, u8 num_tx_chains, 4985 struct ieee80211_sta_he_cap *he_cap) 4986 { 4987 struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem; 4988 struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp; 4989 4990 he_cap->has_he = true; 4991 memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info, 4992 sizeof(he_cap_elem->mac_cap_info)); 4993 memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info, 4994 sizeof(he_cap_elem->phy_cap_info)); 4995 4996 he_cap_elem->mac_cap_info[1] &= 4997 IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK; 4998 4999 he_cap_elem->phy_cap_info[5] &= 5000 ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK; 5001 he_cap_elem->phy_cap_info[5] &= 5002 ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK; 5003 he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1; 5004 5005 switch (iftype) { 5006 case NL80211_IFTYPE_AP: 5007 he_cap_elem->phy_cap_info[3] &= 5008 ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK; 5009 he_cap_elem->phy_cap_info[9] |= 5010 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; 5011 break; 5012 case NL80211_IFTYPE_STATION: 5013 he_cap_elem->mac_cap_info[0] &= ~IEEE80211_HE_MAC_CAP0_TWT_RES; 5014 he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_REQ; 5015 he_cap_elem->phy_cap_info[9] |= 5016 IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; 5017 break; 5018 case NL80211_IFTYPE_MESH_POINT: 5019 ath12k_mac_filter_he_cap_mesh(he_cap_elem); 5020 break; 5021 } 5022 5023 mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); 5024 mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff); 5025 mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 5026 mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 5027 mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 5028 mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff); 5029 5030 memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres)); 5031 if (he_cap_elem->phy_cap_info[6] & 5032 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) 5033 ath12k_gen_ppe_thresh(&band_cap->he_ppet, he_cap->ppe_thres); 5034 } 5035 5036 static void 5037 ath12k_mac_copy_eht_mcs_nss(struct ath12k_band_cap *band_cap, 5038 struct ieee80211_eht_mcs_nss_supp *mcs_nss, 5039 const struct ieee80211_he_cap_elem *he_cap, 5040 const struct ieee80211_eht_cap_elem_fixed *eht_cap) 5041 { 5042 if ((he_cap->phy_cap_info[0] & 5043 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 5044 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | 5045 
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | 5046 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)) == 0) 5047 memcpy(&mcs_nss->only_20mhz, &band_cap->eht_mcs_20_only, 5048 sizeof(struct ieee80211_eht_mcs_nss_supp_20mhz_only)); 5049 5050 if (he_cap->phy_cap_info[0] & 5051 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | 5052 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G)) 5053 memcpy(&mcs_nss->bw._80, &band_cap->eht_mcs_80, 5054 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 5055 5056 if (he_cap->phy_cap_info[0] & 5057 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) 5058 memcpy(&mcs_nss->bw._160, &band_cap->eht_mcs_160, 5059 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 5060 5061 if (eht_cap->phy_cap_info[0] & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) 5062 memcpy(&mcs_nss->bw._320, &band_cap->eht_mcs_320, 5063 sizeof(struct ieee80211_eht_mcs_nss_supp_bw)); 5064 } 5065 5066 static void ath12k_mac_copy_eht_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet, 5067 struct ieee80211_sta_eht_cap *cap) 5068 { 5069 u16 bit = IEEE80211_EHT_PPE_THRES_INFO_HEADER_SIZE; 5070 u8 i, nss, ru, ppet_bit_len_per_ru = IEEE80211_EHT_PPE_THRES_INFO_PPET_SIZE * 2; 5071 5072 u8p_replace_bits(&cap->eht_ppe_thres[0], fw_ppet->numss_m1, 5073 IEEE80211_EHT_PPE_THRES_NSS_MASK); 5074 5075 u16p_replace_bits((u16 *)&cap->eht_ppe_thres[0], fw_ppet->ru_bit_mask, 5076 IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 5077 5078 for (nss = 0; nss <= fw_ppet->numss_m1; nss++) { 5079 for (ru = 0; 5080 ru < hweight16(IEEE80211_EHT_PPE_THRES_RU_INDEX_BITMASK_MASK); 5081 ru++) { 5082 u32 val = 0; 5083 5084 if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0) 5085 continue; 5086 5087 u32p_replace_bits(&val, fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> 5088 (ru * ppet_bit_len_per_ru), 5089 GENMASK(ppet_bit_len_per_ru - 1, 0)); 5090 5091 for (i = 0; i < ppet_bit_len_per_ru; i++) { 5092 cap->eht_ppe_thres[bit / 8] |= 5093 (((val >> i) & 0x1) << ((bit % 8))); 5094 bit++; 5095 } 5096 } 5097 } 5098 } 5099 5100 static void 5101 ath12k_mac_filter_eht_cap_mesh(struct ieee80211_eht_cap_elem_fixed 5102 *eht_cap_elem) 5103 { 5104 u8 m; 5105 5106 m = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS; 5107 eht_cap_elem->mac_cap_info[0] &= ~m; 5108 5109 m = IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO; 5110 eht_cap_elem->phy_cap_info[0] &= ~m; 5111 5112 m = IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | 5113 IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | 5114 IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | 5115 IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK; 5116 eht_cap_elem->phy_cap_info[3] &= ~m; 5117 5118 m = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | 5119 IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | 5120 IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | 5121 IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI; 5122 eht_cap_elem->phy_cap_info[4] &= ~m; 5123 5124 m = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | 5125 IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | 5126 IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | 5127 IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK; 5128 eht_cap_elem->phy_cap_info[5] &= ~m; 5129 5130 m = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK; 5131 eht_cap_elem->phy_cap_info[6] &= ~m; 5132 5133 m = IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | 5134 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | 5135 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | 5136 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | 5137 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | 5138 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ; 
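/* Strip non-OFDMA UL MU-MIMO and MU beamformer support (80/160/320 MHz)
 * from EHT PHY cap octet 7 when the interface operates as a mesh point.
 */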
5139 eht_cap_elem->phy_cap_info[7] &= ~m; 5140 } 5141 5142 static void ath12k_mac_copy_eht_cap(struct ath12k *ar, 5143 struct ath12k_band_cap *band_cap, 5144 struct ieee80211_he_cap_elem *he_cap_elem, 5145 int iftype, 5146 struct ieee80211_sta_eht_cap *eht_cap) 5147 { 5148 struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem; 5149 5150 memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap)); 5151 5152 if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map))) 5153 return; 5154 5155 eht_cap->has_eht = true; 5156 memcpy(eht_cap_elem->mac_cap_info, band_cap->eht_cap_mac_info, 5157 sizeof(eht_cap_elem->mac_cap_info)); 5158 memcpy(eht_cap_elem->phy_cap_info, band_cap->eht_cap_phy_info, 5159 sizeof(eht_cap_elem->phy_cap_info)); 5160 5161 switch (iftype) { 5162 case NL80211_IFTYPE_AP: 5163 eht_cap_elem->phy_cap_info[0] &= 5164 ~IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ; 5165 eht_cap_elem->phy_cap_info[4] &= 5166 ~IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO; 5167 eht_cap_elem->phy_cap_info[5] &= 5168 ~IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP; 5169 break; 5170 case NL80211_IFTYPE_STATION: 5171 eht_cap_elem->phy_cap_info[7] &= 5172 ~(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | 5173 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | 5174 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); 5175 eht_cap_elem->phy_cap_info[7] &= 5176 ~(IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | 5177 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | 5178 IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); 5179 break; 5180 case NL80211_IFTYPE_MESH_POINT: 5181 ath12k_mac_filter_eht_cap_mesh(eht_cap_elem); 5182 break; 5183 default: 5184 break; 5185 } 5186 5187 ath12k_mac_copy_eht_mcs_nss(band_cap, &eht_cap->eht_mcs_nss_supp, 5188 he_cap_elem, eht_cap_elem); 5189 5190 if (eht_cap_elem->phy_cap_info[5] & 5191 IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) 5192 ath12k_mac_copy_eht_ppe_thresh(&band_cap->eht_ppet, eht_cap); 5193 } 5194 5195 static int ath12k_mac_copy_sband_iftype_data(struct ath12k *ar, 5196 struct ath12k_pdev_cap *cap, 5197 struct ieee80211_sband_iftype_data *data, 5198 int band) 5199 { 5200 struct ath12k_band_cap *band_cap = &cap->band[band]; 5201 int i, idx = 0; 5202 5203 for (i = 0; i < NUM_NL80211_IFTYPES; i++) { 5204 struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap; 5205 5206 switch (i) { 5207 case NL80211_IFTYPE_STATION: 5208 case NL80211_IFTYPE_AP: 5209 case NL80211_IFTYPE_MESH_POINT: 5210 break; 5211 5212 default: 5213 continue; 5214 } 5215 5216 data[idx].types_mask = BIT(i); 5217 5218 ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap); 5219 if (band == NL80211_BAND_6GHZ) { 5220 data[idx].he_6ghz_capa.capa = 5221 ath12k_mac_setup_he_6ghz_cap(cap, band_cap); 5222 } 5223 ath12k_mac_copy_eht_cap(ar, band_cap, &he_cap->he_cap_elem, i, 5224 &data[idx].eht_cap); 5225 idx++; 5226 } 5227 5228 return idx; 5229 } 5230 5231 static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, 5232 struct ath12k_pdev_cap *cap) 5233 { 5234 struct ieee80211_supported_band *sband; 5235 enum nl80211_band band; 5236 int count; 5237 5238 if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) { 5239 band = NL80211_BAND_2GHZ; 5240 count = ath12k_mac_copy_sband_iftype_data(ar, cap, 5241 ar->mac.iftype[band], 5242 band); 5243 sband = &ar->mac.sbands[band]; 5244 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 5245 count); 5246 } 5247 5248 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) { 5249 band = NL80211_BAND_5GHZ; 5250 count = ath12k_mac_copy_sband_iftype_data(ar, 
cap, 5251 ar->mac.iftype[band], 5252 band); 5253 sband = &ar->mac.sbands[band]; 5254 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 5255 count); 5256 } 5257 5258 if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && 5259 ar->supports_6ghz) { 5260 band = NL80211_BAND_6GHZ; 5261 count = ath12k_mac_copy_sband_iftype_data(ar, cap, 5262 ar->mac.iftype[band], 5263 band); 5264 sband = &ar->mac.sbands[band]; 5265 _ieee80211_set_sband_iftype_data(sband, ar->mac.iftype[band], 5266 count); 5267 } 5268 } 5269 5270 static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) 5271 { 5272 int ret; 5273 5274 lockdep_assert_held(&ar->conf_mutex); 5275 5276 if (ath12k_check_chain_mask(ar, tx_ant, true)) 5277 return -EINVAL; 5278 5279 if (ath12k_check_chain_mask(ar, rx_ant, false)) 5280 return -EINVAL; 5281 5282 /* Since we advertised the max cap of all radios combined during wiphy 5283 * registration, ensure we don't set the antenna config higher than the 5284 * limits 5285 */ 5286 tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask); 5287 rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask); 5288 5289 ar->cfg_tx_chainmask = tx_ant; 5290 ar->cfg_rx_chainmask = rx_ant; 5291 5292 if (ar->state != ATH12K_STATE_ON && 5293 ar->state != ATH12K_STATE_RESTARTED) 5294 return 0; 5295 5296 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, 5297 tx_ant, ar->pdev->pdev_id); 5298 if (ret) { 5299 ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n", 5300 ret, tx_ant); 5301 return ret; 5302 } 5303 5304 ar->num_tx_chains = hweight32(tx_ant); 5305 5306 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK, 5307 rx_ant, ar->pdev->pdev_id); 5308 if (ret) { 5309 ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n", 5310 ret, rx_ant); 5311 return ret; 5312 } 5313 5314 ar->num_rx_chains = hweight32(rx_ant); 5315 5316 /* Reload HT/VHT/HE capability */ 5317 ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL); 5318 ath12k_mac_setup_sband_iftype_data(ar, &ar->pdev->cap); 5319 5320 return 0; 5321 } 5322 5323 static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb) 5324 { 5325 int num_mgmt; 5326 5327 ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb); 5328 5329 num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx); 5330 5331 if (num_mgmt < 0) 5332 WARN_ON_ONCE(1); 5333 5334 if (!num_mgmt) 5335 wake_up(&ar->txmgmt_empty_waitq); 5336 } 5337 5338 int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) 5339 { 5340 struct sk_buff *msdu = skb; 5341 struct ieee80211_tx_info *info; 5342 struct ath12k *ar = ctx; 5343 struct ath12k_base *ab = ar->ab; 5344 5345 spin_lock_bh(&ar->txmgmt_idr_lock); 5346 idr_remove(&ar->txmgmt_idr, buf_id); 5347 spin_unlock_bh(&ar->txmgmt_idr_lock); 5348 dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, 5349 DMA_TO_DEVICE); 5350 5351 info = IEEE80211_SKB_CB(msdu); 5352 memset(&info->status, 0, sizeof(info->status)); 5353 5354 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5355 5356 return 0; 5357 } 5358 5359 static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) 5360 { 5361 struct ieee80211_vif *vif = ctx; 5362 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 5363 struct sk_buff *msdu = skb; 5364 struct ath12k *ar = skb_cb->ar; 5365 struct ath12k_base *ab = ar->ab; 5366 5367 if (skb_cb->vif == vif) { 5368 spin_lock_bh(&ar->txmgmt_idr_lock); 5369 idr_remove(&ar->txmgmt_idr, buf_id); 5370 spin_unlock_bh(&ar->txmgmt_idr_lock); 5371 dma_unmap_single(ab->dev, 
skb_cb->paddr, msdu->len, 5372 DMA_TO_DEVICE); 5373 } 5374 5375 return 0; 5376 } 5377 5378 static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, 5379 struct sk_buff *skb) 5380 { 5381 struct ath12k_base *ab = ar->ab; 5382 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5383 struct ieee80211_tx_info *info; 5384 dma_addr_t paddr; 5385 int buf_id; 5386 int ret; 5387 5388 ATH12K_SKB_CB(skb)->ar = ar; 5389 spin_lock_bh(&ar->txmgmt_idr_lock); 5390 buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0, 5391 ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC); 5392 spin_unlock_bh(&ar->txmgmt_idr_lock); 5393 if (buf_id < 0) 5394 return -ENOSPC; 5395 5396 info = IEEE80211_SKB_CB(skb); 5397 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { 5398 if ((ieee80211_is_action(hdr->frame_control) || 5399 ieee80211_is_deauth(hdr->frame_control) || 5400 ieee80211_is_disassoc(hdr->frame_control)) && 5401 ieee80211_has_protected(hdr->frame_control)) { 5402 skb_put(skb, IEEE80211_CCMP_MIC_LEN); 5403 } 5404 } 5405 5406 paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE); 5407 if (dma_mapping_error(ab->dev, paddr)) { 5408 ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n"); 5409 ret = -EIO; 5410 goto err_free_idr; 5411 } 5412 5413 ATH12K_SKB_CB(skb)->paddr = paddr; 5414 5415 ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb); 5416 if (ret) { 5417 ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret); 5418 goto err_unmap_buf; 5419 } 5420 5421 return 0; 5422 5423 err_unmap_buf: 5424 dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, 5425 skb->len, DMA_TO_DEVICE); 5426 err_free_idr: 5427 spin_lock_bh(&ar->txmgmt_idr_lock); 5428 idr_remove(&ar->txmgmt_idr, buf_id); 5429 spin_unlock_bh(&ar->txmgmt_idr_lock); 5430 5431 return ret; 5432 } 5433 5434 static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar) 5435 { 5436 struct sk_buff *skb; 5437 5438 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) 5439 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5440 } 5441 5442 static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work) 5443 { 5444 struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work); 5445 struct ath12k_skb_cb *skb_cb; 5446 struct ath12k_vif *arvif; 5447 struct sk_buff *skb; 5448 int ret; 5449 5450 while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) { 5451 skb_cb = ATH12K_SKB_CB(skb); 5452 if (!skb_cb->vif) { 5453 ath12k_warn(ar->ab, "no vif found for mgmt frame\n"); 5454 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5455 continue; 5456 } 5457 5458 arvif = ath12k_vif_to_arvif(skb_cb->vif); 5459 5460 if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) { 5461 ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb); 5462 if (ret) { 5463 ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n", 5464 arvif->vdev_id, ret); 5465 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5466 } 5467 } else { 5468 ath12k_warn(ar->ab, 5469 "dropping mgmt frame for vdev %d, is_started %d\n", 5470 arvif->vdev_id, 5471 arvif->is_started); 5472 ath12k_mgmt_over_wmi_tx_drop(ar, skb); 5473 } 5474 } 5475 } 5476 5477 static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb, 5478 bool is_prb_rsp) 5479 { 5480 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 5481 5482 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) 5483 return -ESHUTDOWN; 5484 5485 /* Drop probe response packets when the pending management tx 5486 * count has reached a certain threshold, so as to prioritize 5487 * other mgmt packets like auth and assoc to be sent on time 5488 * 
for establishing successful connections. 5489 */ 5490 if (is_prb_rsp && 5491 atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) { 5492 ath12k_warn(ar->ab, 5493 "dropping probe response as pending queue is almost full\n"); 5494 return -ENOSPC; 5495 } 5496 5497 if (skb_queue_len_lockless(q) >= ATH12K_TX_MGMT_NUM_PENDING_MAX) { 5498 ath12k_warn(ar->ab, "mgmt tx queue is full\n"); 5499 return -ENOSPC; 5500 } 5501 5502 skb_queue_tail(q, skb); 5503 atomic_inc(&ar->num_pending_mgmt_tx); 5504 ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work); 5505 5506 return 0; 5507 } 5508 5509 static void ath12k_mac_add_p2p_noa_ie(struct ath12k *ar, 5510 struct ieee80211_vif *vif, 5511 struct sk_buff *skb, 5512 bool is_prb_rsp) 5513 { 5514 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5515 5516 if (likely(!is_prb_rsp)) 5517 return; 5518 5519 spin_lock_bh(&ar->data_lock); 5520 5521 if (arvif->u.ap.noa_data && 5522 !pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 5523 GFP_ATOMIC)) 5524 skb_put_data(skb, arvif->u.ap.noa_data, 5525 arvif->u.ap.noa_len); 5526 5527 spin_unlock_bh(&ar->data_lock); 5528 } 5529 5530 static void ath12k_mac_op_tx(struct ieee80211_hw *hw, 5531 struct ieee80211_tx_control *control, 5532 struct sk_buff *skb) 5533 { 5534 struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); 5535 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 5536 struct ieee80211_vif *vif = info->control.vif; 5537 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 5538 struct ath12k *ar = arvif->ar; 5539 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 5540 struct ieee80211_key_conf *key = info->control.hw_key; 5541 u32 info_flags = info->flags; 5542 bool is_prb_rsp; 5543 int ret; 5544 5545 memset(skb_cb, 0, sizeof(*skb_cb)); 5546 skb_cb->vif = vif; 5547 5548 if (key) { 5549 skb_cb->cipher = key->cipher; 5550 skb_cb->flags |= ATH12K_SKB_CIPHER_SET; 5551 } 5552 5553 is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control); 5554 5555 if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) { 5556 skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP; 5557 } else if (ieee80211_is_mgmt(hdr->frame_control)) { 5558 ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp); 5559 if (ret) { 5560 ath12k_warn(ar->ab, "failed to queue management frame %d\n", 5561 ret); 5562 ieee80211_free_txskb(hw, skb); 5563 } 5564 return; 5565 } 5566 5567 /* This is case only for P2P_GO */ 5568 if (vif->type == NL80211_IFTYPE_AP && vif->p2p) 5569 ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp); 5570 5571 ret = ath12k_dp_tx(ar, arvif, skb); 5572 if (ret) { 5573 ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret); 5574 ieee80211_free_txskb(hw, skb); 5575 } 5576 } 5577 5578 void ath12k_mac_drain_tx(struct ath12k *ar) 5579 { 5580 /* make sure rcu-protected mac80211 tx path itself is drained */ 5581 synchronize_net(); 5582 5583 cancel_work_sync(&ar->wmi_mgmt_tx_work); 5584 ath12k_mgmt_over_wmi_tx_purge(ar); 5585 } 5586 5587 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) 5588 { 5589 return -EOPNOTSUPP; 5590 /* TODO: Need to support new monitor mode */ 5591 } 5592 5593 static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab) 5594 { 5595 int recovery_start_count; 5596 5597 if (!ab->is_reset) 5598 return; 5599 5600 recovery_start_count = atomic_inc_return(&ab->recovery_start_count); 5601 5602 ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count); 5603 5604 if (recovery_start_count == ab->num_radios) { 5605 complete(&ab->recovery_start); 5606 
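/* Recovery start has now been acknowledged by every radio; each pdev
 * still waits below (bounded by ATH12K_RECONFIGURE_TIMEOUT_HZ) for the
 * reconfigure to complete before ath12k_mac_start() proceeds.
 */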
ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started successfully\n");
5607 }
5608
5609 ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting for reconfigure...\n");
5610
5611 wait_for_completion_timeout(&ab->reconfigure_complete,
5612 ATH12K_RECONFIGURE_TIMEOUT_HZ);
5613 }
5614
5615 static int ath12k_mac_start(struct ath12k *ar)
5616 {
5617 struct ath12k_base *ab = ar->ab;
5618 struct ath12k_pdev *pdev = ar->pdev;
5619 int ret;
5620
5621 mutex_lock(&ar->conf_mutex);
5622
5623 switch (ar->state) {
5624 case ATH12K_STATE_OFF:
5625 ar->state = ATH12K_STATE_ON;
5626 break;
5627 case ATH12K_STATE_RESTARTING:
5628 ar->state = ATH12K_STATE_RESTARTED;
5629 ath12k_mac_wait_reconfigure(ab);
5630 break;
5631 case ATH12K_STATE_RESTARTED:
5632 case ATH12K_STATE_WEDGED:
5633 case ATH12K_STATE_ON:
5634 WARN_ON(1);
5635 ret = -EINVAL;
5636 goto err;
5637 }
5638
5639 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
5640 1, pdev->pdev_id);
5641
5642 if (ret) {
5643 ath12k_err(ab, "failed to enable PMF QOS: %d\n", ret);
5644 goto err;
5645 }
5646
5647 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
5648 pdev->pdev_id);
5649 if (ret) {
5650 ath12k_err(ab, "failed to enable dynamic bw: %d\n", ret);
5651 goto err;
5652 }
5653
5654 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
5655 0, pdev->pdev_id);
5656 if (ret) {
5657 ath12k_err(ab, "failed to set ac override for ARP: %d\n",
5658 ret);
5659 goto err;
5660 }
5661
5662 ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
5663 if (ret) {
5664 ath12k_err(ab, "failed to offload radar detection: %d\n",
5665 ret);
5666 goto err;
5667 }
5668
5669 ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
5670 HTT_PPDU_STATS_TAG_DEFAULT);
5671 if (ret) {
5672 ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
5673 goto err;
5674 }
5675
5676 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
5677 1, pdev->pdev_id);
5678
5679 if (ret) {
5680 ath12k_err(ab, "failed to enable MESH MCAST ENABLE: %d\n", ret);
5681 goto err;
5682 }
5683
5684 __ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
5685
5686 /* TODO: Do we need to enable ANI? */
5687
5688 ath12k_reg_update_chan_list(ar);
5689
5690 ar->num_started_vdevs = 0;
5691 ar->num_created_vdevs = 0;
5692 ar->num_peers = 0;
5693 ar->allocated_vdev_map = 0;
5694
5695 /* Configure monitor status ring with default rx_filter to get rx status
5696 * such as rssi, rx_duration.
5697 */ 5698 ret = ath12k_mac_config_mon_status_default(ar, true); 5699 if (ret && (ret != -EOPNOTSUPP)) { 5700 ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n", 5701 ret); 5702 goto err; 5703 } 5704 5705 if (ret == -EOPNOTSUPP) 5706 ath12k_dbg(ab, ATH12K_DBG_MAC, 5707 "monitor status config is not yet supported"); 5708 5709 /* Configure the hash seed for hash based reo dest ring selection */ 5710 ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id); 5711 5712 /* allow device to enter IMPS */ 5713 if (ab->hw_params->idle_ps) { 5714 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG, 5715 1, pdev->pdev_id); 5716 if (ret) { 5717 ath12k_err(ab, "failed to enable idle ps: %d\n", ret); 5718 goto err; 5719 } 5720 } 5721 5722 mutex_unlock(&ar->conf_mutex); 5723 5724 rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], 5725 &ab->pdevs[ar->pdev_idx]); 5726 5727 return 0; 5728 err: 5729 ar->state = ATH12K_STATE_OFF; 5730 mutex_unlock(&ar->conf_mutex); 5731 5732 return ret; 5733 } 5734 5735 static void ath12k_drain_tx(struct ath12k_hw *ah) 5736 { 5737 struct ath12k *ar; 5738 int i; 5739 5740 for_each_ar(ah, ar, i) 5741 ath12k_mac_drain_tx(ar); 5742 } 5743 5744 static int ath12k_mac_op_start(struct ieee80211_hw *hw) 5745 { 5746 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5747 struct ath12k *ar; 5748 int ret, i; 5749 5750 ath12k_drain_tx(ah); 5751 5752 for_each_ar(ah, ar, i) { 5753 ret = ath12k_mac_start(ar); 5754 if (ret) { 5755 ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n", 5756 ar->pdev_idx, ret); 5757 goto fail_start; 5758 } 5759 } 5760 5761 return 0; 5762 fail_start: 5763 for (; i > 0; i--) { 5764 ar = ath12k_ah_to_ar(ah, i - 1); 5765 ath12k_mac_stop(ar); 5766 } 5767 return ret; 5768 } 5769 5770 int ath12k_mac_rfkill_config(struct ath12k *ar) 5771 { 5772 struct ath12k_base *ab = ar->ab; 5773 u32 param; 5774 int ret; 5775 5776 if (ab->hw_params->rfkill_pin == 0) 5777 return -EOPNOTSUPP; 5778 5779 ath12k_dbg(ab, ATH12K_DBG_MAC, 5780 "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d", 5781 ab->hw_params->rfkill_pin, ab->hw_params->rfkill_cfg, 5782 ab->hw_params->rfkill_on_level); 5783 5784 param = u32_encode_bits(ab->hw_params->rfkill_on_level, 5785 WMI_RFKILL_CFG_RADIO_LEVEL) | 5786 u32_encode_bits(ab->hw_params->rfkill_pin, 5787 WMI_RFKILL_CFG_GPIO_PIN_NUM) | 5788 u32_encode_bits(ab->hw_params->rfkill_cfg, 5789 WMI_RFKILL_CFG_PIN_AS_GPIO); 5790 5791 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_HW_RFKILL_CONFIG, 5792 param, ar->pdev->pdev_id); 5793 if (ret) { 5794 ath12k_warn(ab, 5795 "failed to set rfkill config 0x%x: %d\n", 5796 param, ret); 5797 return ret; 5798 } 5799 5800 return 0; 5801 } 5802 5803 int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) 5804 { 5805 enum wmi_rfkill_enable_radio param; 5806 int ret; 5807 5808 if (enable) 5809 param = WMI_RFKILL_ENABLE_RADIO_ON; 5810 else 5811 param = WMI_RFKILL_ENABLE_RADIO_OFF; 5812 5813 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac %d rfkill enable %d", 5814 ar->pdev_idx, param); 5815 5816 ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RFKILL_ENABLE, 5817 param, ar->pdev->pdev_id); 5818 if (ret) { 5819 ath12k_warn(ar->ab, "failed to set rfkill enable param %d: %d\n", 5820 param, ret); 5821 return ret; 5822 } 5823 5824 return 0; 5825 } 5826 5827 static void ath12k_mac_stop(struct ath12k *ar) 5828 { 5829 struct htt_ppdu_stats_info *ppdu_stats, *tmp; 5830 int ret; 5831 5832 mutex_lock(&ar->conf_mutex); 5833 ret = ath12k_mac_config_mon_status_default(ar, 
false); 5834 if (ret && (ret != -EOPNOTSUPP)) 5835 ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n", 5836 ret); 5837 5838 clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 5839 ar->state = ATH12K_STATE_OFF; 5840 mutex_unlock(&ar->conf_mutex); 5841 5842 cancel_delayed_work_sync(&ar->scan.timeout); 5843 cancel_work_sync(&ar->regd_update_work); 5844 cancel_work_sync(&ar->ab->rfkill_work); 5845 5846 spin_lock_bh(&ar->data_lock); 5847 list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) { 5848 list_del(&ppdu_stats->list); 5849 kfree(ppdu_stats); 5850 } 5851 spin_unlock_bh(&ar->data_lock); 5852 5853 rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL); 5854 5855 synchronize_rcu(); 5856 5857 atomic_set(&ar->num_pending_mgmt_tx, 0); 5858 } 5859 5860 static void ath12k_mac_op_stop(struct ieee80211_hw *hw) 5861 { 5862 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 5863 struct ath12k *ar; 5864 int i; 5865 5866 ath12k_drain_tx(ah); 5867 5868 for_each_ar(ah, ar, i) 5869 ath12k_mac_stop(ar); 5870 } 5871 5872 static u8 5873 ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) 5874 { 5875 struct ath12k_base *ab = arvif->ar->ab; 5876 u8 vdev_stats_id = 0; 5877 5878 do { 5879 if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) { 5880 vdev_stats_id++; 5881 if (vdev_stats_id >= ATH12K_MAX_VDEV_STATS_ID) { 5882 vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID; 5883 break; 5884 } 5885 } else { 5886 ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id); 5887 break; 5888 } 5889 } while (vdev_stats_id); 5890 5891 arvif->vdev_stats_id = vdev_stats_id; 5892 return vdev_stats_id; 5893 } 5894 5895 static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, 5896 struct ath12k_wmi_vdev_create_arg *arg) 5897 { 5898 struct ath12k *ar = arvif->ar; 5899 struct ath12k_pdev *pdev = ar->pdev; 5900 5901 arg->if_id = arvif->vdev_id; 5902 arg->type = arvif->vdev_type; 5903 arg->subtype = arvif->vdev_subtype; 5904 arg->pdev_id = pdev->pdev_id; 5905 5906 if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { 5907 arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; 5908 arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; 5909 } 5910 if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) { 5911 arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains; 5912 arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains; 5913 } 5914 if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP && 5915 ar->supports_6ghz) { 5916 arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains; 5917 arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains; 5918 } 5919 5920 arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif); 5921 } 5922 5923 static u32 5924 ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype) 5925 { 5926 struct ath12k_pdev_cap *pdev_cap = &pdev->cap; 5927 struct ath12k_band_cap *cap_band = NULL; 5928 u32 *hecap_phy_ptr = NULL; 5929 u32 hemode; 5930 5931 if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) 5932 cap_band = &pdev_cap->band[NL80211_BAND_2GHZ]; 5933 else 5934 cap_band = &pdev_cap->band[NL80211_BAND_5GHZ]; 5935 5936 hecap_phy_ptr = &cap_band->he_cap_phy_info[0]; 5937 5938 hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) | 5939 u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr), 5940 HE_MODE_SU_TX_BFER) | 5941 u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr), 5942 HE_MODE_UL_MUMIMO); 5943 5944 /* TODO: WDS and other modes */ 5945 if (viftype == NL80211_IFTYPE_AP) { 5946 hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr), 5947 
HE_MODE_MU_TX_BFER) | 5948 u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) | 5949 u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA); 5950 } else { 5951 hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE); 5952 } 5953 5954 return hemode; 5955 } 5956 5957 static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar, 5958 struct ath12k_vif *arvif) 5959 { 5960 u32 param_id, param_value; 5961 struct ath12k_base *ab = ar->ab; 5962 int ret; 5963 5964 param_id = WMI_VDEV_PARAM_SET_HEMU_MODE; 5965 param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type); 5966 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 5967 param_id, param_value); 5968 if (ret) { 5969 ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n", 5970 arvif->vdev_id, ret, param_value); 5971 return ret; 5972 } 5973 param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE; 5974 param_value = 5975 u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) | 5976 u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE, 5977 HE_TRIG_NONTRIG_SOUNDING_MODE); 5978 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 5979 param_id, param_value); 5980 if (ret) { 5981 ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n", 5982 arvif->vdev_id, ret); 5983 return ret; 5984 } 5985 return ret; 5986 } 5987 5988 static void ath12k_mac_update_vif_offload(struct ath12k_vif *arvif) 5989 { 5990 struct ieee80211_vif *vif = arvif->vif; 5991 struct ath12k *ar = arvif->ar; 5992 struct ath12k_base *ab = ar->ab; 5993 u32 param_id, param_value; 5994 int ret; 5995 5996 param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE; 5997 if (vif->type != NL80211_IFTYPE_STATION && 5998 vif->type != NL80211_IFTYPE_AP) 5999 vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED | 6000 IEEE80211_OFFLOAD_DECAP_ENABLED); 6001 6002 if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) 6003 arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET; 6004 else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) 6005 arvif->tx_encap_type = ATH12K_HW_TXRX_RAW; 6006 else 6007 arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI; 6008 6009 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6010 param_id, arvif->tx_encap_type); 6011 if (ret) { 6012 ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n", 6013 arvif->vdev_id, ret); 6014 vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED; 6015 } 6016 6017 param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE; 6018 if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED) 6019 param_value = ATH12K_HW_TXRX_ETHERNET; 6020 else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) 6021 param_value = ATH12K_HW_TXRX_RAW; 6022 else 6023 param_value = ATH12K_HW_TXRX_NATIVE_WIFI; 6024 6025 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6026 param_id, param_value); 6027 if (ret) { 6028 ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n", 6029 arvif->vdev_id, ret); 6030 vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED; 6031 } 6032 } 6033 6034 static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw, 6035 struct ieee80211_vif *vif) 6036 { 6037 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6038 6039 ath12k_mac_update_vif_offload(arvif); 6040 } 6041 6042 static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) 6043 { 6044 struct ath12k_hw *ah = ar->ah; 6045 struct ath12k_base *ab = ar->ab; 6046 struct ieee80211_hw *hw = ah->hw; 6047 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6048 struct ath12k_wmi_vdev_create_arg vdev_arg = {0}; 6049 struct 
ath12k_wmi_peer_create_arg peer_param; 6050 u32 param_id, param_value; 6051 u16 nss; 6052 int i; 6053 int ret, vdev_id; 6054 6055 lockdep_assert_held(&ar->conf_mutex); 6056 6057 arvif->ar = ar; 6058 vdev_id = __ffs64(ab->free_vdev_map); 6059 arvif->vdev_id = vdev_id; 6060 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 6061 6062 switch (vif->type) { 6063 case NL80211_IFTYPE_UNSPECIFIED: 6064 case NL80211_IFTYPE_STATION: 6065 arvif->vdev_type = WMI_VDEV_TYPE_STA; 6066 6067 if (vif->p2p) 6068 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; 6069 6070 break; 6071 case NL80211_IFTYPE_MESH_POINT: 6072 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; 6073 fallthrough; 6074 case NL80211_IFTYPE_AP: 6075 arvif->vdev_type = WMI_VDEV_TYPE_AP; 6076 6077 if (vif->p2p) 6078 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; 6079 6080 break; 6081 case NL80211_IFTYPE_MONITOR: 6082 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 6083 ar->monitor_vdev_id = vdev_id; 6084 break; 6085 case NL80211_IFTYPE_P2P_DEVICE: 6086 arvif->vdev_type = WMI_VDEV_TYPE_STA; 6087 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; 6088 break; 6089 default: 6090 WARN_ON(1); 6091 break; 6092 } 6093 6094 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n", 6095 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 6096 ab->free_vdev_map); 6097 6098 vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1); 6099 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 6100 vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1); 6101 6102 ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); 6103 6104 ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); 6105 if (ret) { 6106 ath12k_warn(ab, "failed to create WMI vdev %d: %d\n", 6107 arvif->vdev_id, ret); 6108 goto err; 6109 } 6110 6111 ar->num_created_vdevs++; 6112 arvif->is_created = true; 6113 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n", 6114 vif->addr, arvif->vdev_id); 6115 ar->allocated_vdev_map |= 1LL << arvif->vdev_id; 6116 ab->free_vdev_map &= ~(1LL << arvif->vdev_id); 6117 6118 spin_lock_bh(&ar->data_lock); 6119 list_add(&arvif->list, &ar->arvifs); 6120 spin_unlock_bh(&ar->data_lock); 6121 6122 ath12k_mac_update_vif_offload(arvif); 6123 6124 nss = hweight32(ar->cfg_tx_chainmask) ? 
: 1; 6125 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6126 WMI_VDEV_PARAM_NSS, nss); 6127 if (ret) { 6128 ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n", 6129 arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret); 6130 goto err_vdev_del; 6131 } 6132 6133 switch (arvif->vdev_type) { 6134 case WMI_VDEV_TYPE_AP: 6135 peer_param.vdev_id = arvif->vdev_id; 6136 peer_param.peer_addr = vif->addr; 6137 peer_param.peer_type = WMI_PEER_TYPE_DEFAULT; 6138 ret = ath12k_peer_create(ar, arvif, NULL, &peer_param); 6139 if (ret) { 6140 ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n", 6141 arvif->vdev_id, ret); 6142 goto err_vdev_del; 6143 } 6144 6145 ret = ath12k_mac_set_kickout(arvif); 6146 if (ret) { 6147 ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n", 6148 arvif->vdev_id, ret); 6149 goto err_peer_del; 6150 } 6151 break; 6152 case WMI_VDEV_TYPE_STA: 6153 param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 6154 param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6155 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6156 param_id, param_value); 6157 if (ret) { 6158 ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n", 6159 arvif->vdev_id, ret); 6160 goto err_peer_del; 6161 } 6162 6163 param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 6164 param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 6165 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6166 param_id, param_value); 6167 if (ret) { 6168 ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n", 6169 arvif->vdev_id, ret); 6170 goto err_peer_del; 6171 } 6172 6173 param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT; 6174 param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 6175 ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6176 param_id, param_value); 6177 if (ret) { 6178 ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n", 6179 arvif->vdev_id, ret); 6180 goto err_peer_del; 6181 } 6182 6183 ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false); 6184 if (ret) { 6185 ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n", 6186 arvif->vdev_id, ret); 6187 goto err_peer_del; 6188 } 6189 break; 6190 default: 6191 break; 6192 } 6193 6194 arvif->txpower = vif->bss_conf.txpower; 6195 ret = ath12k_mac_txpower_recalc(ar); 6196 if (ret) 6197 goto err_peer_del; 6198 6199 param_id = WMI_VDEV_PARAM_RTS_THRESHOLD; 6200 param_value = hw->wiphy->rts_threshold; 6201 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 6202 param_id, param_value); 6203 if (ret) { 6204 ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n", 6205 arvif->vdev_id, ret); 6206 } 6207 6208 ath12k_dp_vdev_tx_attach(ar, arvif); 6209 6210 if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled) 6211 ath12k_mac_monitor_vdev_create(ar); 6212 6213 arvif->ar = ar; 6214 return ret; 6215 6216 err_peer_del: 6217 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 6218 reinit_completion(&ar->peer_delete_done); 6219 6220 ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr, 6221 arvif->vdev_id); 6222 if (ret) { 6223 ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n", 6224 arvif->vdev_id, vif->addr); 6225 goto err; 6226 } 6227 6228 ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, 6229 vif->addr); 6230 if (ret) 6231 goto err; 6232 6233 ar->num_peers--; 6234 } 6235 6236 err_vdev_del: 6237 ath12k_wmi_vdev_delete(ar, arvif->vdev_id); 6238 ar->num_created_vdevs--; 6239 arvif->is_created = false; 6240 arvif->ar = NULL; 6241 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); 6242 
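/* Error unwind (descriptive note, not in the original source): the lines
 * below hand the vdev id and vdev stats id back to the device-wide free
 * maps and drop the arvif from ar->arvifs, undoing the bookkeeping done
 * on the success path of ath12k_mac_vdev_create() above.
 */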
ab->free_vdev_map |= 1LL << arvif->vdev_id;
6243 ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
6244 spin_lock_bh(&ar->data_lock);
6245 list_del(&arvif->list);
6246 spin_unlock_bh(&ar->data_lock);
6247 
6248 err:
6249 arvif->ar = NULL;
6250 return ret;
6251 }
6252 
6253 static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif)
6254 {
6255 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
6256 struct ath12k_vif_cache *cache = arvif->cache;
6257 struct ath12k_base *ab = ar->ab;
6258 
6259 int ret;
6260 
6261 lockdep_assert_held(&ar->conf_mutex);
6262 
6263 if (!cache)
6264 return;
6265 
6266 if (cache->tx_conf.changed) {
6267 ret = ath12k_mac_conf_tx(arvif, 0, cache->tx_conf.ac,
6268 &cache->tx_conf.tx_queue_params);
6269 if (ret)
6270 ath12k_warn(ab,
6271 "unable to apply tx config parameters to vdev %d: %d\n",
6272 arvif->vdev_id, ret);
6273 }
6274 
6275 if (cache->bss_conf_changed) {
6276 ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
6277 cache->bss_conf_changed);
6278 }
6279 
6280 if (cache->key_conf.changed) {
6281 ret = ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL,
6282 cache->key_conf.key);
6283 if (ret)
6284 ath12k_warn(ab, "unable to apply set key param to vdev %d ret %d\n",
6285 arvif->vdev_id, ret);
6286 }
6287 ath12k_arvif_put_cache(arvif);
6288 }
6289 
6290 static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
6291 struct ieee80211_vif *vif,
6292 struct ieee80211_chanctx_conf *ctx)
6293 {
6294 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
6295 struct ath12k_hw *ah = hw->priv;
6296 struct ath12k *ar, *prev_ar;
6297 struct ath12k_base *ab;
6298 int ret;
6299 
6300 if (ah->num_radio == 1)
6301 ar = ah->radio;
6302 else if (ctx)
6303 ar = ath12k_get_ar_by_ctx(hw, ctx);
6304 else
6305 return NULL;
6306 
6307 if (!ar)
6308 return NULL;
6309 
6310 if (arvif->ar) {
6311 /* This is not expected really */
6312 if (WARN_ON(!arvif->is_created)) {
6313 arvif->ar = NULL;
6314 return NULL;
6315 }
6316 
6317 if (ah->num_radio == 1)
6318 return arvif->ar;
6319 
6320 /* This can happen as scan vdev gets created during multiple scans
6321 * across different radios before a vdev is brought up in
6322 * a certain radio.
6323 */ 6324 if (ar != arvif->ar) { 6325 if (WARN_ON(arvif->is_started)) 6326 return NULL; 6327 6328 /* backup the previously used ar ptr since arvif->ar would 6329 * be set to NULL after vdev delete is done 6330 */ 6331 prev_ar = arvif->ar; 6332 mutex_lock(&prev_ar->conf_mutex); 6333 ret = ath12k_mac_vdev_delete(prev_ar, vif); 6334 6335 if (ret) 6336 ath12k_warn(prev_ar->ab, "unable to delete vdev %d\n", 6337 ret); 6338 mutex_unlock(&prev_ar->conf_mutex); 6339 } 6340 } 6341 6342 ab = ar->ab; 6343 6344 mutex_lock(&ar->conf_mutex); 6345 6346 if (arvif->is_created) 6347 goto flush; 6348 6349 if (vif->type == NL80211_IFTYPE_AP && 6350 ar->num_peers > (ar->max_num_peers - 1)) { 6351 ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n"); 6352 goto unlock; 6353 } 6354 6355 if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) { 6356 ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n", 6357 TARGET_NUM_VDEVS); 6358 goto unlock; 6359 } 6360 6361 ret = ath12k_mac_vdev_create(ar, vif); 6362 if (ret) { 6363 ath12k_warn(ab, "failed to create vdev %pM ret %d", vif->addr, ret); 6364 goto unlock; 6365 } 6366 6367 flush: 6368 /* If the vdev is created during channel assign and not during 6369 * add_interface(), Apply any parameters for the vdev which were received 6370 * after add_interface, corresponding to this vif. 6371 */ 6372 ath12k_mac_vif_cache_flush(ar, vif); 6373 unlock: 6374 mutex_unlock(&ar->conf_mutex); 6375 return arvif->ar; 6376 } 6377 6378 static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw, 6379 struct ieee80211_vif *vif) 6380 { 6381 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6382 int i; 6383 6384 memset(arvif, 0, sizeof(*arvif)); 6385 6386 arvif->vif = vif; 6387 6388 INIT_LIST_HEAD(&arvif->list); 6389 INIT_DELAYED_WORK(&arvif->connection_loss_work, 6390 ath12k_mac_vif_sta_connection_loss_work); 6391 6392 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 6393 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 6394 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 6395 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 6396 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 6397 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 6398 } 6399 6400 /* Allocate Default Queue now and reassign during actual vdev create */ 6401 vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE; 6402 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 6403 vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE; 6404 6405 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 6406 6407 /* For single radio wiphy(i.e ah->num_radio is 1), create the vdev 6408 * during add_interface itself, for multi radio wiphy, defer the vdev 6409 * creation until channel_assign to determine the radio on which the 6410 * vdev needs to be created 6411 */ 6412 ath12k_mac_assign_vif_to_vdev(hw, vif, NULL); 6413 return 0; 6414 } 6415 6416 static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif) 6417 { 6418 struct ath12k_tx_desc_info *tx_desc_info; 6419 struct ath12k_skb_cb *skb_cb; 6420 struct sk_buff *skb; 6421 int i; 6422 6423 for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) { 6424 spin_lock_bh(&dp->tx_desc_lock[i]); 6425 6426 list_for_each_entry(tx_desc_info, &dp->tx_desc_used_list[i], 6427 list) { 6428 skb = tx_desc_info->skb; 6429 if (!skb) 6430 continue; 6431 6432 skb_cb = ATH12K_SKB_CB(skb); 6433 if (skb_cb->vif == vif) 6434 skb_cb->vif = NULL; 6435 } 6436 6437 spin_unlock_bh(&dp->tx_desc_lock[i]); 6438 } 6439 } 6440 6441 static int 
ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif) 6442 { 6443 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6444 struct ath12k_base *ab = ar->ab; 6445 unsigned long time_left; 6446 int ret; 6447 6448 lockdep_assert_held(&ar->conf_mutex); 6449 reinit_completion(&ar->vdev_delete_done); 6450 6451 ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id); 6452 if (ret) { 6453 ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n", 6454 arvif->vdev_id, ret); 6455 goto err_vdev_del; 6456 } 6457 6458 time_left = wait_for_completion_timeout(&ar->vdev_delete_done, 6459 ATH12K_VDEV_DELETE_TIMEOUT_HZ); 6460 if (time_left == 0) { 6461 ath12k_warn(ab, "Timeout in receiving vdev delete response\n"); 6462 goto err_vdev_del; 6463 } 6464 6465 ab->free_vdev_map |= 1LL << arvif->vdev_id; 6466 ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id); 6467 ar->num_created_vdevs--; 6468 6469 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 6470 ar->monitor_vdev_id = -1; 6471 ar->monitor_vdev_created = false; 6472 } else if (ar->monitor_vdev_created && !ar->monitor_started) { 6473 ret = ath12k_mac_monitor_vdev_delete(ar); 6474 } 6475 6476 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n", 6477 vif->addr, arvif->vdev_id); 6478 6479 err_vdev_del: 6480 spin_lock_bh(&ar->data_lock); 6481 list_del(&arvif->list); 6482 spin_unlock_bh(&ar->data_lock); 6483 6484 ath12k_peer_cleanup(ar, arvif->vdev_id); 6485 ath12k_arvif_put_cache(arvif); 6486 6487 idr_for_each(&ar->txmgmt_idr, 6488 ath12k_mac_vif_txmgmt_idr_remove, vif); 6489 6490 ath12k_mac_vif_unref(&ab->dp, vif); 6491 ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id); 6492 6493 /* Recalc txpower for remaining vdev */ 6494 ath12k_mac_txpower_recalc(ar); 6495 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 6496 6497 /* TODO: recal traffic pause state based on the available vdevs */ 6498 arvif->is_created = false; 6499 arvif->ar = NULL; 6500 6501 return ret; 6502 } 6503 6504 static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw, 6505 struct ieee80211_vif *vif) 6506 { 6507 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6508 struct ath12k_base *ab; 6509 struct ath12k *ar; 6510 int ret; 6511 6512 if (!arvif->is_created) { 6513 /* if we cached some config but never received assign chanctx, 6514 * free the allocated cache. 6515 */ 6516 ath12k_arvif_put_cache(arvif); 6517 return; 6518 } 6519 6520 ar = arvif->ar; 6521 ab = ar->ab; 6522 6523 cancel_delayed_work_sync(&arvif->connection_loss_work); 6524 6525 mutex_lock(&ar->conf_mutex); 6526 6527 ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n", 6528 arvif->vdev_id); 6529 6530 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 6531 ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr); 6532 if (ret) 6533 ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n", 6534 arvif->vdev_id, ret); 6535 } 6536 6537 ath12k_mac_vdev_delete(ar, vif); 6538 6539 mutex_unlock(&ar->conf_mutex); 6540 } 6541 6542 /* FIXME: Has to be verified. 
*/ 6543 #define SUPPORTED_FILTERS \ 6544 (FIF_ALLMULTI | \ 6545 FIF_CONTROL | \ 6546 FIF_PSPOLL | \ 6547 FIF_OTHER_BSS | \ 6548 FIF_BCN_PRBRESP_PROMISC | \ 6549 FIF_PROBE_REQ | \ 6550 FIF_FCSFAIL) 6551 6552 static void ath12k_mac_configure_filter(struct ath12k *ar, 6553 unsigned int total_flags) 6554 { 6555 bool reset_flag; 6556 int ret; 6557 6558 lockdep_assert_held(&ar->conf_mutex); 6559 6560 ar->filter_flags = total_flags; 6561 6562 /* For monitor mode */ 6563 reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); 6564 6565 ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); 6566 if (!ret) { 6567 if (!reset_flag) 6568 set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 6569 else 6570 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 6571 } else { 6572 ath12k_warn(ar->ab, 6573 "fail to set monitor filter: %d\n", ret); 6574 } 6575 6576 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 6577 "total_flags:0x%x, reset_flag:%d\n", 6578 total_flags, reset_flag); 6579 } 6580 6581 static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw, 6582 unsigned int changed_flags, 6583 unsigned int *total_flags, 6584 u64 multicast) 6585 { 6586 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6587 struct ath12k *ar; 6588 6589 ar = ath12k_ah_to_ar(ah, 0); 6590 6591 mutex_lock(&ar->conf_mutex); 6592 6593 *total_flags &= SUPPORTED_FILTERS; 6594 ath12k_mac_configure_filter(ar, *total_flags); 6595 6596 mutex_unlock(&ar->conf_mutex); 6597 } 6598 6599 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 6600 { 6601 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6602 int antennas_rx = 0, antennas_tx = 0; 6603 struct ath12k *ar; 6604 int i; 6605 6606 for_each_ar(ah, ar, i) { 6607 mutex_lock(&ar->conf_mutex); 6608 antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask); 6609 antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask); 6610 mutex_unlock(&ar->conf_mutex); 6611 } 6612 6613 *tx_ant = antennas_tx; 6614 *rx_ant = antennas_rx; 6615 6616 return 0; 6617 } 6618 6619 static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 6620 { 6621 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6622 struct ath12k *ar; 6623 int ret = 0; 6624 int i; 6625 6626 for_each_ar(ah, ar, i) { 6627 mutex_lock(&ar->conf_mutex); 6628 ret = __ath12k_set_antenna(ar, tx_ant, rx_ant); 6629 mutex_unlock(&ar->conf_mutex); 6630 if (ret) 6631 break; 6632 } 6633 6634 return ret; 6635 } 6636 6637 static int ath12k_mac_ampdu_action(struct ath12k_vif *arvif, 6638 struct ieee80211_ampdu_params *params) 6639 { 6640 struct ath12k *ar = arvif->ar; 6641 int ret = -EINVAL; 6642 6643 lockdep_assert_held(&ar->conf_mutex); 6644 6645 switch (params->action) { 6646 case IEEE80211_AMPDU_RX_START: 6647 ret = ath12k_dp_rx_ampdu_start(ar, params); 6648 break; 6649 case IEEE80211_AMPDU_RX_STOP: 6650 ret = ath12k_dp_rx_ampdu_stop(ar, params); 6651 break; 6652 case IEEE80211_AMPDU_TX_START: 6653 case IEEE80211_AMPDU_TX_STOP_CONT: 6654 case IEEE80211_AMPDU_TX_STOP_FLUSH: 6655 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 6656 case IEEE80211_AMPDU_TX_OPERATIONAL: 6657 /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211 6658 * Tx aggregation requests. 
6659 */ 6660 ret = -EOPNOTSUPP; 6661 break; 6662 } 6663 6664 return ret; 6665 } 6666 6667 static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw, 6668 struct ieee80211_vif *vif, 6669 struct ieee80211_ampdu_params *params) 6670 { 6671 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 6672 struct ath12k *ar; 6673 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6674 int ret = -EINVAL; 6675 6676 ar = ath12k_get_ar_by_vif(hw, vif); 6677 if (!ar) 6678 return -EINVAL; 6679 6680 ar = ath12k_ah_to_ar(ah, 0); 6681 6682 mutex_lock(&ar->conf_mutex); 6683 ret = ath12k_mac_ampdu_action(arvif, params); 6684 mutex_unlock(&ar->conf_mutex); 6685 6686 if (ret) 6687 ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n", 6688 ar->pdev_idx, params->action, ret); 6689 6690 return ret; 6691 } 6692 6693 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw, 6694 struct ieee80211_chanctx_conf *ctx) 6695 { 6696 struct ath12k *ar; 6697 struct ath12k_base *ab; 6698 6699 ar = ath12k_get_ar_by_ctx(hw, ctx); 6700 if (!ar) 6701 return -EINVAL; 6702 6703 ab = ar->ab; 6704 6705 ath12k_dbg(ab, ATH12K_DBG_MAC, 6706 "mac chanctx add freq %u width %d ptr %p\n", 6707 ctx->def.chan->center_freq, ctx->def.width, ctx); 6708 6709 mutex_lock(&ar->conf_mutex); 6710 6711 spin_lock_bh(&ar->data_lock); 6712 /* TODO: In case of multiple channel context, populate rx_channel from 6713 * Rx PPDU desc information. 6714 */ 6715 ar->rx_channel = ctx->def.chan; 6716 spin_unlock_bh(&ar->data_lock); 6717 6718 mutex_unlock(&ar->conf_mutex); 6719 6720 return 0; 6721 } 6722 6723 static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 6724 struct ieee80211_chanctx_conf *ctx) 6725 { 6726 struct ath12k *ar; 6727 struct ath12k_base *ab; 6728 6729 ar = ath12k_get_ar_by_ctx(hw, ctx); 6730 if (!ar) 6731 return; 6732 6733 ab = ar->ab; 6734 6735 ath12k_dbg(ab, ATH12K_DBG_MAC, 6736 "mac chanctx remove freq %u width %d ptr %p\n", 6737 ctx->def.chan->center_freq, ctx->def.width, ctx); 6738 6739 mutex_lock(&ar->conf_mutex); 6740 6741 spin_lock_bh(&ar->data_lock); 6742 /* TODO: In case of there is one more channel context left, populate 6743 * rx_channel with the channel of that remaining channel context. 
6744 */ 6745 ar->rx_channel = NULL; 6746 spin_unlock_bh(&ar->data_lock); 6747 6748 mutex_unlock(&ar->conf_mutex); 6749 } 6750 6751 static enum wmi_phy_mode 6752 ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar, 6753 enum wmi_phy_mode mode, 6754 enum nl80211_band band, 6755 enum nl80211_iftype type) 6756 { 6757 struct ieee80211_sta_eht_cap *eht_cap = NULL; 6758 enum wmi_phy_mode down_mode; 6759 int n = ar->mac.sbands[band].n_iftype_data; 6760 int i; 6761 struct ieee80211_sband_iftype_data *data; 6762 6763 if (mode < MODE_11BE_EHT20) 6764 return mode; 6765 6766 data = ar->mac.iftype[band]; 6767 for (i = 0; i < n; i++) { 6768 if (data[i].types_mask & BIT(type)) { 6769 eht_cap = &data[i].eht_cap; 6770 break; 6771 } 6772 } 6773 6774 if (eht_cap && eht_cap->has_eht) 6775 return mode; 6776 6777 switch (mode) { 6778 case MODE_11BE_EHT20: 6779 down_mode = MODE_11AX_HE20; 6780 break; 6781 case MODE_11BE_EHT40: 6782 down_mode = MODE_11AX_HE40; 6783 break; 6784 case MODE_11BE_EHT80: 6785 down_mode = MODE_11AX_HE80; 6786 break; 6787 case MODE_11BE_EHT80_80: 6788 down_mode = MODE_11AX_HE80_80; 6789 break; 6790 case MODE_11BE_EHT160: 6791 case MODE_11BE_EHT160_160: 6792 case MODE_11BE_EHT320: 6793 down_mode = MODE_11AX_HE160; 6794 break; 6795 case MODE_11BE_EHT20_2G: 6796 down_mode = MODE_11AX_HE20_2G; 6797 break; 6798 case MODE_11BE_EHT40_2G: 6799 down_mode = MODE_11AX_HE40_2G; 6800 break; 6801 default: 6802 down_mode = mode; 6803 break; 6804 } 6805 6806 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 6807 "mac vdev start phymode %s downgrade to %s\n", 6808 ath12k_mac_phymode_str(mode), 6809 ath12k_mac_phymode_str(down_mode)); 6810 6811 return down_mode; 6812 } 6813 6814 static int 6815 ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, 6816 struct ieee80211_chanctx_conf *ctx, 6817 bool restart) 6818 { 6819 struct ath12k *ar = arvif->ar; 6820 struct ath12k_base *ab = ar->ab; 6821 struct wmi_vdev_start_req_arg arg = {}; 6822 const struct cfg80211_chan_def *chandef = &ctx->def; 6823 int he_support = arvif->vif->bss_conf.he_support; 6824 int ret; 6825 6826 lockdep_assert_held(&ar->conf_mutex); 6827 6828 reinit_completion(&ar->vdev_setup_done); 6829 6830 arg.vdev_id = arvif->vdev_id; 6831 arg.dtim_period = arvif->dtim_period; 6832 arg.bcn_intval = arvif->beacon_interval; 6833 arg.punct_bitmap = ~arvif->punct_bitmap; 6834 6835 arg.freq = chandef->chan->center_freq; 6836 arg.band_center_freq1 = chandef->center_freq1; 6837 arg.band_center_freq2 = chandef->center_freq2; 6838 arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width]; 6839 6840 arg.mode = ath12k_mac_check_down_grade_phy_mode(ar, arg.mode, 6841 chandef->chan->band, 6842 arvif->vif->type); 6843 arg.min_power = 0; 6844 arg.max_power = chandef->chan->max_power * 2; 6845 arg.max_reg_power = chandef->chan->max_reg_power * 2; 6846 arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 6847 6848 arg.pref_tx_streams = ar->num_tx_chains; 6849 arg.pref_rx_streams = ar->num_rx_chains; 6850 6851 /* Fill the MBSSID flags to indicate AP is non MBSSID by default 6852 * Corresponding flags would be updated with MBSSID support. 
6853 */ 6854 arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; 6855 6856 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 6857 arg.ssid = arvif->u.ap.ssid; 6858 arg.ssid_len = arvif->u.ap.ssid_len; 6859 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 6860 6861 /* For now allow DFS for AP mode */ 6862 arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 6863 6864 arg.freq2_radar = ctx->radar_enabled; 6865 6866 arg.passive = arg.chan_radar; 6867 6868 spin_lock_bh(&ab->base_lock); 6869 arg.regdomain = ar->ab->dfs_region; 6870 spin_unlock_bh(&ab->base_lock); 6871 6872 /* TODO: Notify if secondary 80Mhz also needs radar detection */ 6873 if (he_support) { 6874 ret = ath12k_set_he_mu_sounding_mode(ar, arvif); 6875 if (ret) { 6876 ath12k_warn(ar->ab, "failed to set he mode vdev %i\n", 6877 arg.vdev_id); 6878 return ret; 6879 } 6880 } 6881 } 6882 6883 arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR); 6884 6885 ath12k_dbg(ab, ATH12K_DBG_MAC, 6886 "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n", 6887 arg.vdev_id, arg.freq, 6888 ath12k_mac_phymode_str(arg.mode), arg.punct_bitmap); 6889 6890 ret = ath12k_wmi_vdev_start(ar, &arg, restart); 6891 if (ret) { 6892 ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n", 6893 restart ? "restart" : "start", arg.vdev_id); 6894 return ret; 6895 } 6896 6897 ret = ath12k_mac_vdev_setup_sync(ar); 6898 if (ret) { 6899 ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n", 6900 arg.vdev_id, restart ? "restart" : "start", ret); 6901 return ret; 6902 } 6903 6904 ar->num_started_vdevs++; 6905 ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n", 6906 arvif->vif->addr, arvif->vdev_id); 6907 6908 /* Enable CAC Flag in the driver by checking the channel DFS cac time, 6909 * i.e dfs_cac_ms value which will be valid only for radar channels 6910 * and state as NL80211_DFS_USABLE which indicates CAC needs to be 6911 * done before channel usage. This flags is used to drop rx packets. 6912 * during CAC. 
6913 */ 6914 /* TODO: Set the flag for other interface types as required */ 6915 if (arvif->vdev_type == WMI_VDEV_TYPE_AP && 6916 chandef->chan->dfs_cac_ms && 6917 chandef->chan->dfs_state == NL80211_DFS_USABLE) { 6918 set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); 6919 ath12k_dbg(ab, ATH12K_DBG_MAC, 6920 "CAC Started in chan_freq %d for vdev %d\n", 6921 arg.freq, arg.vdev_id); 6922 } 6923 6924 ret = ath12k_mac_set_txbf_conf(arvif); 6925 if (ret) 6926 ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n", 6927 arvif->vdev_id, ret); 6928 6929 return 0; 6930 } 6931 6932 static int ath12k_mac_vdev_start(struct ath12k_vif *arvif, 6933 struct ieee80211_chanctx_conf *ctx) 6934 { 6935 return ath12k_mac_vdev_start_restart(arvif, ctx, false); 6936 } 6937 6938 static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif, 6939 struct ieee80211_chanctx_conf *ctx) 6940 { 6941 return ath12k_mac_vdev_start_restart(arvif, ctx, true); 6942 } 6943 6944 struct ath12k_mac_change_chanctx_arg { 6945 struct ieee80211_chanctx_conf *ctx; 6946 struct ieee80211_vif_chanctx_switch *vifs; 6947 int n_vifs; 6948 int next_vif; 6949 struct ath12k *ar; 6950 }; 6951 6952 static void 6953 ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 6954 struct ieee80211_vif *vif) 6955 { 6956 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6957 struct ath12k_mac_change_chanctx_arg *arg = data; 6958 6959 if (arvif->ar != arg->ar) 6960 return; 6961 6962 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx) 6963 return; 6964 6965 arg->n_vifs++; 6966 } 6967 6968 static void 6969 ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 6970 struct ieee80211_vif *vif) 6971 { 6972 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 6973 struct ath12k_mac_change_chanctx_arg *arg = data; 6974 struct ieee80211_chanctx_conf *ctx; 6975 6976 if (arvif->ar != arg->ar) 6977 return; 6978 6979 ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf); 6980 if (ctx != arg->ctx) 6981 return; 6982 6983 if (WARN_ON(arg->next_vif == arg->n_vifs)) 6984 return; 6985 6986 arg->vifs[arg->next_vif].vif = vif; 6987 arg->vifs[arg->next_vif].old_ctx = ctx; 6988 arg->vifs[arg->next_vif].new_ctx = ctx; 6989 arg->next_vif++; 6990 } 6991 6992 static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width) 6993 { 6994 switch (width) { 6995 case NL80211_CHAN_WIDTH_20: 6996 return WMI_CHAN_WIDTH_20; 6997 case NL80211_CHAN_WIDTH_40: 6998 return WMI_CHAN_WIDTH_40; 6999 case NL80211_CHAN_WIDTH_80: 7000 return WMI_CHAN_WIDTH_80; 7001 case NL80211_CHAN_WIDTH_160: 7002 return WMI_CHAN_WIDTH_160; 7003 case NL80211_CHAN_WIDTH_80P80: 7004 return WMI_CHAN_WIDTH_80P80; 7005 case NL80211_CHAN_WIDTH_5: 7006 return WMI_CHAN_WIDTH_5; 7007 case NL80211_CHAN_WIDTH_10: 7008 return WMI_CHAN_WIDTH_10; 7009 case NL80211_CHAN_WIDTH_320: 7010 return WMI_CHAN_WIDTH_320; 7011 default: 7012 WARN_ON(1); 7013 return WMI_CHAN_WIDTH_20; 7014 } 7015 } 7016 7017 static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar, 7018 struct ath12k_vif *arvif, 7019 struct cfg80211_chan_def def) 7020 { 7021 u32 param_id, param_value; 7022 int ret; 7023 7024 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 7025 return 0; 7026 7027 param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP; 7028 param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) | 7029 u32_encode_bits((~def.punctured), 7030 WMI_PEER_PUNCTURE_BITMAP); 7031 7032 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 7033 "punctured bitmap %02x width %d vdev %d\n", 7034 def.punctured, def.width, arvif->vdev_id); 7035 7036 ret = 
ath12k_wmi_set_peer_param(ar, arvif->bssid, 7037 arvif->vdev_id, param_id, 7038 param_value); 7039 7040 return ret; 7041 } 7042 7043 static void 7044 ath12k_mac_update_vif_chan(struct ath12k *ar, 7045 struct ieee80211_vif_chanctx_switch *vifs, 7046 int n_vifs) 7047 { 7048 struct ath12k_base *ab = ar->ab; 7049 struct ath12k_vif *arvif; 7050 int ret; 7051 int i; 7052 bool monitor_vif = false; 7053 7054 lockdep_assert_held(&ar->conf_mutex); 7055 7056 for (i = 0; i < n_vifs; i++) { 7057 arvif = ath12k_vif_to_arvif(vifs[i].vif); 7058 7059 if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) 7060 monitor_vif = true; 7061 7062 ath12k_dbg(ab, ATH12K_DBG_MAC, 7063 "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n", 7064 arvif->vdev_id, 7065 vifs[i].old_ctx->def.chan->center_freq, 7066 vifs[i].new_ctx->def.chan->center_freq, 7067 vifs[i].old_ctx->def.width, 7068 vifs[i].new_ctx->def.width); 7069 7070 if (WARN_ON(!arvif->is_started)) 7071 continue; 7072 7073 if (WARN_ON(!arvif->is_up)) 7074 continue; 7075 7076 ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); 7077 if (ret) { 7078 ath12k_warn(ab, "failed to down vdev %d: %d\n", 7079 arvif->vdev_id, ret); 7080 continue; 7081 } 7082 } 7083 7084 /* All relevant vdevs are downed and associated channel resources 7085 * should be available for the channel switch now. 7086 */ 7087 7088 /* TODO: Update ar->rx_channel */ 7089 7090 for (i = 0; i < n_vifs; i++) { 7091 arvif = ath12k_vif_to_arvif(vifs[i].vif); 7092 7093 if (WARN_ON(!arvif->is_started)) 7094 continue; 7095 7096 arvif->punct_bitmap = vifs[i].new_ctx->def.punctured; 7097 7098 /* Firmware expect vdev_restart only if vdev is up. 7099 * If vdev is down then it expect vdev_stop->vdev_start. 7100 */ 7101 if (arvif->is_up) { 7102 ret = ath12k_mac_vdev_restart(arvif, vifs[i].new_ctx); 7103 if (ret) { 7104 ath12k_warn(ab, "failed to restart vdev %d: %d\n", 7105 arvif->vdev_id, ret); 7106 continue; 7107 } 7108 } else { 7109 ret = ath12k_mac_vdev_stop(arvif); 7110 if (ret) { 7111 ath12k_warn(ab, "failed to stop vdev %d: %d\n", 7112 arvif->vdev_id, ret); 7113 continue; 7114 } 7115 7116 ret = ath12k_mac_vdev_start(arvif, vifs[i].new_ctx); 7117 if (ret) 7118 ath12k_warn(ab, "failed to start vdev %d: %d\n", 7119 arvif->vdev_id, ret); 7120 continue; 7121 } 7122 7123 ret = ath12k_mac_setup_bcn_tmpl(arvif); 7124 if (ret) 7125 ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", 7126 ret); 7127 7128 ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7129 arvif->bssid); 7130 if (ret) { 7131 ath12k_warn(ab, "failed to bring vdev up %d: %d\n", 7132 arvif->vdev_id, ret); 7133 continue; 7134 } 7135 7136 ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif, 7137 vifs[i].new_ctx->def); 7138 if (ret) { 7139 ath12k_warn(ar->ab, 7140 "failed to update puncturing bitmap %02x and width %d: %d\n", 7141 vifs[i].new_ctx->def.punctured, 7142 vifs[i].new_ctx->def.width, ret); 7143 continue; 7144 } 7145 } 7146 7147 /* Restart the internal monitor vdev on new channel */ 7148 if (!monitor_vif && ar->monitor_vdev_created) { 7149 if (!ath12k_mac_monitor_stop(ar)) 7150 ath12k_mac_monitor_start(ar); 7151 } 7152 } 7153 7154 static void 7155 ath12k_mac_update_active_vif_chan(struct ath12k *ar, 7156 struct ieee80211_chanctx_conf *ctx) 7157 { 7158 struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar }; 7159 struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); 7160 7161 lockdep_assert_held(&ar->conf_mutex); 7162 7163 ieee80211_iterate_active_interfaces_atomic(hw, 7164 IEEE80211_IFACE_ITER_NORMAL, 7165 
ath12k_mac_change_chanctx_cnt_iter, 7166 &arg); 7167 if (arg.n_vifs == 0) 7168 return; 7169 7170 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL); 7171 if (!arg.vifs) 7172 return; 7173 7174 ieee80211_iterate_active_interfaces_atomic(hw, 7175 IEEE80211_IFACE_ITER_NORMAL, 7176 ath12k_mac_change_chanctx_fill_iter, 7177 &arg); 7178 7179 ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7180 7181 kfree(arg.vifs); 7182 } 7183 7184 static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7185 struct ieee80211_chanctx_conf *ctx, 7186 u32 changed) 7187 { 7188 struct ath12k *ar; 7189 struct ath12k_base *ab; 7190 7191 ar = ath12k_get_ar_by_ctx(hw, ctx); 7192 if (!ar) 7193 return; 7194 7195 ab = ar->ab; 7196 7197 mutex_lock(&ar->conf_mutex); 7198 7199 ath12k_dbg(ab, ATH12K_DBG_MAC, 7200 "mac chanctx change freq %u width %d ptr %p changed %x\n", 7201 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7202 7203 /* This shouldn't really happen because channel switching should use 7204 * switch_vif_chanctx(). 7205 */ 7206 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7207 goto unlock; 7208 7209 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH || 7210 changed & IEEE80211_CHANCTX_CHANGE_RADAR || 7211 changed & IEEE80211_CHANCTX_CHANGE_PUNCTURING) 7212 ath12k_mac_update_active_vif_chan(ar, ctx); 7213 7214 /* TODO: Recalc radar detection */ 7215 7216 unlock: 7217 mutex_unlock(&ar->conf_mutex); 7218 } 7219 7220 static int ath12k_start_vdev_delay(struct ath12k *ar, 7221 struct ath12k_vif *arvif) 7222 { 7223 struct ath12k_base *ab = ar->ab; 7224 struct ieee80211_vif *vif = arvif->vif; 7225 int ret; 7226 7227 if (WARN_ON(arvif->is_started)) 7228 return -EBUSY; 7229 7230 ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx); 7231 if (ret) { 7232 ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 7233 arvif->vdev_id, vif->addr, 7234 arvif->chanctx.def.chan->center_freq, ret); 7235 return ret; 7236 } 7237 7238 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 7239 ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id); 7240 if (ret) { 7241 ath12k_warn(ab, "failed put monitor up: %d\n", ret); 7242 return ret; 7243 } 7244 } 7245 7246 arvif->is_started = true; 7247 7248 /* TODO: Setup ps and cts/rts protection */ 7249 return 0; 7250 } 7251 7252 static int 7253 ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7254 struct ieee80211_vif *vif, 7255 struct ieee80211_bss_conf *link_conf, 7256 struct ieee80211_chanctx_conf *ctx) 7257 { 7258 struct ath12k *ar; 7259 struct ath12k_base *ab; 7260 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 7261 int ret; 7262 struct ath12k_wmi_peer_create_arg param; 7263 7264 /* For multi radio wiphy, the vdev was not created during add_interface 7265 * create now since we have a channel ctx now to assign to a specific ar/fw 7266 */ 7267 ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx); 7268 if (!ar) { 7269 WARN_ON(1); 7270 return -EINVAL; 7271 } 7272 7273 ab = ar->ab; 7274 7275 mutex_lock(&ar->conf_mutex); 7276 7277 ath12k_dbg(ab, ATH12K_DBG_MAC, 7278 "mac chanctx assign ptr %p vdev_id %i\n", 7279 ctx, arvif->vdev_id); 7280 7281 arvif->punct_bitmap = ctx->def.punctured; 7282 7283 /* for some targets bss peer must be created before vdev_start */ 7284 if (ab->hw_params->vdev_start_delay && 7285 arvif->vdev_type != WMI_VDEV_TYPE_AP && 7286 arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 7287 !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) { 7288 memcpy(&arvif->chanctx, ctx, sizeof(*ctx)); 7289 ret = 0; 7290 goto out; 7291 } 7292 
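/* Immediate-start path (descriptive note, not in the original source):
 * for vdev_start_delay targets a default peer for the radio's own MAC
 * address is created first, then the vdev (or the monitor vdev) is
 * started on this channel context right away.
 */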
7293 if (WARN_ON(arvif->is_started)) { 7294 ret = -EBUSY; 7295 goto out; 7296 } 7297 7298 if (ab->hw_params->vdev_start_delay && 7299 arvif->vdev_type != WMI_VDEV_TYPE_AP && 7300 arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { 7301 param.vdev_id = arvif->vdev_id; 7302 param.peer_type = WMI_PEER_TYPE_DEFAULT; 7303 param.peer_addr = ar->mac_addr; 7304 7305 ret = ath12k_peer_create(ar, arvif, NULL, ¶m); 7306 if (ret) { 7307 ath12k_warn(ab, "failed to create peer after vdev start delay: %d", 7308 ret); 7309 goto out; 7310 } 7311 } 7312 7313 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 7314 ret = ath12k_mac_monitor_start(ar); 7315 if (ret) 7316 goto out; 7317 arvif->is_started = true; 7318 goto out; 7319 } 7320 7321 ret = ath12k_mac_vdev_start(arvif, ctx); 7322 if (ret) { 7323 ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n", 7324 arvif->vdev_id, vif->addr, 7325 ctx->def.chan->center_freq, ret); 7326 goto out; 7327 } 7328 7329 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created) 7330 ath12k_mac_monitor_start(ar); 7331 7332 arvif->is_started = true; 7333 7334 /* TODO: Setup ps and cts/rts protection */ 7335 7336 out: 7337 mutex_unlock(&ar->conf_mutex); 7338 7339 return ret; 7340 } 7341 7342 static void 7343 ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7344 struct ieee80211_vif *vif, 7345 struct ieee80211_bss_conf *link_conf, 7346 struct ieee80211_chanctx_conf *ctx) 7347 { 7348 struct ath12k *ar; 7349 struct ath12k_base *ab; 7350 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 7351 int ret; 7352 7353 /* The vif is expected to be attached to an ar's VDEV. 7354 * We leave the vif/vdev in this function as is 7355 * and not delete the vdev symmetric to assign_vif_chanctx() 7356 * the VDEV will be deleted and unassigned either during 7357 * remove_interface() or when there is a change in channel 7358 * that moves the vif to a new ar 7359 */ 7360 if (!arvif->is_created) 7361 return; 7362 7363 ar = arvif->ar; 7364 ab = ar->ab; 7365 7366 mutex_lock(&ar->conf_mutex); 7367 7368 ath12k_dbg(ab, ATH12K_DBG_MAC, 7369 "mac chanctx unassign ptr %p vdev_id %i\n", 7370 ctx, arvif->vdev_id); 7371 7372 WARN_ON(!arvif->is_started); 7373 7374 if (ab->hw_params->vdev_start_delay && 7375 arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && 7376 ath12k_peer_find_by_addr(ab, ar->mac_addr)) 7377 ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); 7378 7379 if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { 7380 ret = ath12k_mac_monitor_stop(ar); 7381 if (ret) { 7382 mutex_unlock(&ar->conf_mutex); 7383 return; 7384 } 7385 7386 arvif->is_started = false; 7387 } 7388 7389 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) { 7390 ath12k_bss_disassoc(ar, arvif); 7391 ret = ath12k_mac_vdev_stop(arvif); 7392 if (ret) 7393 ath12k_warn(ab, "failed to stop vdev %i: %d\n", 7394 arvif->vdev_id, ret); 7395 } 7396 arvif->is_started = false; 7397 7398 if (ab->hw_params->vdev_start_delay && 7399 arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) 7400 ath12k_wmi_vdev_down(ar, arvif->vdev_id); 7401 7402 if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && 7403 ar->num_started_vdevs == 1 && ar->monitor_vdev_created) 7404 ath12k_mac_monitor_stop(ar); 7405 7406 mutex_unlock(&ar->conf_mutex); 7407 } 7408 7409 static int 7410 ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7411 struct ieee80211_vif_chanctx_switch *vifs, 7412 int n_vifs, 7413 enum ieee80211_chanctx_switch_mode mode) 7414 { 7415 struct ath12k *ar; 7416 7417 ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx); 7418 if (!ar) 7419 return -EINVAL; 
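/* Note (not in the original source): every vif in this switch uses the
 * radio resolved from the old context; moving a vif to a context owned
 * by a different radio is rejected below.
 */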
7420 
7421 mutex_lock(&ar->conf_mutex);
7422 
7423 /* Switching channels across radios is not allowed */
7424 if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) {
7425 mutex_unlock(&ar->conf_mutex);
7426 return -EINVAL;
7427 }
7428 
7429 ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
7430 "mac chanctx switch n_vifs %d mode %d\n",
7431 n_vifs, mode);
7432 ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
7433 
7434 mutex_unlock(&ar->conf_mutex);
7435 
7436 return 0;
7437 }
7438 
7439 static int
7440 ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
7441 {
7442 struct ath12k_vif *arvif;
7443 int ret = 0;
7444 
7445 mutex_lock(&ar->conf_mutex);
7446 list_for_each_entry(arvif, &ar->arvifs, list) {
7447 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
7448 arvif->vdev_id, param, value);
7449 
7450 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
7451 param, value);
7452 if (ret) {
7453 ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
7454 param, arvif->vdev_id, ret);
7455 break;
7456 }
7457 }
7458 mutex_unlock(&ar->conf_mutex);
7459 return ret;
7460 }
7461 
7462 /* mac80211 stores a device-specific RTS/fragmentation threshold value;
7463 * ath12k pushes it to firmware as a per-interface (vdev) parameter.
7464 */
7465 static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
7466 {
7467 struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
7468 struct ath12k *ar;
7469 int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
7470 
7471 /* Currently we set the rts threshold value to all the vifs across
7472 * all radios of the single wiphy.
7473 * TODO: Once support for vif specific RTS threshold in mac80211 is
7474 * available, ath12k can make use of it.
7475 */
7476 for_each_ar(ah, ar, i) {
7477 ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
7478 if (ret) {
7479 ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d\n",
7480 ar->pdev->pdev_id);
7481 break;
7482 }
7483 }
7484 
7485 return ret;
7486 }
7487 
7488 static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
7489 {
7490 /* Even though there's a WMI vdev param for fragmentation threshold no
7491 * known firmware actually implements it. Moreover it is not possible to
7492 * leave frame fragmentation to mac80211 because firmware clears the
7493 * "more fragments" bit in frame control making it impossible for remote
7494 * devices to reassemble frames.
7495 *
7496 * Hence implement a dummy callback just to say fragmentation isn't
7497 * supported. This effectively prevents mac80211 from doing frame
7498 * fragmentation in software.
7499 */ 7500 return -EOPNOTSUPP; 7501 } 7502 7503 static int ath12k_mac_flush(struct ath12k *ar) 7504 { 7505 long time_left; 7506 int ret = 0; 7507 7508 time_left = wait_event_timeout(ar->dp.tx_empty_waitq, 7509 (atomic_read(&ar->dp.num_tx_pending) == 0), 7510 ATH12K_FLUSH_TIMEOUT); 7511 if (time_left == 0) { 7512 ath12k_warn(ar->ab, 7513 "failed to flush transmit queue, data pkts pending %d\n", 7514 atomic_read(&ar->dp.num_tx_pending)); 7515 ret = -ETIMEDOUT; 7516 } 7517 7518 time_left = wait_event_timeout(ar->txmgmt_empty_waitq, 7519 (atomic_read(&ar->num_pending_mgmt_tx) == 0), 7520 ATH12K_FLUSH_TIMEOUT); 7521 if (time_left == 0) { 7522 ath12k_warn(ar->ab, 7523 "failed to flush mgmt transmit queue, mgmt pkts pending %d\n", 7524 atomic_read(&ar->num_pending_mgmt_tx)); 7525 ret = -ETIMEDOUT; 7526 } 7527 7528 return ret; 7529 } 7530 7531 int ath12k_mac_wait_tx_complete(struct ath12k *ar) 7532 { 7533 ath12k_mac_drain_tx(ar); 7534 return ath12k_mac_flush(ar); 7535 } 7536 7537 static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 7538 u32 queues, bool drop) 7539 { 7540 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 7541 struct ath12k *ar; 7542 int i; 7543 7544 if (drop) 7545 return; 7546 7547 /* vif can be NULL when flush() is considered for hw */ 7548 if (!vif) { 7549 for_each_ar(ah, ar, i) 7550 ath12k_mac_flush(ar); 7551 return; 7552 } 7553 7554 ar = ath12k_get_ar_by_vif(hw, vif); 7555 7556 if (!ar) 7557 return; 7558 7559 ath12k_mac_flush(ar); 7560 } 7561 7562 static int 7563 ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar, 7564 enum nl80211_band band, 7565 const struct cfg80211_bitrate_mask *mask) 7566 { 7567 int num_rates = 0; 7568 int i; 7569 7570 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 7571 num_rates += hweight16(mask->control[band].ht_mcs[i]); 7572 7573 return num_rates; 7574 } 7575 7576 static bool 7577 ath12k_mac_has_single_legacy_rate(struct ath12k *ar, 7578 enum nl80211_band band, 7579 const struct cfg80211_bitrate_mask *mask) 7580 { 7581 int num_rates = 0; 7582 7583 num_rates = hweight32(mask->control[band].legacy); 7584 7585 if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask)) 7586 return false; 7587 7588 if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask)) 7589 return false; 7590 7591 return num_rates == 1; 7592 } 7593 7594 static bool 7595 ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar, 7596 enum nl80211_band band, 7597 const struct cfg80211_bitrate_mask *mask, 7598 int *nss) 7599 { 7600 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 7601 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 7602 u8 ht_nss_mask = 0; 7603 u8 vht_nss_mask = 0; 7604 int i; 7605 7606 /* No need to consider legacy here. 
Basic rates are always present 7607 * in bitrate mask 7608 */ 7609 7610 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7611 if (mask->control[band].ht_mcs[i] == 0) 7612 continue; 7613 else if (mask->control[band].ht_mcs[i] == 7614 sband->ht_cap.mcs.rx_mask[i]) 7615 ht_nss_mask |= BIT(i); 7616 else 7617 return false; 7618 } 7619 7620 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7621 if (mask->control[band].vht_mcs[i] == 0) 7622 continue; 7623 else if (mask->control[band].vht_mcs[i] == 7624 ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 7625 vht_nss_mask |= BIT(i); 7626 else 7627 return false; 7628 } 7629 7630 if (ht_nss_mask != vht_nss_mask) 7631 return false; 7632 7633 if (ht_nss_mask == 0) 7634 return false; 7635 7636 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 7637 return false; 7638 7639 *nss = fls(ht_nss_mask); 7640 7641 return true; 7642 } 7643 7644 static int 7645 ath12k_mac_get_single_legacy_rate(struct ath12k *ar, 7646 enum nl80211_band band, 7647 const struct cfg80211_bitrate_mask *mask, 7648 u32 *rate, u8 *nss) 7649 { 7650 int rate_idx; 7651 u16 bitrate; 7652 u8 preamble; 7653 u8 hw_rate; 7654 7655 if (hweight32(mask->control[band].legacy) != 1) 7656 return -EINVAL; 7657 7658 rate_idx = ffs(mask->control[band].legacy) - 1; 7659 7660 if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) 7661 rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX; 7662 7663 hw_rate = ath12k_legacy_rates[rate_idx].hw_value; 7664 bitrate = ath12k_legacy_rates[rate_idx].bitrate; 7665 7666 if (ath12k_mac_bitrate_is_cck(bitrate)) 7667 preamble = WMI_RATE_PREAMBLE_CCK; 7668 else 7669 preamble = WMI_RATE_PREAMBLE_OFDM; 7670 7671 *nss = 1; 7672 *rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble); 7673 7674 return 0; 7675 } 7676 7677 static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif, 7678 u32 rate, u8 nss, u8 sgi, u8 ldpc) 7679 { 7680 struct ath12k *ar = arvif->ar; 7681 u32 vdev_param; 7682 int ret; 7683 7684 lockdep_assert_held(&ar->conf_mutex); 7685 7686 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n", 7687 arvif->vdev_id, rate, nss, sgi); 7688 7689 vdev_param = WMI_VDEV_PARAM_FIXED_RATE; 7690 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 7691 vdev_param, rate); 7692 if (ret) { 7693 ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n", 7694 rate, ret); 7695 return ret; 7696 } 7697 7698 vdev_param = WMI_VDEV_PARAM_NSS; 7699 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 7700 vdev_param, nss); 7701 if (ret) { 7702 ath12k_warn(ar->ab, "failed to set nss param %d: %d\n", 7703 nss, ret); 7704 return ret; 7705 } 7706 7707 vdev_param = WMI_VDEV_PARAM_SGI; 7708 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 7709 vdev_param, sgi); 7710 if (ret) { 7711 ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n", 7712 sgi, ret); 7713 return ret; 7714 } 7715 7716 vdev_param = WMI_VDEV_PARAM_LDPC; 7717 ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, 7718 vdev_param, ldpc); 7719 if (ret) { 7720 ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n", 7721 ldpc, ret); 7722 return ret; 7723 } 7724 7725 return 0; 7726 } 7727 7728 static bool 7729 ath12k_mac_vht_mcs_range_present(struct ath12k *ar, 7730 enum nl80211_band band, 7731 const struct cfg80211_bitrate_mask *mask) 7732 { 7733 int i; 7734 u16 vht_mcs; 7735 7736 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7737 vht_mcs = mask->control[band].vht_mcs[i]; 7738 7739 switch (vht_mcs) { 7740 case 0: 7741 case BIT(8) - 1: 7742 case 
BIT(9) - 1: 7743 case BIT(10) - 1: 7744 break; 7745 default: 7746 return false; 7747 } 7748 } 7749 7750 return true; 7751 } 7752 7753 static void ath12k_mac_set_bitrate_mask_iter(void *data, 7754 struct ieee80211_sta *sta) 7755 { 7756 struct ath12k_vif *arvif = data; 7757 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 7758 struct ath12k *ar = arvif->ar; 7759 7760 if (arsta->arvif != arvif) 7761 return; 7762 7763 spin_lock_bh(&ar->data_lock); 7764 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7765 spin_unlock_bh(&ar->data_lock); 7766 7767 ieee80211_queue_work(ath12k_ar_to_hw(ar), &arsta->update_wk); 7768 } 7769 7770 static void ath12k_mac_disable_peer_fixed_rate(void *data, 7771 struct ieee80211_sta *sta) 7772 { 7773 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 7774 struct ath12k_vif *arvif = data; 7775 struct ath12k *ar = arvif->ar; 7776 int ret; 7777 7778 if (arsta->arvif != arvif) 7779 return; 7780 7781 ret = ath12k_wmi_set_peer_param(ar, sta->addr, 7782 arvif->vdev_id, 7783 WMI_PEER_PARAM_FIXED_RATE, 7784 WMI_FIXED_RATE_NONE); 7785 if (ret) 7786 ath12k_warn(ar->ab, 7787 "failed to disable peer fixed rate for STA %pM ret %d\n", 7788 sta->addr, ret); 7789 } 7790 7791 static int 7792 ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7793 struct ieee80211_vif *vif, 7794 const struct cfg80211_bitrate_mask *mask) 7795 { 7796 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 7797 struct cfg80211_chan_def def; 7798 struct ath12k *ar = arvif->ar; 7799 enum nl80211_band band; 7800 const u8 *ht_mcs_mask; 7801 const u16 *vht_mcs_mask; 7802 u32 rate; 7803 u8 nss; 7804 u8 sgi; 7805 u8 ldpc; 7806 int single_nss; 7807 int ret; 7808 int num_rates; 7809 7810 if (ath12k_mac_vif_chan(vif, &def)) 7811 return -EPERM; 7812 7813 band = def.chan->band; 7814 ht_mcs_mask = mask->control[band].ht_mcs; 7815 vht_mcs_mask = mask->control[band].vht_mcs; 7816 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7817 7818 sgi = mask->control[band].gi; 7819 if (sgi == NL80211_TXRATE_FORCE_LGI) { 7820 ret = -EINVAL; 7821 goto out; 7822 } 7823 7824 /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it 7825 * requires passing at least one of used basic rates along with them. 7826 * Fixed rate setting across different preambles(legacy, HT, VHT) is 7827 * not supported by the FW. Hence use of FIXED_RATE vdev param is not 7828 * suitable for setting single HT/VHT rates. 7829 * But, there could be a single basic rate passed from userspace which 7830 * can be done through the FIXED_RATE param. 7831 */ 7832 if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) { 7833 ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate, 7834 &nss); 7835 if (ret) { 7836 ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n", 7837 arvif->vdev_id, ret); 7838 goto out; 7839 } 7840 ieee80211_iterate_stations_atomic(hw, 7841 ath12k_mac_disable_peer_fixed_rate, 7842 arvif); 7843 } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7844 &single_nss)) { 7845 rate = WMI_FIXED_RATE_NONE; 7846 nss = single_nss; 7847 } else { 7848 rate = WMI_FIXED_RATE_NONE; 7849 nss = min_t(u32, ar->num_tx_chains, 7850 max(ath12k_mac_max_ht_nss(ht_mcs_mask), 7851 ath12k_mac_max_vht_nss(vht_mcs_mask))); 7852 7853 /* If multiple rates across different preambles are given 7854 * we can reconfigure this info with all peers using PEER_ASSOC 7855 * command with the below exception cases. 7856 * - Single VHT Rate : peer_assoc command accommodates only MCS 7857 * range values i.e 0-7, 0-8, 0-9 for VHT. 
Though mac80211 7858 * mandates passing basic rates along with HT/VHT rates, FW 7859 * doesn't allow switching from VHT to Legacy. Hence instead of 7860 * setting legacy and VHT rates using RATEMASK_CMD vdev cmd, 7861 * we could set this VHT rate as peer fixed rate param, which 7862 * will override FIXED rate and FW rate control algorithm. 7863 * If single VHT rate is passed along with HT rates, we select 7864 * the VHT rate as fixed rate for vht peers. 7865 * - Multiple VHT Rates : When Multiple VHT rates are given,this 7866 * can be set using RATEMASK CMD which uses FW rate-ctl alg. 7867 * TODO: Setting multiple VHT MCS and replacing peer_assoc with 7868 * RATEMASK_CMDID can cover all use cases of setting rates 7869 * across multiple preambles and rates within same type. 7870 * But requires more validation of the command at this point. 7871 */ 7872 7873 num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, 7874 mask); 7875 7876 if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) && 7877 num_rates > 1) { 7878 /* TODO: Handle multiple VHT MCS values setting using 7879 * RATEMASK CMD 7880 */ 7881 ath12k_warn(ar->ab, 7882 "Setting more than one MCS Value in bitrate mask not supported\n"); 7883 ret = -EINVAL; 7884 goto out; 7885 } 7886 7887 ieee80211_iterate_stations_atomic(hw, 7888 ath12k_mac_disable_peer_fixed_rate, 7889 arvif); 7890 7891 mutex_lock(&ar->conf_mutex); 7892 7893 arvif->bitrate_mask = *mask; 7894 ieee80211_iterate_stations_atomic(hw, 7895 ath12k_mac_set_bitrate_mask_iter, 7896 arvif); 7897 7898 mutex_unlock(&ar->conf_mutex); 7899 } 7900 7901 mutex_lock(&ar->conf_mutex); 7902 7903 ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7904 if (ret) { 7905 ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n", 7906 arvif->vdev_id, ret); 7907 } 7908 7909 mutex_unlock(&ar->conf_mutex); 7910 7911 out: 7912 return ret; 7913 } 7914 7915 static void 7916 ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, 7917 enum ieee80211_reconfig_type reconfig_type) 7918 { 7919 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 7920 struct ath12k *ar; 7921 struct ath12k_base *ab; 7922 struct ath12k_vif *arvif; 7923 int recovery_count; 7924 7925 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 7926 return; 7927 7928 ar = ath12k_ah_to_ar(ah, 0); 7929 ab = ar->ab; 7930 7931 mutex_lock(&ar->conf_mutex); 7932 7933 if (ar->state == ATH12K_STATE_RESTARTED) { 7934 ath12k_warn(ar->ab, "pdev %d successfully recovered\n", 7935 ar->pdev->pdev_id); 7936 ar->state = ATH12K_STATE_ON; 7937 ieee80211_wake_queues(hw); 7938 7939 if (ab->is_reset) { 7940 recovery_count = atomic_inc_return(&ab->recovery_count); 7941 ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n", 7942 recovery_count); 7943 /* When there are multiple radios in an SOC, 7944 * the recovery has to be done for each radio 7945 */ 7946 if (recovery_count == ab->num_radios) { 7947 atomic_dec(&ab->reset_count); 7948 complete(&ab->reset_complete); 7949 ab->is_reset = false; 7950 atomic_set(&ab->fail_cont_count, 0); 7951 ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n"); 7952 } 7953 } 7954 7955 list_for_each_entry(arvif, &ar->arvifs, list) { 7956 ath12k_dbg(ab, ATH12K_DBG_BOOT, 7957 "reconfig cipher %d up %d vdev type %d\n", 7958 arvif->key_cipher, 7959 arvif->is_up, 7960 arvif->vdev_type); 7961 /* After trigger disconnect, then upper layer will 7962 * trigger connect again, then the PN number of 7963 * upper layer will be reset to keep up with AP 7964 * side, hence PN number mismatch will not happen. 
7965 */ 7966 if (arvif->is_up && 7967 arvif->vdev_type == WMI_VDEV_TYPE_STA && 7968 arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { 7969 ieee80211_hw_restart_disconnect(arvif->vif); 7970 ath12k_dbg(ab, ATH12K_DBG_BOOT, 7971 "restart disconnect\n"); 7972 } 7973 } 7974 } 7975 7976 mutex_unlock(&ar->conf_mutex); 7977 } 7978 7979 static void 7980 ath12k_mac_update_bss_chan_survey(struct ath12k *ar, 7981 struct ieee80211_channel *channel) 7982 { 7983 int ret; 7984 enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ; 7985 7986 lockdep_assert_held(&ar->conf_mutex); 7987 7988 if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) || 7989 ar->rx_channel != channel) 7990 return; 7991 7992 if (ar->scan.state != ATH12K_SCAN_IDLE) { 7993 ath12k_dbg(ar->ab, ATH12K_DBG_MAC, 7994 "ignoring bss chan info req while scanning..\n"); 7995 return; 7996 } 7997 7998 reinit_completion(&ar->bss_survey_done); 7999 8000 ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type); 8001 if (ret) { 8002 ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n"); 8003 return; 8004 } 8005 8006 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 8007 if (ret == 0) 8008 ath12k_warn(ar->ab, "bss channel survey timed out\n"); 8009 } 8010 8011 static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, 8012 struct survey_info *survey) 8013 { 8014 struct ath12k *ar; 8015 struct ieee80211_supported_band *sband; 8016 struct survey_info *ar_survey; 8017 8018 if (idx >= ATH12K_NUM_CHANS) 8019 return -ENOENT; 8020 8021 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 8022 if (sband && idx >= sband->n_channels) { 8023 idx -= sband->n_channels; 8024 sband = NULL; 8025 } 8026 8027 if (!sband) 8028 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 8029 8030 if (!sband || idx >= sband->n_channels) 8031 return -ENOENT; 8032 8033 ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]); 8034 if (!ar) { 8035 if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) { 8036 memset(survey, 0, sizeof(*survey)); 8037 return 0; 8038 } 8039 return -ENOENT; 8040 } 8041 8042 ar_survey = &ar->survey[idx]; 8043 8044 mutex_lock(&ar->conf_mutex); 8045 8046 ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 8047 8048 spin_lock_bh(&ar->data_lock); 8049 memcpy(survey, ar_survey, sizeof(*survey)); 8050 spin_unlock_bh(&ar->data_lock); 8051 8052 survey->channel = &sband->channels[idx]; 8053 8054 if (ar->rx_channel == survey->channel) 8055 survey->filled |= SURVEY_INFO_IN_USE; 8056 8057 mutex_unlock(&ar->conf_mutex); 8058 return 0; 8059 } 8060 8061 static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw, 8062 struct ieee80211_vif *vif, 8063 struct ieee80211_sta *sta, 8064 struct station_info *sinfo) 8065 { 8066 struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta); 8067 8068 sinfo->rx_duration = arsta->rx_duration; 8069 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 8070 8071 sinfo->tx_duration = arsta->tx_duration; 8072 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION); 8073 8074 if (!arsta->txrate.legacy && !arsta->txrate.nss) 8075 return; 8076 8077 if (arsta->txrate.legacy) { 8078 sinfo->txrate.legacy = arsta->txrate.legacy; 8079 } else { 8080 sinfo->txrate.mcs = arsta->txrate.mcs; 8081 sinfo->txrate.nss = arsta->txrate.nss; 8082 sinfo->txrate.bw = arsta->txrate.bw; 8083 sinfo->txrate.he_gi = arsta->txrate.he_gi; 8084 sinfo->txrate.he_dcm = arsta->txrate.he_dcm; 8085 sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc; 8086 } 8087 sinfo->txrate.flags = 
arsta->txrate.flags; 8088 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 8089 8090 /* TODO: Use real NF instead of default one. */ 8091 sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR; 8092 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); 8093 } 8094 8095 static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw, 8096 struct ieee80211_vif *vif) 8097 { 8098 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 8099 struct ath12k *ar; 8100 8101 ar = ath12k_ah_to_ar(ah, 0); 8102 8103 mutex_lock(&ar->conf_mutex); 8104 8105 spin_lock_bh(&ar->data_lock); 8106 ar->scan.roc_notify = false; 8107 spin_unlock_bh(&ar->data_lock); 8108 8109 ath12k_scan_abort(ar); 8110 8111 mutex_unlock(&ar->conf_mutex); 8112 8113 cancel_delayed_work_sync(&ar->scan.timeout); 8114 8115 return 0; 8116 } 8117 8118 static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, 8119 struct ieee80211_vif *vif, 8120 struct ieee80211_channel *chan, 8121 int duration, 8122 enum ieee80211_roc_type type) 8123 { 8124 struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); 8125 struct ath12k_hw *ah = ath12k_hw_to_ah(hw); 8126 struct ath12k_wmi_scan_req_arg arg; 8127 struct ath12k *ar; 8128 u32 scan_time_msec; 8129 int ret; 8130 8131 ar = ath12k_ah_to_ar(ah, 0); 8132 8133 mutex_lock(&ar->conf_mutex); 8134 spin_lock_bh(&ar->data_lock); 8135 8136 switch (ar->scan.state) { 8137 case ATH12K_SCAN_IDLE: 8138 reinit_completion(&ar->scan.started); 8139 reinit_completion(&ar->scan.completed); 8140 reinit_completion(&ar->scan.on_channel); 8141 ar->scan.state = ATH12K_SCAN_STARTING; 8142 ar->scan.is_roc = true; 8143 ar->scan.vdev_id = arvif->vdev_id; 8144 ar->scan.roc_freq = chan->center_freq; 8145 ar->scan.roc_notify = true; 8146 ret = 0; 8147 break; 8148 case ATH12K_SCAN_STARTING: 8149 case ATH12K_SCAN_RUNNING: 8150 case ATH12K_SCAN_ABORTING: 8151 ret = -EBUSY; 8152 break; 8153 } 8154 8155 spin_unlock_bh(&ar->data_lock); 8156 8157 if (ret) 8158 goto exit; 8159 8160 scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2; 8161 8162 memset(&arg, 0, sizeof(arg)); 8163 ath12k_wmi_start_scan_init(ar, &arg); 8164 arg.num_chan = 1; 8165 arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list), 8166 GFP_KERNEL); 8167 if (!arg.chan_list) { 8168 ret = -ENOMEM; 8169 goto exit; 8170 } 8171 8172 arg.vdev_id = arvif->vdev_id; 8173 arg.scan_id = ATH12K_SCAN_ID; 8174 arg.chan_list[0] = chan->center_freq; 8175 arg.dwell_time_active = scan_time_msec; 8176 arg.dwell_time_passive = scan_time_msec; 8177 arg.max_scan_time = scan_time_msec; 8178 arg.scan_f_passive = 1; 8179 arg.burst_duration = duration; 8180 8181 ret = ath12k_start_scan(ar, &arg); 8182 if (ret) { 8183 ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret); 8184 8185 spin_lock_bh(&ar->data_lock); 8186 ar->scan.state = ATH12K_SCAN_IDLE; 8187 spin_unlock_bh(&ar->data_lock); 8188 goto free_chan_list; 8189 } 8190 8191 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 8192 if (ret == 0) { 8193 ath12k_warn(ar->ab, "failed to switch to channel for roc scan\n"); 8194 ret = ath12k_scan_stop(ar); 8195 if (ret) 8196 ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret); 8197 ret = -ETIMEDOUT; 8198 goto free_chan_list; 8199 } 8200 8201 ieee80211_queue_delayed_work(hw, &ar->scan.timeout, 8202 msecs_to_jiffies(duration)); 8203 8204 ret = 0; 8205 8206 free_chan_list: 8207 kfree(arg.chan_list); 8208 exit: 8209 mutex_unlock(&ar->conf_mutex); 8210 8211 return ret; 8212 } 8213 8214 static const struct ieee80211_ops ath12k_ops = { 8215 .tx = 
ath12k_mac_op_tx, 8216 .wake_tx_queue = ieee80211_handle_wake_tx_queue, 8217 .start = ath12k_mac_op_start, 8218 .stop = ath12k_mac_op_stop, 8219 .reconfig_complete = ath12k_mac_op_reconfig_complete, 8220 .add_interface = ath12k_mac_op_add_interface, 8221 .remove_interface = ath12k_mac_op_remove_interface, 8222 .update_vif_offload = ath12k_mac_op_update_vif_offload, 8223 .config = ath12k_mac_op_config, 8224 .bss_info_changed = ath12k_mac_op_bss_info_changed, 8225 .configure_filter = ath12k_mac_op_configure_filter, 8226 .hw_scan = ath12k_mac_op_hw_scan, 8227 .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, 8228 .set_key = ath12k_mac_op_set_key, 8229 .sta_state = ath12k_mac_op_sta_state, 8230 .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, 8231 .sta_rc_update = ath12k_mac_op_sta_rc_update, 8232 .conf_tx = ath12k_mac_op_conf_tx, 8233 .set_antenna = ath12k_mac_op_set_antenna, 8234 .get_antenna = ath12k_mac_op_get_antenna, 8235 .ampdu_action = ath12k_mac_op_ampdu_action, 8236 .add_chanctx = ath12k_mac_op_add_chanctx, 8237 .remove_chanctx = ath12k_mac_op_remove_chanctx, 8238 .change_chanctx = ath12k_mac_op_change_chanctx, 8239 .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx, 8240 .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx, 8241 .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx, 8242 .set_rts_threshold = ath12k_mac_op_set_rts_threshold, 8243 .set_frag_threshold = ath12k_mac_op_set_frag_threshold, 8244 .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask, 8245 .get_survey = ath12k_mac_op_get_survey, 8246 .flush = ath12k_mac_op_flush, 8247 .sta_statistics = ath12k_mac_op_sta_statistics, 8248 .remain_on_channel = ath12k_mac_op_remain_on_channel, 8249 .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, 8250 }; 8251 8252 static void ath12k_mac_update_ch_list(struct ath12k *ar, 8253 struct ieee80211_supported_band *band, 8254 u32 freq_low, u32 freq_high) 8255 { 8256 int i; 8257 8258 if (!(freq_low && freq_high)) 8259 return; 8260 8261 for (i = 0; i < band->n_channels; i++) { 8262 if (band->channels[i].center_freq < freq_low || 8263 band->channels[i].center_freq > freq_high) 8264 band->channels[i].flags |= IEEE80211_CHAN_DISABLED; 8265 } 8266 8267 ar->freq_low = freq_low; 8268 ar->freq_high = freq_high; 8269 } 8270 8271 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band) 8272 { 8273 struct ath12k_pdev *pdev = ar->pdev; 8274 struct ath12k_pdev_cap *pdev_cap = &pdev->cap; 8275 8276 if (band == WMI_HOST_WLAN_2G_CAP) 8277 return pdev_cap->band[NL80211_BAND_2GHZ].phy_id; 8278 8279 if (band == WMI_HOST_WLAN_5G_CAP) 8280 return pdev_cap->band[NL80211_BAND_5GHZ].phy_id; 8281 8282 ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band); 8283 8284 return 0; 8285 } 8286 8287 static int ath12k_mac_setup_channels_rates(struct ath12k *ar, 8288 u32 supported_bands, 8289 struct ieee80211_supported_band *bands[]) 8290 { 8291 struct ieee80211_supported_band *band; 8292 struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap; 8293 struct ath12k_hw *ah = ar->ah; 8294 void *channels; 8295 u32 phy_id; 8296 8297 BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) + 8298 ARRAY_SIZE(ath12k_5ghz_channels) + 8299 ARRAY_SIZE(ath12k_6ghz_channels)) != 8300 ATH12K_NUM_CHANS); 8301 8302 reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx]; 8303 8304 if (supported_bands & WMI_HOST_WLAN_2G_CAP) { 8305 channels = kmemdup(ath12k_2ghz_channels, 8306 sizeof(ath12k_2ghz_channels), 8307 GFP_KERNEL); 8308 if (!channels) 8309 return -ENOMEM; 8310 8311 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 8312 band->band = 
NL80211_BAND_2GHZ; 8313 band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels); 8314 band->channels = channels; 8315 band->n_bitrates = ath12k_g_rates_size; 8316 band->bitrates = ath12k_g_rates; 8317 bands[NL80211_BAND_2GHZ] = band; 8318 8319 if (ar->ab->hw_params->single_pdev_only) { 8320 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP); 8321 reg_cap = &ar->ab->hal_reg_cap[phy_id]; 8322 } 8323 ath12k_mac_update_ch_list(ar, band, 8324 reg_cap->low_2ghz_chan, 8325 reg_cap->high_2ghz_chan); 8326 } 8327 8328 if (supported_bands & WMI_HOST_WLAN_5G_CAP) { 8329 if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) { 8330 channels = kmemdup(ath12k_6ghz_channels, 8331 sizeof(ath12k_6ghz_channels), GFP_KERNEL); 8332 if (!channels) { 8333 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 8334 return -ENOMEM; 8335 } 8336 8337 ar->supports_6ghz = true; 8338 band = &ar->mac.sbands[NL80211_BAND_6GHZ]; 8339 band->band = NL80211_BAND_6GHZ; 8340 band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels); 8341 band->channels = channels; 8342 band->n_bitrates = ath12k_a_rates_size; 8343 band->bitrates = ath12k_a_rates; 8344 bands[NL80211_BAND_6GHZ] = band; 8345 ath12k_mac_update_ch_list(ar, band, 8346 reg_cap->low_5ghz_chan, 8347 reg_cap->high_5ghz_chan); 8348 ah->use_6ghz_regd = true; 8349 } 8350 8351 if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) { 8352 channels = kmemdup(ath12k_5ghz_channels, 8353 sizeof(ath12k_5ghz_channels), 8354 GFP_KERNEL); 8355 if (!channels) { 8356 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 8357 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 8358 return -ENOMEM; 8359 } 8360 8361 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 8362 band->band = NL80211_BAND_5GHZ; 8363 band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels); 8364 band->channels = channels; 8365 band->n_bitrates = ath12k_a_rates_size; 8366 band->bitrates = ath12k_a_rates; 8367 bands[NL80211_BAND_5GHZ] = band; 8368 8369 if (ar->ab->hw_params->single_pdev_only) { 8370 phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP); 8371 reg_cap = &ar->ab->hal_reg_cap[phy_id]; 8372 } 8373 8374 ath12k_mac_update_ch_list(ar, band, 8375 reg_cap->low_5ghz_chan, 8376 reg_cap->high_5ghz_chan); 8377 } 8378 } 8379 8380 return 0; 8381 } 8382 8383 static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah) 8384 { 8385 struct ath12k *ar; 8386 int i; 8387 u16 interface_modes = U16_MAX; 8388 8389 for_each_ar(ah, ar, i) 8390 interface_modes &= ar->ab->hw_params->interface_modes; 8391 8392 return interface_modes == U16_MAX ? 
0 : interface_modes; 8393 } 8394 8395 static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah, 8396 enum nl80211_iftype type) 8397 { 8398 struct ath12k *ar; 8399 int i; 8400 u16 interface_modes, mode; 8401 bool is_enable = true; 8402 8403 mode = BIT(type); 8404 for_each_ar(ah, ar, i) { 8405 interface_modes = ar->ab->hw_params->interface_modes; 8406 if (!(interface_modes & mode)) { 8407 is_enable = false; 8408 break; 8409 } 8410 } 8411 8412 return is_enable; 8413 } 8414 8415 static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) 8416 { 8417 struct wiphy *wiphy = ah->hw->wiphy; 8418 struct ieee80211_iface_combination *combinations; 8419 struct ieee80211_iface_limit *limits; 8420 int n_limits, max_interfaces; 8421 bool ap, mesh, p2p; 8422 8423 ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP); 8424 p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE); 8425 8426 mesh = IS_ENABLED(CONFIG_MAC80211_MESH) && 8427 ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT); 8428 8429 combinations = kzalloc(sizeof(*combinations), GFP_KERNEL); 8430 if (!combinations) 8431 return -ENOMEM; 8432 8433 if ((ap || mesh) && !p2p) { 8434 n_limits = 2; 8435 max_interfaces = 16; 8436 } else if (p2p) { 8437 n_limits = 3; 8438 if (ap || mesh) 8439 max_interfaces = 16; 8440 else 8441 max_interfaces = 3; 8442 } else { 8443 n_limits = 1; 8444 max_interfaces = 1; 8445 } 8446 8447 limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL); 8448 if (!limits) { 8449 kfree(combinations); 8450 return -ENOMEM; 8451 } 8452 8453 limits[0].max = 1; 8454 limits[0].types |= BIT(NL80211_IFTYPE_STATION); 8455 8456 if (ap || mesh || p2p) 8457 limits[1].max = max_interfaces; 8458 8459 if (ap) 8460 limits[1].types |= BIT(NL80211_IFTYPE_AP); 8461 8462 if (mesh) 8463 limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT); 8464 8465 if (p2p) { 8466 limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) | 8467 BIT(NL80211_IFTYPE_P2P_GO); 8468 limits[2].max = 1; 8469 limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE); 8470 } 8471 8472 combinations[0].limits = limits; 8473 combinations[0].n_limits = n_limits; 8474 combinations[0].max_interfaces = max_interfaces; 8475 combinations[0].num_different_channels = 1; 8476 combinations[0].beacon_int_infra_match = true; 8477 combinations[0].beacon_int_min_gcd = 100; 8478 combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 8479 BIT(NL80211_CHAN_WIDTH_20) | 8480 BIT(NL80211_CHAN_WIDTH_40) | 8481 BIT(NL80211_CHAN_WIDTH_80); 8482 8483 wiphy->iface_combinations = combinations; 8484 wiphy->n_iface_combinations = 1; 8485 8486 return 0; 8487 } 8488 8489 static const u8 ath12k_if_types_ext_capa[] = { 8490 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 8491 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 8492 }; 8493 8494 static const u8 ath12k_if_types_ext_capa_sta[] = { 8495 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 8496 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 8497 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, 8498 }; 8499 8500 static const u8 ath12k_if_types_ext_capa_ap[] = { 8501 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 8502 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 8503 [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, 8504 }; 8505 8506 static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { 8507 { 8508 .extended_capabilities = ath12k_if_types_ext_capa, 8509 .extended_capabilities_mask = ath12k_if_types_ext_capa, 8510 .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa), 8511 }, { 8512 .iftype = NL80211_IFTYPE_STATION, 8513 .extended_capabilities 
= ath12k_if_types_ext_capa_sta, 8514 .extended_capabilities_mask = ath12k_if_types_ext_capa_sta, 8515 .extended_capabilities_len = 8516 sizeof(ath12k_if_types_ext_capa_sta), 8517 }, { 8518 .iftype = NL80211_IFTYPE_AP, 8519 .extended_capabilities = ath12k_if_types_ext_capa_ap, 8520 .extended_capabilities_mask = ath12k_if_types_ext_capa_ap, 8521 .extended_capabilities_len = 8522 sizeof(ath12k_if_types_ext_capa_ap), 8523 }, 8524 }; 8525 8526 static void ath12k_mac_cleanup_unregister(struct ath12k *ar) 8527 { 8528 idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar); 8529 idr_destroy(&ar->txmgmt_idr); 8530 8531 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels); 8532 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels); 8533 kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels); 8534 } 8535 8536 static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) 8537 { 8538 struct ieee80211_hw *hw = ah->hw; 8539 struct wiphy *wiphy = hw->wiphy; 8540 struct ath12k *ar; 8541 int i; 8542 8543 for_each_ar(ah, ar, i) 8544 cancel_work_sync(&ar->regd_update_work); 8545 8546 ieee80211_unregister_hw(hw); 8547 8548 for_each_ar(ah, ar, i) 8549 ath12k_mac_cleanup_unregister(ar); 8550 8551 kfree(wiphy->iface_combinations[0].limits); 8552 kfree(wiphy->iface_combinations); 8553 8554 SET_IEEE80211_DEV(hw, NULL); 8555 } 8556 8557 static int ath12k_mac_setup_register(struct ath12k *ar, 8558 u32 *ht_cap, 8559 struct ieee80211_supported_band *bands[]) 8560 { 8561 struct ath12k_pdev_cap *cap = &ar->pdev->cap; 8562 int ret; 8563 8564 init_waitqueue_head(&ar->txmgmt_empty_waitq); 8565 idr_init(&ar->txmgmt_idr); 8566 spin_lock_init(&ar->txmgmt_idr_lock); 8567 8568 ath12k_pdev_caps_update(ar); 8569 8570 ret = ath12k_mac_setup_channels_rates(ar, 8571 cap->supported_bands, 8572 bands); 8573 if (ret) 8574 return ret; 8575 8576 ath12k_mac_setup_ht_vht_cap(ar, cap, ht_cap); 8577 ath12k_mac_setup_sband_iftype_data(ar, cap); 8578 8579 ar->max_num_stations = ath12k_core_get_max_station_per_radio(ar->ab); 8580 ar->max_num_peers = ath12k_core_get_max_peers_per_radio(ar->ab); 8581 8582 return 0; 8583 } 8584 8585 static int ath12k_mac_hw_register(struct ath12k_hw *ah) 8586 { 8587 struct ieee80211_hw *hw = ah->hw; 8588 struct wiphy *wiphy = hw->wiphy; 8589 struct ath12k *ar = ath12k_ah_to_ar(ah, 0); 8590 struct ath12k_base *ab = ar->ab; 8591 struct ath12k_pdev *pdev; 8592 struct ath12k_pdev_cap *cap; 8593 static const u32 cipher_suites[] = { 8594 WLAN_CIPHER_SUITE_TKIP, 8595 WLAN_CIPHER_SUITE_CCMP, 8596 WLAN_CIPHER_SUITE_AES_CMAC, 8597 WLAN_CIPHER_SUITE_BIP_CMAC_256, 8598 WLAN_CIPHER_SUITE_BIP_GMAC_128, 8599 WLAN_CIPHER_SUITE_BIP_GMAC_256, 8600 WLAN_CIPHER_SUITE_GCMP, 8601 WLAN_CIPHER_SUITE_GCMP_256, 8602 WLAN_CIPHER_SUITE_CCMP_256, 8603 }; 8604 int ret, i, j; 8605 u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; 8606 bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; 8607 u8 *mac_addr = NULL; 8608 8609 wiphy->max_ap_assoc_sta = 0; 8610 8611 for_each_ar(ah, ar, i) { 8612 u32 ht_cap_info = 0; 8613 8614 pdev = ar->pdev; 8615 if (ar->ab->pdevs_macaddr_valid) { 8616 ether_addr_copy(ar->mac_addr, pdev->mac_addr); 8617 } else { 8618 ether_addr_copy(ar->mac_addr, ar->ab->mac_addr); 8619 ar->mac_addr[4] += ar->pdev_idx; 8620 } 8621 8622 ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands); 8623 if (ret) 8624 goto err_cleanup_unregister; 8625 8626 ht_cap &= ht_cap_info; 8627 wiphy->max_ap_assoc_sta += ar->max_num_stations; 8628 8629 /* Advertise the max antenna support of all radios, driver can handle 
8630 * per pdev specific antenna setting based on pdev cap when antenna 8631 * changes are made 8632 */ 8633 cap = &pdev->cap; 8634 8635 antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask); 8636 antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask); 8637 8638 if (ar->supports_6ghz) 8639 is_6ghz = true; 8640 8641 if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) 8642 is_raw_mode = true; 8643 8644 if (!ar->ab->hw_params->supports_monitor) 8645 is_monitor_disable = true; 8646 8647 if (i == 0) 8648 mac_addr = ar->mac_addr; 8649 else 8650 mac_addr = ab->mac_addr; 8651 } 8652 8653 wiphy->available_antennas_rx = antennas_rx; 8654 wiphy->available_antennas_tx = antennas_tx; 8655 8656 SET_IEEE80211_PERM_ADDR(hw, mac_addr); 8657 SET_IEEE80211_DEV(hw, ab->dev); 8658 8659 ret = ath12k_mac_setup_iface_combinations(ah); 8660 if (ret) { 8661 ath12k_err(ab, "failed to setup interface combinations: %d\n", ret); 8662 goto err_complete_cleanup_unregister; 8663 } 8664 8665 wiphy->interface_modes = ath12k_mac_get_ifmodes(ah); 8666 8667 if (ah->num_radio == 1 && 8668 wiphy->bands[NL80211_BAND_2GHZ] && 8669 wiphy->bands[NL80211_BAND_5GHZ] && 8670 wiphy->bands[NL80211_BAND_6GHZ]) 8671 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 8672 8673 ieee80211_hw_set(hw, SIGNAL_DBM); 8674 ieee80211_hw_set(hw, SUPPORTS_PS); 8675 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 8676 ieee80211_hw_set(hw, MFP_CAPABLE); 8677 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 8678 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 8679 ieee80211_hw_set(hw, AP_LINK_PS); 8680 ieee80211_hw_set(hw, SPECTRUM_MGMT); 8681 ieee80211_hw_set(hw, CONNECTION_MONITOR); 8682 ieee80211_hw_set(hw, SUPPORTS_PER_STA_GTK); 8683 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 8684 ieee80211_hw_set(hw, QUEUE_CONTROL); 8685 ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); 8686 ieee80211_hw_set(hw, REPORTS_LOW_ACK); 8687 8688 if (ht_cap & WMI_HT_CAP_ENABLED) { 8689 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 8690 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 8691 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 8692 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 8693 ieee80211_hw_set(hw, USES_RSS); 8694 } 8695 8696 wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 8697 wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 8698 8699 /* TODO: Check if HT capability advertised from firmware is different 8700 * for each band for a dual band capable radio. It will be tricky to 8701 * handle it when the ht capability different for each band. 8702 */ 8703 if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) 8704 wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 8705 8706 wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 8707 wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 8708 8709 hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL; 8710 8711 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 8712 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 8713 wiphy->max_remain_on_channel_duration = 5000; 8714 8715 wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 8716 wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 8717 NL80211_FEATURE_AP_SCAN; 8718 8719 /* MLO is not yet supported so disable Wireless Extensions for now 8720 * to make sure ath12k users don't use it. This flag can be removed 8721 * once WIPHY_FLAG_SUPPORTS_MLO is enabled. 
8722 */
8723 wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
8724
8725 hw->queues = ATH12K_HW_MAX_QUEUES;
8726 wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
8727 hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
8728 hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
8729
8730 hw->vif_data_size = sizeof(struct ath12k_vif);
8731 hw->sta_data_size = sizeof(struct ath12k_sta);
8732
8733 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
8734 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
8735
8736 wiphy->cipher_suites = cipher_suites;
8737 wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
8738
8739 wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
8740 wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
8741
8742 if (is_6ghz) {
8743 wiphy_ext_feature_set(wiphy,
8744 NL80211_EXT_FEATURE_FILS_DISCOVERY);
8745 wiphy_ext_feature_set(wiphy,
8746 NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
8747 }
8748
8749 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT);
8750
8751 ath12k_reg_init(hw);
8752
8753 if (!is_raw_mode) {
8754 hw->netdev_features = NETIF_F_HW_CSUM;
8755 ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
8756 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
8757 }
8758
8759 ret = ieee80211_register_hw(hw);
8760 if (ret) {
8761 ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
8762 goto err_free_if_combs;
8763 }
8764
8765 if (is_monitor_disable)
8766 /* There's a race between calling ieee80211_register_hw()
8767 * and here where the monitor mode is enabled for a little
8768 * while. But that time is so short that in practice it makes
8769 * no difference in real life.
8770 */
8771 wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
8772
8773 for_each_ar(ah, ar, i) {
8774 /* Apply the regd received during initialization */
8775 ret = ath12k_regd_update(ar, true);
8776 if (ret) {
8777 ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
8778 goto err_unregister_hw;
8779 }
8780 }
8781
8782 ath12k_debugfs_register(ar);
8783
8784 return 0;
8785
8786 err_unregister_hw:
8787 ieee80211_unregister_hw(hw);
8788
8789 err_free_if_combs:
8790 kfree(wiphy->iface_combinations[0].limits);
8791 kfree(wiphy->iface_combinations);
8792
8793 err_complete_cleanup_unregister:
8794 i = ah->num_radio;
8795
8796 err_cleanup_unregister:
8797 for (j = 0; j < i; j++) {
8798 ar = ath12k_ah_to_ar(ah, j);
8799 ath12k_mac_cleanup_unregister(ar);
8800 }
8801
8802 SET_IEEE80211_DEV(hw, NULL);
8803
8804 return ret;
8805 }
8806
8807 static void ath12k_mac_setup(struct ath12k *ar)
8808 {
8809 struct ath12k_base *ab = ar->ab;
8810 struct ath12k_pdev *pdev = ar->pdev;
8811 u8 pdev_idx = ar->pdev_idx;
8812
8813 ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, pdev_idx);
8814
8815 ar->wmi = &ab->wmi_ab.wmi[pdev_idx];
8816 /* FIXME: wmi[0] is already initialized during attach;
8817 * should we do this again?
8818 */ 8819 ath12k_wmi_pdev_attach(ab, pdev_idx); 8820 8821 ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask; 8822 ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask; 8823 ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask); 8824 ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask); 8825 8826 spin_lock_init(&ar->data_lock); 8827 INIT_LIST_HEAD(&ar->arvifs); 8828 INIT_LIST_HEAD(&ar->ppdu_stats_info); 8829 mutex_init(&ar->conf_mutex); 8830 init_completion(&ar->vdev_setup_done); 8831 init_completion(&ar->vdev_delete_done); 8832 init_completion(&ar->peer_assoc_done); 8833 init_completion(&ar->peer_delete_done); 8834 init_completion(&ar->install_key_done); 8835 init_completion(&ar->bss_survey_done); 8836 init_completion(&ar->scan.started); 8837 init_completion(&ar->scan.completed); 8838 init_completion(&ar->scan.on_channel); 8839 8840 INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work); 8841 INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work); 8842 8843 INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); 8844 skb_queue_head_init(&ar->wmi_mgmt_tx_queue); 8845 clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); 8846 } 8847 8848 int ath12k_mac_register(struct ath12k_base *ab) 8849 { 8850 struct ath12k_hw *ah; 8851 int i; 8852 int ret; 8853 8854 if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 8855 return 0; 8856 8857 /* Initialize channel counters frequency value in hertz */ 8858 ab->cc_freq_hz = 320000; 8859 ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1; 8860 8861 for (i = 0; i < ab->num_hw; i++) { 8862 ah = ab->ah[i]; 8863 8864 ret = ath12k_mac_hw_register(ah); 8865 if (ret) 8866 goto err; 8867 } 8868 8869 return 0; 8870 8871 err: 8872 for (i = i - 1; i >= 0; i--) { 8873 ah = ab->ah[i]; 8874 if (!ah) 8875 continue; 8876 8877 ath12k_mac_hw_unregister(ah); 8878 } 8879 8880 return ret; 8881 } 8882 8883 void ath12k_mac_unregister(struct ath12k_base *ab) 8884 { 8885 struct ath12k_hw *ah; 8886 int i; 8887 8888 for (i = ab->num_hw - 1; i >= 0; i--) { 8889 ah = ab->ah[i]; 8890 if (!ah) 8891 continue; 8892 8893 ath12k_mac_hw_unregister(ah); 8894 } 8895 } 8896 8897 static void ath12k_mac_hw_destroy(struct ath12k_hw *ah) 8898 { 8899 ieee80211_free_hw(ah->hw); 8900 } 8901 8902 static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, 8903 struct ath12k_pdev_map *pdev_map, 8904 u8 num_pdev_map) 8905 { 8906 struct ieee80211_hw *hw; 8907 struct ath12k *ar; 8908 struct ath12k_pdev *pdev; 8909 struct ath12k_hw *ah; 8910 int i; 8911 u8 pdev_idx; 8912 8913 hw = ieee80211_alloc_hw(struct_size(ah, radio, num_pdev_map), 8914 &ath12k_ops); 8915 if (!hw) 8916 return NULL; 8917 8918 ah = ath12k_hw_to_ah(hw); 8919 ah->hw = hw; 8920 ah->num_radio = num_pdev_map; 8921 8922 for (i = 0; i < num_pdev_map; i++) { 8923 ab = pdev_map[i].ab; 8924 pdev_idx = pdev_map[i].pdev_idx; 8925 pdev = &ab->pdevs[pdev_idx]; 8926 8927 ar = ath12k_ah_to_ar(ah, i); 8928 ar->ah = ah; 8929 ar->ab = ab; 8930 ar->hw_link_id = i; 8931 ar->pdev = pdev; 8932 ar->pdev_idx = pdev_idx; 8933 pdev->ar = ar; 8934 8935 ath12k_mac_setup(ar); 8936 } 8937 8938 return ah; 8939 } 8940 8941 void ath12k_mac_destroy(struct ath12k_base *ab) 8942 { 8943 struct ath12k_pdev *pdev; 8944 int i; 8945 8946 for (i = 0; i < ab->num_radios; i++) { 8947 pdev = &ab->pdevs[i]; 8948 if (!pdev->ar) 8949 continue; 8950 8951 pdev->ar = NULL; 8952 } 8953 8954 for (i = 0; i < ab->num_hw; i++) { 8955 if (!ab->ah[i]) 8956 continue; 8957 8958 ath12k_mac_hw_destroy(ab->ah[i]); 8959 ab->ah[i] = NULL; 8960 } 8961 
} 8962 8963 int ath12k_mac_allocate(struct ath12k_base *ab) 8964 { 8965 struct ath12k_hw *ah; 8966 struct ath12k_pdev_map pdev_map[MAX_RADIOS]; 8967 int ret, i, j; 8968 u8 radio_per_hw; 8969 8970 if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) 8971 return 0; 8972 8973 ab->num_hw = ab->num_radios; 8974 radio_per_hw = 1; 8975 8976 for (i = 0; i < ab->num_hw; i++) { 8977 for (j = 0; j < radio_per_hw; j++) { 8978 pdev_map[j].ab = ab; 8979 pdev_map[j].pdev_idx = (i * radio_per_hw) + j; 8980 } 8981 8982 ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw); 8983 if (!ah) { 8984 ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n", 8985 i); 8986 ret = -ENOMEM; 8987 goto err; 8988 } 8989 8990 ab->ah[i] = ah; 8991 } 8992 8993 ath12k_dp_pdev_pre_alloc(ab); 8994 8995 return 0; 8996 8997 err: 8998 for (i = i - 1; i >= 0; i--) { 8999 if (!ab->ah[i]) 9000 continue; 9001 9002 ath12k_mac_hw_destroy(ab->ah[i]); 9003 ab->ah[i] = NULL; 9004 } 9005 9006 return ret; 9007 } 9008
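/* Usage sketch (illustrative only, not compiled as part of the driver):
 * the exported mac lifecycle helpers above are expected to be called
 * from the core init/teardown path roughly in the order shown below.
 * The caller name example_mac_bringup() is hypothetical; the real call
 * sites live in the ath12k core code.
 *
 *	static int example_mac_bringup(struct ath12k_base *ab)
 *	{
 *		int ret;
 *
 *		ret = ath12k_mac_allocate(ab);	// alloc ieee80211_hw + per-radio setup
 *		if (ret)
 *			return ret;
 *
 *		ret = ath12k_mac_register(ab);	// register each hw with mac80211
 *		if (ret)
 *			ath12k_mac_destroy(ab);	// roll back the allocation on failure
 *
 *		return ret;
 *	}
 *
 * Teardown reverses the order: ath12k_mac_unregister(ab) followed by
 * ath12k_mac_destroy(ab).
 */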