// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */
#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"

#define CHAN2G(_idx, _freq) {		\
	.band = NL80211_BAND_2GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

#define CHAN5G(_idx, _freq) {		\
	.band = NL80211_BAND_5GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

#define CHAN6G(_idx, _freq) {		\
	.band = NL80211_BAND_6GHZ,	\
	.center_freq = (_freq),		\
	.hw_value = (_idx),		\
	.max_power = 30,		\
}

static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};

static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};

static const struct ieee80211_channel mt76_channels_6ghz[] = {
	/* UNII-5 */
	CHAN6G(1, 5955),
	CHAN6G(5, 5975),
	CHAN6G(9, 5995),
	CHAN6G(13, 6015),
	CHAN6G(17, 6035),
	CHAN6G(21, 6055),
	CHAN6G(25, 6075),
	CHAN6G(29, 6095),
	CHAN6G(33, 6115),
	CHAN6G(37, 6135),
	CHAN6G(41, 6155),
	CHAN6G(45, 6175),
	CHAN6G(49, 6195),
	CHAN6G(53, 6215),
	CHAN6G(57, 6235),
	CHAN6G(61, 6255),
	CHAN6G(65, 6275),
	CHAN6G(69, 6295),
	CHAN6G(73, 6315),
	CHAN6G(77, 6335),
	CHAN6G(81, 6355),
	CHAN6G(85, 6375),
	CHAN6G(89, 6395),
	CHAN6G(93, 6415),
	/* UNII-6 */
	CHAN6G(97, 6435),
	CHAN6G(101, 6455),
	CHAN6G(105, 6475),
	CHAN6G(109, 6495),
	CHAN6G(113, 6515),
	CHAN6G(117, 6535),
	/* UNII-7 */
	CHAN6G(121, 6555),
	CHAN6G(125, 6575),
	CHAN6G(129, 6595),
	CHAN6G(133, 6615),
	CHAN6G(137, 6635),
	CHAN6G(141, 6655),
	CHAN6G(145, 6675),
	CHAN6G(149, 6695),
	CHAN6G(153, 6715),
	CHAN6G(157, 6735),
	CHAN6G(161, 6755),
	CHAN6G(165, 6775),
	CHAN6G(169, 6795),
	CHAN6G(173, 6815),
	CHAN6G(177, 6835),
	CHAN6G(181, 6855),
	CHAN6G(185, 6875),
	/* UNII-8 */
	CHAN6G(189, 6895),
	CHAN6G(193, 6915),
	CHAN6G(197, 6935),
	CHAN6G(201, 6955),
	CHAN6G(205, 6975),
	CHAN6G(209, 6995),
	CHAN6G(213, 7015),
	CHAN6G(217, 7035),
	CHAN6G(221, 7055),
	CHAN6G(225, 7075),
	CHAN6G(229, 7095),
	CHAN6G(233, 7115),
};

static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
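
/* Default legacy rate table: the first four entries are the CCK rates
 * (2.4 GHz only), followed by the OFDM rates. mt76_register_device() and
 * mt76_register_phy() register the 5/6 GHz sbands with "rates + 4" so the
 * CCK entries are skipped there.
 */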
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);

static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
	{ .start_freq = 2402, .end_freq = 2494, },
	{ .start_freq = 5150, .end_freq = 5350, },
	{ .start_freq = 5350, .end_freq = 5470, },
	{ .start_freq = 5470, .end_freq = 5725, },
	{ .start_freq = 5725, .end_freq = 5950, },
	{ .start_freq = 5945, .end_freq = 6165, },
	{ .start_freq = 6165, .end_freq = 6405, },
	{ .start_freq = 6405, .end_freq = 6525, },
	{ .start_freq = 6525, .end_freq = 6705, },
	{ .start_freq = 6705, .end_freq = 6865, },
	{ .start_freq = 6865, .end_freq = 7125, },
};

static const struct cfg80211_sar_capa mt76_sar_capa = {
	.type = NL80211_SAR_TYPE_POWER,
	.num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
	.freq_ranges = &mt76_sar_freq_ranges[0],
};

static int mt76_led_init(struct mt76_dev *dev)
{
	struct device_node *np = dev->dev->of_node;
	struct ieee80211_hw *hw = dev->hw;
	int led_pin;

	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return 0;

	snprintf(dev->led_name, sizeof(dev->led_name),
		 "mt76-%s", wiphy_name(hw->wiphy));

	dev->led_cdev.name = dev->led_name;
	dev->led_cdev.default_trigger =
		ieee80211_create_tpt_led_trigger(hw,
					IEEE80211_TPT_LEDTRIG_FL_RADIO,
					mt76_tpt_blink,
					ARRAY_SIZE(mt76_tpt_blink));

	np = of_get_child_by_name(np, "led");
	if (np) {
		if (!of_property_read_u32(np, "led-sources", &led_pin))
			dev->led_pin = led_pin;
		dev->led_al = of_property_read_bool(np, "led-active-low");
		of_node_put(np);
	}

	return led_classdev_register(dev->dev, &dev->led_cdev);
}

static void mt76_led_cleanup(struct mt76_dev *dev)
{
	if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
		return;

	led_classdev_unregister(&dev->led_cdev);
}
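
/* Derive the HT/VHT MCS masks and TX STBC capability from the number of
 * spatial streams implied by the phy antenna mask.
 */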
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;
	vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;

	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
	if (ieee80211_hw_check(phy->hw, SUPPORTS_VHT_EXT_NSS_BW))
		vht_cap->vht_mcs.tx_highest |=
				cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}

void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	if (phy->cap.has_2ghz)
		mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
	if (phy->cap.has_5ghz)
		mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
	if (phy->cap.has_6ghz)
		mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);

static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates,
		bool ht, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	if (!ht)
		return 0;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}

static int
mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;

	return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
			       ARRAY_SIZE(mt76_channels_2ghz), rates,
			       n_rates, true, false);
}

static int
mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates, bool vht)
{
	phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;

	return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
			       ARRAY_SIZE(mt76_channels_5ghz), rates,
			       n_rates, true, vht);
}

static int
mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
		   int n_rates)
{
	phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;

	return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
			       ARRAY_SIZE(mt76_channels_6ghz), rates,
			       n_rates, false, false);
}
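
/* After regulatory/DT frequency limits have been applied: if at least one
 * channel of the band is still enabled, point the initial chandef at the
 * band's first channel; otherwise drop the band from the wiphy.
 */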
static void
mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		 enum nl80211_band band)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	bool found = false;
	int i;

	if (!sband)
		return;

	for (i = 0; i < sband->n_channels; i++) {
		if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
			continue;

		found = true;
		break;
	}

	if (found) {
		phy->chandef.chan = &sband->channels[0];
		phy->chan_state = &msband->chan[0];
		return;
	}

	sband->n_channels = 0;
	phy->hw->wiphy->bands[band] = NULL;
}

static int
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	wiphy->available_antennas_tx = phy->antenna_mask;
	wiphy->available_antennas_rx = phy->antenna_mask;

	wiphy->sar_capa = &mt76_sar_capa;
	phy->frp = devm_kcalloc(dev->dev, wiphy->sar_capa->num_freq_ranges,
				sizeof(struct mt76_freq_range_power),
				GFP_KERNEL);
	if (!phy->frp)
		return -ENOMEM;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);
	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);

	return 0;
}

struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops, u8 band_idx)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	phy->priv = hw->priv + phy_size;
	phy->band_idx = band_idx;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
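
/* Register an extra phy allocated with mt76_alloc_phy(): set up its sbands,
 * apply DT frequency limits and register it with mac80211 as a separate
 * hardware instance.
 */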
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	ret = mt76_phy_init(phy, phy->hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phys[phy->band_idx] = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);

void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);

struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
		  const struct ieee80211_ops *ops,
		  const struct mt76_driver_ops *drv_ops)
{
	struct ieee80211_hw *hw;
	struct mt76_phy *phy;
	struct mt76_dev *dev;
	int i;

	hw = ieee80211_alloc_hw(size, ops);
	if (!hw)
		return NULL;

	dev = hw->priv;
	dev->hw = hw;
	dev->dev = pdev;
	dev->drv = drv_ops;
	dev->dma_dev = pdev;

	phy = &dev->phy;
	phy->dev = dev;
	phy->hw = hw;
	phy->band_idx = MT_BAND0;
	dev->phys[phy->band_idx] = phy;

	spin_lock_init(&dev->rx_lock);
	spin_lock_init(&dev->lock);
	spin_lock_init(&dev->cc_lock);
	spin_lock_init(&dev->status_lock);
	mutex_init(&dev->mutex);
	init_waitqueue_head(&dev->tx_wait);

	skb_queue_head_init(&dev->mcu.res_q);
	init_waitqueue_head(&dev->mcu.wait);
	mutex_init(&dev->mcu.mutex);
	dev->tx_worker.fn = mt76_tx_worker;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	spin_lock_init(&dev->token_lock);
	idr_init(&dev->token);

	INIT_LIST_HEAD(&dev->wcid_list);

	INIT_LIST_HEAD(&dev->txwi_cache);
	dev->token_size = dev->drv->token_size;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
		skb_queue_head_init(&dev->rx_skb[i]);

	dev->wq = alloc_ordered_workqueue("mt76", 0);
	if (!dev->wq) {
		ieee80211_free_hw(hw);
		return NULL;
	}

	return dev;
}
EXPORT_SYMBOL_GPL(mt76_alloc_device);

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	ret = mt76_phy_init(phy, hw);
	if (ret)
		return ret;

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	if (phy->cap.has_6ghz) {
		ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		return ret;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);

void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);

void mt76_free_device(struct mt76_dev *dev)
{
	mt76_worker_teardown(&dev->tx_worker);
	if (dev->wq) {
		destroy_workqueue(dev->wq);
		dev->wq = NULL;
	}
	ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);

static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}

static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->phy_idx);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
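
/* Check whether any hardware TX queue of this phy still has frames queued;
 * used e.g. by mt76_set_channel() to let TX drain before switching channels.
 */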
bool mt76_has_tx_pending(struct mt76_phy *phy)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < __MT_TXQ_MAX; i++) {
		q = phy->q_tx[i];
		if (q && q->queued)
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mt76_has_tx_pending);

static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &phy->sband_2g;
	else if (c->band == NL80211_BAND_6GHZ)
		msband = &phy->sband_6g;
	else
		msband = &phy->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	struct mt76_channel_state *state = phy->chan_state;

	state->cc_active += ktime_to_us(ktime_sub(time,
						  phy->survey_time));
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);

void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);

void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	if (phy->chandef.chan->center_freq != chandef->chan->center_freq ||
	    phy->chandef.width != chandef->width)
		phy->dfs_state = MT_DFS_STATE_UNKNOWN;

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	if (!offchannel)
		phy->main_chan = chandef->chan;

	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);

int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	if (idx >= phy->sband_2g.sband.n_channels +
		   phy->sband_5g.sband.n_channels) {
		idx -= (phy->sband_2g.sband.n_channels +
			phy->sband_5g.sband.n_channels);
		sband = &phy->sband_6g;
	} else if (idx >= phy->sband_2g.sband.n_channels) {
		idx -= phy->sband_2g.sband.n_channels;
		sband = &phy->sband_5g;
	} else {
		sband = &phy->sband_2g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key)
{
	struct ieee80211_key_seq seq;
	int i;

	wcid->rx_check_pn = false;

	if (!key)
		return;

	if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
		return;

	wcid->rx_check_pn = true;

	/* data frame */
	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
		ieee80211_get_key_rx_seq(key, i, &seq);
		memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
	}

	/* robust management frame */
	ieee80211_get_key_rx_seq(key, -1, &seq);
	memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
EXPORT_SYMBOL(mt76_wcid_key_setup);

static int
mt76_rx_signal(struct mt76_rx_status *status)
{
	s8 *chain_signal = status->chain_signal;
	int signal = -128;
	u8 chains;

	for (chains = status->chains; chains; chains >>= 1, chain_signal++) {
		int cur, diff;

		cur = *chain_signal;
		if (!(chains & BIT(0)) ||
		    cur > 0)
			continue;

		if (cur > signal)
			swap(cur, signal);

		diff = signal - cur;
		if (diff == 0)
			signal += 3;
		else if (diff <= 2)
			signal += 2;
		else if (diff <= 6)
			signal += 1;
	}

	return signal;
}

static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;
	status->signal = mt76_rx_signal(&mstat);
	if (status->signal <= -128)
		status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		status->boottime_ns = ktime_get_boottime_ns();

	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
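
/* Software CCMP PN replay check for hardware-decrypted frames: frames whose
 * PN is not strictly larger than the last accepted one are flagged
 * RX_FLAG_ONLY_MONITOR so they do not reach the data path.
 */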
static void
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	int security_idx;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return;

	if (status->flag & RX_FLAG_ONLY_MONITOR)
		return;

	if (!wcid || !wcid->rx_check_pn)
		return;

	security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	if (status->flag & RX_FLAG_8023)
		goto skip_hdr_check;

	hdr = mt76_skb_get_hdr(skb);
	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return;
	}

	/* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
	 *
	 * the recipient shall maintain a single replay counter for received
	 * individually addressed robust Management frames that are received
	 * with the To DS subfield equal to 0, [...]
	 */
	if (ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_has_tods(hdr->frame_control))
		security_idx = IEEE80211_NUM_TIDS;

skip_hdr_check:
	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
		     sizeof(status->iv));
	if (ret <= 0) {
		status->flag |= RX_FLAG_ONLY_MONITOR;
		return;
	}

	memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));

	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;
}

static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}

static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
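
/* Software RX airtime accounting (MT_DRV_SW_RX_AIRTIME): A-MPDU subframes
 * are accumulated and reported once per aggregate, all other frames are
 * reported immediately.
 */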
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		if (status->flag & RX_FLAG_8023)
			return;

		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}

static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->phy_idx);
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);

	if (!ps)
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	ieee80211_sta_ps_transition(sta, ps);
}

void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		mt76_check_ccmp_pn(skb);
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}

void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	__skb_queue_head_init(&frames);

	while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
		mt76_check_sta(dev, skb);
		mt76_rx_aggr_reorder(skb, &frames);
	}

	mt76_rx_complete(dev, &frames, napi);
}
EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);

static int
mt76_sta_add(struct mt76_phy *phy, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct mt76_dev *dev = phy->dev;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (phy->band_idx == MT_BAND1)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->phy_idx = phy->band_idx;
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

	mt76_packet_id_init(wcid);
out:
	mutex_unlock(&dev->mutex);

	return ret;
}

void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	mt76_packet_id_flush(dev, wcid);

	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);

static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE)
		return mt76_sta_add(phy, vif, sta);

	if (old_state == IEEE80211_STA_AUTH &&
	    new_state == IEEE80211_STA_ASSOC &&
	    dev->drv->sta_assoc)
		dev->drv->sta_assoc(dev, vif, sta);

	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST)
		mt76_sta_remove(dev, vif, sta);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_sta_state);
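
/* Drop the wcid RCU pointer before mac80211 frees the station, so that
 * concurrent RX and TX status processing can no longer look it up.
 */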
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	mutex_lock(&dev->mutex);
	spin_lock_bh(&dev->status_lock);
	rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
	spin_unlock_bh(&dev->status_lock);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm)
{
	struct mt76_phy *phy = hw->priv;
	int n_chains = hweight8(phy->antenna_mask);
	int delta = mt76_tx_power_nss_delta(n_chains);

	*dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_txpower);

int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar)
{
	struct mt76_phy *phy = hw->priv;
	const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
	int i;

	if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
		return -EINVAL;

	for (i = 0; i < sar->num_sub_specs; i++) {
		u32 index = sar->sub_specs[i].freq_range_index;
		/* SAR specifies the power limitation in 0.25 dBm units */
		s32 power = sar->sub_specs[i].power >> 1;

		if (power > 127 || power < -127)
			power = 127;

		phy->frp[index].range = &capa->freq_ranges[index];
		phy->frp[index].power = power;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_init_sar_power);

int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power)
{
	const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
	int freq, i;

	if (!capa || !phy->frp)
		return power;

	if (power > 127 || power < -127)
		power = 127;

	freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
	for (i = 0; i < capa->num_freq_ranges; i++) {
		if (phy->frp[i].range &&
		    freq >= phy->frp[i].range->start_freq &&
		    freq < phy->frp[i].range->end_freq) {
			power = min_t(int, phy->frp[i].power, power);
			break;
		}
	}

	return power;
}
EXPORT_SYMBOL_GPL(mt76_get_sar_power);

static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (vif->bss_conf.csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
		ieee80211_csa_finish(vif);
}

void mt76_csa_finish(struct mt76_dev *dev)
{
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);

static void
__mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_dev *dev = priv;

	if (!vif->bss_conf.csa_active)
		return;

	dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}

void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);

int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
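
/* Rebuild the CCMP header (PN + key id) that the hardware stripped on RX,
 * so mac80211 can perform its own PN handling on the frame.
 */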
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6);
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);

int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int i, offset = 0, len = sband->n_bitrates;

	if (cck) {
		if (sband != &dev->phy.sband_2g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);

void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);

void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = phy->antenna_mask;
	*rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, u32 flags)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return ERR_PTR(-ENOMEM);

	hwq->flags = flags;

	err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
	if (err < 0)
		return ERR_PTR(err);

	return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);

u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
{
	int offset = 0;

	if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
		offset = 4;

	/* pick the lowest rate for hidden nodes */
	if (rateidx < 0)
		rateidx = 0;

	rateidx += offset;
	if (rateidx >= ARRAY_SIZE(mt76_rates))
		rateidx = offset;

	return mt76_rates[rateidx].hw_value;
}
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);

void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats)
{
	int i, ei = wi->initial_stat_idx;
	u64 *data = wi->data;

	wi->sta_count++;

	data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
	data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];

	for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
		data[ei++] += stats->tx_bw[i];

	for (i = 0; i < 12; i++)
		data[ei++] += stats->tx_mcs[i];

	wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
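
/* Map the current radar detection configuration onto the mt76 DFS state:
 * disabled without a DFS region or while scanning, CAC while beaconing on
 * the chandef is not yet allowed, active otherwise.
 */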
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
	struct ieee80211_hw *hw = phy->hw;
	struct mt76_dev *dev = phy->dev;

	if (dev->region == NL80211_DFS_UNSET ||
	    test_bit(MT76_SCANNING, &phy->state))
		return MT_DFS_STATE_DISABLED;

	if (!hw->conf.radar_enabled) {
		if ((hw->conf.flags & IEEE80211_CONF_MONITOR) &&
		    (phy->chandef.chan->flags & IEEE80211_CHAN_RADAR))
			return MT_DFS_STATE_ACTIVE;

		return MT_DFS_STATE_DISABLED;
	}

	if (!cfg80211_reg_can_beacon(hw->wiphy, &phy->chandef, NL80211_IFTYPE_AP))
		return MT_DFS_STATE_CAC;

	return MT_DFS_STATE_ACTIVE;
}
EXPORT_SYMBOL_GPL(mt76_phy_dfs_state);