1 // SPDX-License-Identifier: ISC 2 /* Copyright (C) 2020 MediaTek Inc. */ 3 4 #include <linux/fs.h> 5 #include "mt7915.h" 6 #include "mcu.h" 7 #include "mac.h" 8 #include "eeprom.h" 9 10 #define fw_name(_dev, name, ...) ({ \ 11 char *_fw; \ 12 switch (mt76_chip(&(_dev)->mt76)) { \ 13 case 0x7915: \ 14 _fw = MT7915_##name; \ 15 break; \ 16 case 0x7981: \ 17 _fw = MT7981_##name; \ 18 break; \ 19 case 0x7986: \ 20 _fw = MT7986_##name##__VA_ARGS__; \ 21 break; \ 22 default: \ 23 _fw = MT7916_##name; \ 24 break; \ 25 } \ 26 _fw; \ 27 }) 28 29 #define fw_name_var(_dev, name) (mt7915_check_adie(dev, false) ? \ 30 fw_name(_dev, name) : \ 31 fw_name(_dev, name, _MT7975)) 32 33 #define MCU_PATCH_ADDRESS 0x200000 34 35 #define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p) 36 #define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m) 37 38 static bool sr_scene_detect = true; 39 module_param(sr_scene_detect, bool, 0644); 40 MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm"); 41 42 static u8 43 mt7915_mcu_get_sta_nss(u16 mcs_map) 44 { 45 u8 nss; 46 47 for (nss = 8; nss > 0; nss--) { 48 u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3; 49 50 if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) 51 break; 52 } 53 54 return nss - 1; 55 } 56 57 static void 58 mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs, 59 u16 mcs_map) 60 { 61 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 62 struct mt7915_dev *dev = msta->vif->phy->dev; 63 enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band; 64 const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs; 65 int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; 66 67 for (nss = 0; nss < max_nss; nss++) { 68 int mcs; 69 70 switch ((mcs_map >> (2 * nss)) & 0x3) { 71 case IEEE80211_HE_MCS_SUPPORT_0_11: 72 mcs = GENMASK(11, 0); 73 break; 74 case IEEE80211_HE_MCS_SUPPORT_0_9: 75 mcs = GENMASK(9, 0); 76 break; 77 case IEEE80211_HE_MCS_SUPPORT_0_7: 78 mcs = GENMASK(7, 0); 79 break; 80 default: 81 mcs = 0; 82 } 83 84 mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1; 85 86 switch (mcs) { 87 case 0 ... 7: 88 mcs = IEEE80211_HE_MCS_SUPPORT_0_7; 89 break; 90 case 8 ... 9: 91 mcs = IEEE80211_HE_MCS_SUPPORT_0_9; 92 break; 93 case 10 ... 11: 94 mcs = IEEE80211_HE_MCS_SUPPORT_0_11; 95 break; 96 default: 97 mcs = IEEE80211_HE_MCS_NOT_SUPPORTED; 98 break; 99 } 100 mcs_map &= ~(0x3 << (nss * 2)); 101 mcs_map |= mcs << (nss * 2); 102 103 /* only support 2ss on 160MHz for mt7915 */ 104 if (is_mt7915(&dev->mt76) && nss > 1 && 105 sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 106 break; 107 } 108 109 *he_mcs = cpu_to_le16(mcs_map); 110 } 111 112 static void 113 mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs, 114 const u16 *mask) 115 { 116 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 117 struct mt7915_dev *dev = msta->vif->phy->dev; 118 u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map); 119 int nss, max_nss = sta->deflink.rx_nss > 3 ? 
4 : sta->deflink.rx_nss; 120 u16 mcs; 121 122 for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) { 123 switch (mcs_map & 0x3) { 124 case IEEE80211_VHT_MCS_SUPPORT_0_9: 125 mcs = GENMASK(9, 0); 126 break; 127 case IEEE80211_VHT_MCS_SUPPORT_0_8: 128 mcs = GENMASK(8, 0); 129 break; 130 case IEEE80211_VHT_MCS_SUPPORT_0_7: 131 mcs = GENMASK(7, 0); 132 break; 133 default: 134 mcs = 0; 135 } 136 137 vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]); 138 139 /* only support 2ss on 160MHz for mt7915 */ 140 if (is_mt7915(&dev->mt76) && nss > 1 && 141 sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 142 break; 143 } 144 } 145 146 static void 147 mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs, 148 const u8 *mask) 149 { 150 int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss; 151 152 for (nss = 0; nss < max_nss; nss++) 153 ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss]; 154 } 155 156 static int 157 mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd, 158 struct sk_buff *skb, int seq) 159 { 160 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 161 struct mt76_connac2_mcu_rxd *rxd; 162 int ret = 0; 163 164 if (!skb) { 165 dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", 166 cmd, seq); 167 168 if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) { 169 dev->recovery.restart = true; 170 wake_up(&dev->mt76.mcu.wait); 171 queue_work(dev->mt76.wq, &dev->reset_work); 172 wake_up(&dev->reset_wait); 173 } 174 175 return -ETIMEDOUT; 176 } 177 178 rxd = (struct mt76_connac2_mcu_rxd *)skb->data; 179 if (seq != rxd->seq && 180 !(rxd->eid == MCU_CMD_EXT_CID && 181 rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT)) 182 return -EAGAIN; 183 184 if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) { 185 skb_pull(skb, sizeof(*rxd) - 4); 186 ret = *skb->data; 187 } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) { 188 skb_pull(skb, sizeof(*rxd) + 4); 189 ret = le32_to_cpu(*(__le32 *)skb->data); 190 } else { 191 skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd)); 192 } 193 194 return ret; 195 } 196 197 static void 198 mt7915_mcu_set_timeout(struct mt76_dev *mdev, int cmd) 199 { 200 if ((cmd & __MCU_CMD_FIELD_ID) != MCU_CMD_EXT_CID) 201 return; 202 203 switch (FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd)) { 204 case MCU_EXT_CMD_THERMAL_CTRL: 205 case MCU_EXT_CMD_GET_MIB_INFO: 206 case MCU_EXT_CMD_PHY_STAT_INFO: 207 case MCU_EXT_CMD_STA_REC_UPDATE: 208 case MCU_EXT_CMD_BSS_INFO_UPDATE: 209 mdev->mcu.timeout = 2 * HZ; 210 return; 211 default: 212 break; 213 } 214 } 215 216 static int 217 mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, 218 int cmd, int *wait_seq) 219 { 220 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); 221 enum mt76_mcuq_id qid; 222 223 if (cmd == MCU_CMD(FW_SCATTER)) 224 qid = MT_MCUQ_FWDL; 225 else if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) 226 qid = MT_MCUQ_WA; 227 else 228 qid = MT_MCUQ_WM; 229 230 mt7915_mcu_set_timeout(mdev, cmd); 231 232 return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0); 233 } 234 235 int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3) 236 { 237 struct { 238 __le32 args[3]; 239 } req = { 240 .args = { 241 cpu_to_le32(a1), 242 cpu_to_le32(a2), 243 cpu_to_le32(a3), 244 }, 245 }; 246 247 return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false); 248 } 249 250 static void 251 mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 252 { 253 if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION) 254 return; 255 256 
ieee80211_csa_finish(vif, 0); 257 } 258 259 static void 260 mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb) 261 { 262 struct mt76_phy *mphy = &dev->mt76.phy; 263 struct mt7915_mcu_csa_notify *c; 264 265 c = (struct mt7915_mcu_csa_notify *)skb->data; 266 267 if (c->band_idx > MT_BAND1) 268 return; 269 270 if ((c->band_idx && !dev->phy.mt76->band_idx) && 271 dev->mt76.phys[MT_BAND1]) 272 mphy = dev->mt76.phys[MT_BAND1]; 273 274 ieee80211_iterate_active_interfaces_atomic(mphy->hw, 275 IEEE80211_IFACE_ITER_RESUME_ALL, 276 mt7915_mcu_csa_finish, mphy->hw); 277 } 278 279 static void 280 mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb) 281 { 282 struct mt76_phy *mphy = &dev->mt76.phy; 283 struct mt7915_mcu_thermal_notify *t; 284 struct mt7915_phy *phy; 285 286 t = (struct mt7915_mcu_thermal_notify *)skb->data; 287 if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE) 288 return; 289 290 if (t->ctrl.band_idx > MT_BAND1) 291 return; 292 293 if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) && 294 dev->mt76.phys[MT_BAND1]) 295 mphy = dev->mt76.phys[MT_BAND1]; 296 297 phy = mphy->priv; 298 phy->throttle_state = t->ctrl.duty.duty_cycle; 299 } 300 301 static void 302 mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb) 303 { 304 struct mt76_phy *mphy = &dev->mt76.phy; 305 struct mt7915_mcu_rdd_report *r; 306 307 r = (struct mt7915_mcu_rdd_report *)skb->data; 308 309 if (r->band_idx > MT_RX_SEL2) 310 return; 311 312 if ((r->band_idx && !dev->phy.mt76->band_idx) && 313 dev->mt76.phys[MT_BAND1]) 314 mphy = dev->mt76.phys[MT_BAND1]; 315 316 if (r->band_idx == MT_RX_SEL2) 317 cfg80211_background_radar_event(mphy->hw->wiphy, 318 &dev->rdd2_chandef, 319 GFP_ATOMIC); 320 else 321 ieee80211_radar_detected(mphy->hw, NULL); 322 dev->hw_pattern++; 323 } 324 325 static void 326 mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb) 327 { 328 struct mt76_connac2_mcu_rxd *rxd; 329 int len = skb->len - sizeof(*rxd); 330 const char *data, *type; 331 332 rxd = (struct mt76_connac2_mcu_rxd *)skb->data; 333 data = (char *)&rxd[1]; 334 335 switch (rxd->s2d_index) { 336 case 0: 337 #if !defined(__FreeBSD__) || defined(CONFIG_MT7915_DEBUGFS) 338 if (mt7915_debugfs_rx_log(dev, data, len)) 339 return; 340 #endif 341 342 type = "WM"; 343 break; 344 case 2: 345 type = "WA"; 346 break; 347 default: 348 type = "unknown"; 349 break; 350 } 351 352 wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data); 353 } 354 355 static void 356 mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) 357 { 358 if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION) 359 return; 360 361 ieee80211_color_change_finish(vif, 0); 362 } 363 364 static void 365 mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb) 366 { 367 struct mt76_phy *mphy = &dev->mt76.phy; 368 struct mt7915_mcu_bcc_notify *b; 369 370 b = (struct mt7915_mcu_bcc_notify *)skb->data; 371 372 if (b->band_idx > MT_BAND1) 373 return; 374 375 if ((b->band_idx && !dev->phy.mt76->band_idx) && 376 dev->mt76.phys[MT_BAND1]) 377 mphy = dev->mt76.phys[MT_BAND1]; 378 379 ieee80211_iterate_active_interfaces_atomic(mphy->hw, 380 IEEE80211_IFACE_ITER_RESUME_ALL, 381 mt7915_mcu_cca_finish, mphy->hw); 382 } 383 384 static void 385 mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb) 386 { 387 struct mt76_connac2_mcu_rxd *rxd; 388 389 rxd = (struct mt76_connac2_mcu_rxd *)skb->data; 390 switch (rxd->ext_eid) { 391 case MCU_EXT_EVENT_THERMAL_PROTECT: 392 
mt7915_mcu_rx_thermal_notify(dev, skb); 393 break; 394 case MCU_EXT_EVENT_RDD_REPORT: 395 mt7915_mcu_rx_radar_detected(dev, skb); 396 break; 397 case MCU_EXT_EVENT_CSA_NOTIFY: 398 mt7915_mcu_rx_csa_notify(dev, skb); 399 break; 400 case MCU_EXT_EVENT_FW_LOG_2_HOST: 401 mt7915_mcu_rx_log_message(dev, skb); 402 break; 403 case MCU_EXT_EVENT_BCC_NOTIFY: 404 mt7915_mcu_rx_bcc_notify(dev, skb); 405 break; 406 default: 407 break; 408 } 409 } 410 411 static void 412 mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb) 413 { 414 struct mt76_connac2_mcu_rxd *rxd; 415 416 rxd = (struct mt76_connac2_mcu_rxd *)skb->data; 417 switch (rxd->eid) { 418 case MCU_EVENT_EXT: 419 mt7915_mcu_rx_ext_event(dev, skb); 420 break; 421 default: 422 break; 423 } 424 dev_kfree_skb(skb); 425 } 426 427 void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb) 428 { 429 struct mt76_connac2_mcu_rxd *rxd; 430 431 rxd = (struct mt76_connac2_mcu_rxd *)skb->data; 432 if ((rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT || 433 rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST || 434 rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP || 435 rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC || 436 rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY || 437 !rxd->seq) && 438 !(rxd->eid == MCU_CMD_EXT_CID && 439 rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT)) 440 mt7915_mcu_rx_unsolicited_event(dev, skb); 441 else 442 mt76_mcu_rx_event(&dev->mt76, skb); 443 } 444 445 static struct tlv * 446 mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len, 447 __le16 *sub_ntlv, __le16 *len) 448 { 449 struct tlv *ptlv, tlv = { 450 .tag = cpu_to_le16(sub_tag), 451 .len = cpu_to_le16(sub_len), 452 }; 453 454 ptlv = skb_put_zero(skb, sub_len); 455 memcpy(ptlv, &tlv, sizeof(tlv)); 456 457 le16_add_cpu(sub_ntlv, 1); 458 le16_add_cpu(len, sub_len); 459 460 return ptlv; 461 } 462 463 /** bss info **/ 464 struct mt7915_he_obss_narrow_bw_ru_data { 465 bool tolerated; 466 }; 467 468 static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 469 struct cfg80211_bss *bss, 470 void *_data) 471 { 472 struct mt7915_he_obss_narrow_bw_ru_data *data = _data; 473 const struct element *elem; 474 475 rcu_read_lock(); 476 elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY); 477 478 if (!elem || elem->datalen <= 10 || 479 !(elem->data[10] & 480 WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) 481 data->tolerated = false; 482 483 rcu_read_unlock(); 484 } 485 486 static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, 487 struct ieee80211_vif *vif) 488 { 489 struct mt7915_he_obss_narrow_bw_ru_data iter_data = { 490 .tolerated = true, 491 }; 492 493 if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR)) 494 return false; 495 496 cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper, 497 mt7915_check_he_obss_narrow_bw_ru_iter, 498 &iter_data); 499 500 /* 501 * If there is at least one AP on radar channel that cannot 502 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU. 
503 */ 504 return !iter_data.tolerated; 505 } 506 507 static void 508 mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, 509 struct mt7915_phy *phy) 510 { 511 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 512 struct bss_info_rf_ch *ch; 513 struct tlv *tlv; 514 int freq1 = chandef->center_freq1; 515 516 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch)); 517 518 ch = (struct bss_info_rf_ch *)tlv; 519 ch->pri_ch = chandef->chan->hw_value; 520 ch->center_ch0 = ieee80211_frequency_to_channel(freq1); 521 ch->bw = mt76_connac_chan_bw(chandef); 522 523 if (chandef->width == NL80211_CHAN_WIDTH_80P80) { 524 int freq2 = chandef->center_freq2; 525 526 ch->center_ch1 = ieee80211_frequency_to_channel(freq2); 527 } 528 529 if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) { 530 struct mt76_phy *mphy = phy->mt76; 531 532 ch->he_ru26_block = 533 mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif); 534 ch->he_all_disable = false; 535 } else { 536 ch->he_all_disable = true; 537 } 538 } 539 540 static void 541 mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, 542 struct mt7915_phy *phy) 543 { 544 int max_nss = hweight8(phy->mt76->antenna_mask); 545 struct bss_info_ra *ra; 546 struct tlv *tlv; 547 548 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra)); 549 550 ra = (struct bss_info_ra *)tlv; 551 ra->op_mode = vif->type == NL80211_IFTYPE_AP; 552 ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC; 553 ra->short_preamble = true; 554 ra->tx_streams = max_nss; 555 ra->rx_streams = max_nss; 556 ra->algo = 4; 557 ra->train_up_rule = 2; 558 ra->train_up_high_thres = 110; 559 ra->train_up_rule_rssi = -70; 560 ra->low_traffic_thres = 2; 561 ra->phy_cap = cpu_to_le32(0xfdf); 562 ra->interval = cpu_to_le32(500); 563 ra->fast_interval = cpu_to_le32(100); 564 } 565 566 static void 567 mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, 568 struct mt7915_phy *phy) 569 { 570 #define DEFAULT_HE_PE_DURATION 4 571 #define DEFAULT_HE_DURATION_RTS_THRES 1023 572 const struct ieee80211_sta_he_cap *cap; 573 struct bss_info_he *he; 574 struct tlv *tlv; 575 576 cap = mt76_connac_get_he_phy_cap(phy->mt76, vif); 577 578 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he)); 579 580 he = (struct bss_info_he *)tlv; 581 he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; 582 if (!he->he_pe_duration) 583 he->he_pe_duration = DEFAULT_HE_PE_DURATION; 584 585 he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); 586 if (!he->he_rts_thres) 587 he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); 588 589 he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80; 590 he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160; 591 he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80; 592 } 593 594 static void 595 mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb) 596 { 597 #define TXD_CMP_MAP1 GENMASK(15, 0) 598 #define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23)) 599 struct bss_info_hw_amsdu *amsdu; 600 struct tlv *tlv; 601 602 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu)); 603 604 amsdu = (struct bss_info_hw_amsdu *)tlv; 605 amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1); 606 amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2); 607 amsdu->trig_thres = cpu_to_le16(2); 608 amsdu->enable = true; 609 } 610 611 static void 612 mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy) 613 { 614 struct bss_info_bmc_rate *bmc; 615 struct 
cfg80211_chan_def *chandef = &phy->mt76->chandef; 616 enum nl80211_band band = chandef->chan->band; 617 struct tlv *tlv; 618 619 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc)); 620 621 bmc = (struct bss_info_bmc_rate *)tlv; 622 if (band == NL80211_BAND_2GHZ) { 623 bmc->short_preamble = true; 624 } else { 625 bmc->bc_trans = cpu_to_le16(0x2000); 626 bmc->mc_trans = cpu_to_le16(0x2080); 627 } 628 } 629 630 static int 631 mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif, 632 bool bssid, bool enable) 633 { 634 struct mt7915_dev *dev = phy->dev; 635 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 636 u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START; 637 u32 mask = phy->omac_mask >> 32 & ~BIT(idx); 638 const u8 *addr = vif->addr; 639 struct { 640 u8 mode; 641 u8 force_clear; 642 u8 clear_bitmap[8]; 643 u8 entry_count; 644 u8 write; 645 u8 band; 646 647 u8 index; 648 u8 bssid; 649 u8 addr[ETH_ALEN]; 650 } __packed req = { 651 .mode = !!mask || enable, 652 .entry_count = 1, 653 .write = 1, 654 .band = phy->mt76->band_idx, 655 .index = idx * 2 + bssid, 656 }; 657 658 if (bssid) 659 addr = vif->bss_conf.bssid; 660 661 if (enable) 662 ether_addr_copy(req.addr, addr); 663 664 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), &req, 665 sizeof(req), true); 666 } 667 668 int mt7915_mcu_add_bss_info(struct mt7915_phy *phy, 669 struct ieee80211_vif *vif, int enable) 670 { 671 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 672 struct mt7915_dev *dev = phy->dev; 673 struct sk_buff *skb; 674 675 if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) { 676 mt7915_mcu_muar_config(phy, vif, false, enable); 677 mt7915_mcu_muar_config(phy, vif, true, enable); 678 } 679 680 skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL, 681 MT7915_BSS_UPDATE_MAX_SIZE); 682 if (IS_ERR(skb)) 683 return PTR_ERR(skb); 684 685 /* bss_omac must be first */ 686 if (enable) 687 mt76_connac_mcu_bss_omac_tlv(skb, vif); 688 689 mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76, 690 mvif->sta.wcid.idx, enable); 691 692 if (vif->type == NL80211_IFTYPE_MONITOR) 693 goto out; 694 695 if (enable) { 696 mt7915_mcu_bss_rfch_tlv(skb, vif, phy); 697 mt7915_mcu_bss_bmc_tlv(skb, phy); 698 mt7915_mcu_bss_ra_tlv(skb, vif, phy); 699 mt7915_mcu_bss_hw_amsdu_tlv(skb); 700 701 if (vif->bss_conf.he_support) 702 mt7915_mcu_bss_he_tlv(skb, vif, phy); 703 704 if (mvif->mt76.omac_idx >= EXT_BSSID_START && 705 mvif->mt76.omac_idx < REPEATER_BSSID_START) 706 mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76); 707 } 708 out: 709 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 710 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 711 } 712 713 /** starec & wtbl **/ 714 int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev, 715 struct ieee80211_ampdu_params *params, 716 bool enable) 717 { 718 struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv; 719 struct mt7915_vif *mvif = msta->vif; 720 int ret; 721 722 mt76_worker_disable(&dev->mt76.tx_worker); 723 if (enable && !params->amsdu) 724 msta->wcid.amsdu = false; 725 ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, 726 MCU_EXT_CMD(STA_REC_UPDATE), 727 enable, true); 728 mt76_worker_enable(&dev->mt76.tx_worker); 729 730 return ret; 731 } 732 733 int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev, 734 struct ieee80211_ampdu_params *params, 735 bool enable) 736 { 737 struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv; 738 struct mt7915_vif *mvif = msta->vif; 739 740 return 
mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, 741 MCU_EXT_CMD(STA_REC_UPDATE), 742 enable, false); 743 } 744 745 static void 746 mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta, 747 struct ieee80211_vif *vif) 748 { 749 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 750 struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; 751 struct ieee80211_he_mcs_nss_supp mcs_map; 752 struct sta_rec_he *he; 753 struct tlv *tlv; 754 u32 cap = 0; 755 756 if (!sta->deflink.he_cap.has_he) 757 return; 758 759 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he)); 760 761 he = (struct sta_rec_he *)tlv; 762 763 if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE) 764 cap |= STA_REC_HE_CAP_HTC; 765 766 if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 767 cap |= STA_REC_HE_CAP_BSR; 768 769 if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 770 cap |= STA_REC_HE_CAP_OM; 771 772 if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU) 773 cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU; 774 775 if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 776 cap |= STA_REC_HE_CAP_BQR; 777 778 if (elem->phy_cap_info[0] & 779 (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G | 780 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G)) 781 cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT; 782 783 if (mvif->cap.he_ldpc && 784 (elem->phy_cap_info[1] & 785 IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)) 786 cap |= STA_REC_HE_CAP_LDPC; 787 788 if (elem->phy_cap_info[1] & 789 IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US) 790 cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI; 791 792 if (elem->phy_cap_info[2] & 793 IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US) 794 cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI; 795 796 if (elem->phy_cap_info[2] & 797 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ) 798 cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC; 799 800 if (elem->phy_cap_info[2] & 801 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ) 802 cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC; 803 804 if (elem->phy_cap_info[6] & 805 IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB) 806 cap |= STA_REC_HE_CAP_TRIG_CQI_FK; 807 808 if (elem->phy_cap_info[6] & 809 IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE) 810 cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE; 811 812 if (elem->phy_cap_info[7] & 813 IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI) 814 cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI; 815 816 if (elem->phy_cap_info[7] & 817 IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ) 818 cap |= STA_REC_HE_CAP_GT_80M_TX_STBC; 819 820 if (elem->phy_cap_info[7] & 821 IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ) 822 cap |= STA_REC_HE_CAP_GT_80M_RX_STBC; 823 824 if (elem->phy_cap_info[8] & 825 IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI) 826 cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI; 827 828 if (elem->phy_cap_info[8] & 829 IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI) 830 cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI; 831 832 if (elem->phy_cap_info[9] & 833 IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU) 834 cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242; 835 836 if (elem->phy_cap_info[9] & 837 IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU) 838 cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242; 839 840 he->he_cap = cpu_to_le32(cap); 841 842 mcs_map = sta->deflink.he_cap.he_mcs_nss_supp; 843 switch (sta->deflink.bandwidth) { 844 case IEEE80211_STA_RX_BW_160: 845 if (elem->phy_cap_info[0] & 846 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) 847 
mt7915_mcu_set_sta_he_mcs(sta, 848 &he->max_nss_mcs[CMD_HE_MCS_BW8080], 849 le16_to_cpu(mcs_map.rx_mcs_80p80)); 850 851 mt7915_mcu_set_sta_he_mcs(sta, 852 &he->max_nss_mcs[CMD_HE_MCS_BW160], 853 le16_to_cpu(mcs_map.rx_mcs_160)); 854 fallthrough; 855 default: 856 mt7915_mcu_set_sta_he_mcs(sta, 857 &he->max_nss_mcs[CMD_HE_MCS_BW80], 858 le16_to_cpu(mcs_map.rx_mcs_80)); 859 break; 860 } 861 862 he->t_frame_dur = 863 HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]); 864 he->max_ampdu_exp = 865 HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]); 866 867 he->bw_set = 868 HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]); 869 he->device_class = 870 HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]); 871 he->punc_pream_rx = 872 HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]); 873 874 he->dcm_tx_mode = 875 HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]); 876 he->dcm_tx_max_nss = 877 HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]); 878 he->dcm_rx_mode = 879 HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]); 880 he->dcm_rx_max_nss = 881 HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]); 882 he->dcm_rx_max_nss = 883 HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]); 884 885 he->pkt_ext = 2; 886 } 887 888 static void 889 mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 890 struct ieee80211_sta *sta, struct ieee80211_vif *vif) 891 { 892 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 893 struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem; 894 struct sta_rec_muru *muru; 895 struct tlv *tlv; 896 897 if (vif->type != NL80211_IFTYPE_STATION && 898 vif->type != NL80211_IFTYPE_AP) 899 return; 900 901 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru)); 902 903 muru = (struct sta_rec_muru *)tlv; 904 905 muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer || 906 mvif->cap.vht_mu_ebfer || 907 mvif->cap.vht_mu_ebfee; 908 if (!is_mt7915(&dev->mt76)) 909 muru->cfg.mimo_ul_en = true; 910 muru->cfg.ofdma_dl_en = true; 911 912 if (sta->deflink.vht_cap.vht_supported) 913 muru->mimo_dl.vht_mu_bfee = 914 !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE); 915 916 if (!sta->deflink.he_cap.has_he) 917 return; 918 919 muru->mimo_dl.partial_bw_dl_mimo = 920 HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]); 921 922 muru->mimo_ul.full_ul_mimo = 923 HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]); 924 muru->mimo_ul.partial_ul_mimo = 925 HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); 926 927 muru->ofdma_dl.punc_pream_rx = 928 HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]); 929 muru->ofdma_dl.he_20m_in_40m_2g = 930 HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]); 931 muru->ofdma_dl.he_20m_in_160m = 932 HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); 933 muru->ofdma_dl.he_80m_in_160m = 934 HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]); 935 936 muru->ofdma_ul.t_frame_dur = 937 HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]); 938 muru->ofdma_ul.mu_cascading = 939 HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]); 940 muru->ofdma_ul.uo_ra = 941 HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]); 942 muru->ofdma_ul.rx_ctrl_frame_to_mbss = 943 HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]); 944 } 945 946 static void 947 mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) 948 { 949 struct sta_rec_ht *ht; 950 struct tlv *tlv; 951 952 if (!sta->deflink.ht_cap.ht_supported) 953 
return; 954 955 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); 956 957 ht = (struct sta_rec_ht *)tlv; 958 ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); 959 } 960 961 static void 962 mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) 963 { 964 struct sta_rec_vht *vht; 965 struct tlv *tlv; 966 967 if (!sta->deflink.vht_cap.vht_supported) 968 return; 969 970 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); 971 972 vht = (struct sta_rec_vht *)tlv; 973 vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); 974 vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; 975 vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; 976 } 977 978 static void 979 mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 980 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 981 { 982 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 983 struct sta_rec_amsdu *amsdu; 984 struct tlv *tlv; 985 986 if (vif->type != NL80211_IFTYPE_STATION && 987 vif->type != NL80211_IFTYPE_AP) 988 return; 989 990 if (!sta->deflink.agg.max_amsdu_len) 991 return; 992 993 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); 994 amsdu = (struct sta_rec_amsdu *)tlv; 995 amsdu->max_amsdu_num = 8; 996 amsdu->amsdu_en = true; 997 msta->wcid.amsdu = true; 998 999 switch (sta->deflink.agg.max_amsdu_len) { 1000 case IEEE80211_MAX_MPDU_LEN_VHT_11454: 1001 if (!is_mt7915(&dev->mt76)) { 1002 amsdu->max_mpdu_size = 1003 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; 1004 return; 1005 } 1006 fallthrough; 1007 case IEEE80211_MAX_MPDU_LEN_HT_7935: 1008 case IEEE80211_MAX_MPDU_LEN_VHT_7991: 1009 amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991; 1010 return; 1011 default: 1012 amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; 1013 return; 1014 } 1015 } 1016 1017 static int 1018 mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1019 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1020 { 1021 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1022 struct mt7915_sta *msta; 1023 struct wtbl_req_hdr *wtbl_hdr; 1024 struct mt76_wcid *wcid; 1025 struct tlv *tlv; 1026 1027 msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; 1028 wcid = sta ? 
&msta->wcid : NULL; 1029 1030 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); 1031 wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, 1032 WTBL_RESET_AND_SET, tlv, 1033 &skb); 1034 if (IS_ERR(wtbl_hdr)) 1035 return PTR_ERR(wtbl_hdr); 1036 1037 mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv, 1038 wtbl_hdr); 1039 mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr); 1040 if (sta) 1041 mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv, 1042 wtbl_hdr, mvif->cap.ht_ldpc, 1043 mvif->cap.vht_ldpc); 1044 1045 return 0; 1046 } 1047 1048 static inline bool 1049 mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif, 1050 struct ieee80211_sta *sta, bool bfee) 1051 { 1052 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1053 int sts = hweight16(phy->mt76->chainmask); 1054 1055 if (vif->type != NL80211_IFTYPE_STATION && 1056 vif->type != NL80211_IFTYPE_AP) 1057 return false; 1058 1059 if (!bfee && sts < 2) 1060 return false; 1061 1062 if (sta->deflink.he_cap.has_he) { 1063 struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; 1064 1065 if (bfee) 1066 return mvif->cap.he_su_ebfee && 1067 HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]); 1068 else 1069 return mvif->cap.he_su_ebfer && 1070 HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]); 1071 } 1072 1073 if (sta->deflink.vht_cap.vht_supported) { 1074 u32 cap = sta->deflink.vht_cap.cap; 1075 1076 if (bfee) 1077 return mvif->cap.vht_su_ebfee && 1078 (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE); 1079 else 1080 return mvif->cap.vht_su_ebfer && 1081 (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE); 1082 } 1083 1084 return false; 1085 } 1086 1087 static void 1088 mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf) 1089 { 1090 bf->sounding_phy = MT_PHY_TYPE_OFDM; 1091 bf->ndp_rate = 0; /* mcs0 */ 1092 bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */ 1093 bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */ 1094 } 1095 1096 static void 1097 mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy, 1098 struct sta_rec_bf *bf) 1099 { 1100 struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs; 1101 u8 n = 0; 1102 1103 bf->tx_mode = MT_PHY_TYPE_HT; 1104 1105 if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) && 1106 (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED)) 1107 n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK, 1108 mcs->tx_params); 1109 else if (mcs->rx_mask[3]) 1110 n = 3; 1111 else if (mcs->rx_mask[2]) 1112 n = 2; 1113 else if (mcs->rx_mask[1]) 1114 n = 1; 1115 1116 bf->nrow = hweight8(phy->mt76->chainmask) - 1; 1117 bf->ncol = min_t(u8, bf->nrow, n); 1118 bf->ibf_ncol = n; 1119 } 1120 1121 static void 1122 mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy, 1123 struct sta_rec_bf *bf, bool explicit) 1124 { 1125 struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; 1126 struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap; 1127 u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map); 1128 u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); 1129 u8 tx_ant = hweight8(phy->mt76->chainmask) - 1; 1130 1131 bf->tx_mode = MT_PHY_TYPE_VHT; 1132 1133 if (explicit) { 1134 u8 sts, snd_dim; 1135 1136 mt7915_mcu_sta_sounding_rate(bf); 1137 1138 sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, 1139 pc->cap); 1140 snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 1141 vc->cap); 1142 bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant); 1143 bf->ncol = min_t(u8, 
nss_mcs, bf->nrow); 1144 bf->ibf_ncol = bf->ncol; 1145 1146 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 1147 bf->nrow = 1; 1148 } else { 1149 bf->nrow = tx_ant; 1150 bf->ncol = min_t(u8, nss_mcs, bf->nrow); 1151 bf->ibf_ncol = nss_mcs; 1152 1153 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) 1154 bf->ibf_nrow = 1; 1155 } 1156 } 1157 1158 static void 1159 mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif, 1160 struct mt7915_phy *phy, struct sta_rec_bf *bf) 1161 { 1162 struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap; 1163 struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem; 1164 const struct ieee80211_sta_he_cap *vc = 1165 mt76_connac_get_he_phy_cap(phy->mt76, vif); 1166 const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem; 1167 u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80); 1168 u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); 1169 u8 snd_dim, sts; 1170 1171 bf->tx_mode = MT_PHY_TYPE_HE_SU; 1172 1173 mt7915_mcu_sta_sounding_rate(bf); 1174 1175 bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB, 1176 pe->phy_cap_info[6]); 1177 bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB, 1178 pe->phy_cap_info[6]); 1179 snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 1180 ve->phy_cap_info[5]); 1181 sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK, 1182 pe->phy_cap_info[4]); 1183 bf->nrow = min_t(u8, snd_dim, sts); 1184 bf->ncol = min_t(u8, nss_mcs, bf->nrow); 1185 bf->ibf_ncol = bf->ncol; 1186 1187 if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160) 1188 return; 1189 1190 /* go over for 160MHz and 80p80 */ 1191 if (pe->phy_cap_info[0] & 1192 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) { 1193 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160); 1194 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); 1195 1196 bf->ncol_gt_bw80 = nss_mcs; 1197 } 1198 1199 if (pe->phy_cap_info[0] & 1200 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) { 1201 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80); 1202 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map); 1203 1204 if (bf->ncol_gt_bw80) 1205 bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs); 1206 else 1207 bf->ncol_gt_bw80 = nss_mcs; 1208 } 1209 1210 snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK, 1211 ve->phy_cap_info[5]); 1212 sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK, 1213 pe->phy_cap_info[4]); 1214 1215 bf->nrow_gt_bw80 = min_t(int, snd_dim, sts); 1216 } 1217 1218 static void 1219 mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1220 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1221 { 1222 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1223 struct mt7915_phy *phy = mvif->phy; 1224 int tx_ant = hweight8(phy->mt76->chainmask) - 1; 1225 struct sta_rec_bf *bf; 1226 struct tlv *tlv; 1227 static const u8 matrix[4][4] = { 1228 {0, 0, 0, 0}, 1229 {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */ 1230 {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ 1231 {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */ 1232 }; 1233 bool ebf; 1234 1235 if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) 1236 return; 1237 1238 ebf = mt7915_is_ebf_supported(phy, vif, sta, false); 1239 if (!ebf && !dev->ibf) 1240 return; 1241 1242 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf)); 1243 bf = (struct sta_rec_bf *)tlv; 1244 1245 /* he: eBF only, in accordance with spec 1246 * vht: support eBF and iBF 1247 * ht: iBF only, since mac80211 lacks of eBF support 1248 */ 1249 if (sta->deflink.he_cap.has_he && ebf) 
1250 mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); 1251 else if (sta->deflink.vht_cap.vht_supported) 1252 mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf); 1253 else if (sta->deflink.ht_cap.ht_supported) 1254 mt7915_mcu_sta_bfer_ht(sta, phy, bf); 1255 else 1256 return; 1257 1258 bf->bf_cap = ebf ? ebf : dev->ibf << 1; 1259 bf->bw = sta->deflink.bandwidth; 1260 bf->ibf_dbw = sta->deflink.bandwidth; 1261 bf->ibf_nrow = tx_ant; 1262 1263 if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) 1264 bf->ibf_timeout = 0x48; 1265 else 1266 bf->ibf_timeout = 0x18; 1267 1268 if (ebf && bf->nrow != tx_ant) 1269 bf->mem_20m = matrix[tx_ant][bf->ncol]; 1270 else 1271 bf->mem_20m = matrix[bf->nrow][bf->ncol]; 1272 1273 switch (sta->deflink.bandwidth) { 1274 case IEEE80211_STA_RX_BW_160: 1275 case IEEE80211_STA_RX_BW_80: 1276 bf->mem_total = bf->mem_20m * 2; 1277 break; 1278 case IEEE80211_STA_RX_BW_40: 1279 bf->mem_total = bf->mem_20m; 1280 break; 1281 case IEEE80211_STA_RX_BW_20: 1282 default: 1283 break; 1284 } 1285 } 1286 1287 static void 1288 mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1289 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1290 { 1291 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1292 struct mt7915_phy *phy = mvif->phy; 1293 int tx_ant = hweight8(phy->mt76->chainmask) - 1; 1294 struct sta_rec_bfee *bfee; 1295 struct tlv *tlv; 1296 u8 nrow = 0; 1297 1298 if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he)) 1299 return; 1300 1301 if (!mt7915_is_ebf_supported(phy, vif, sta, true)) 1302 return; 1303 1304 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); 1305 bfee = (struct sta_rec_bfee *)tlv; 1306 1307 if (sta->deflink.he_cap.has_he) { 1308 struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; 1309 1310 nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 1311 pe->phy_cap_info[5]); 1312 } else if (sta->deflink.vht_cap.vht_supported) { 1313 struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; 1314 1315 nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 1316 pc->cap); 1317 } 1318 1319 /* reply with identity matrix to avoid 2x2 BF negative gain */ 1320 bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2); 1321 } 1322 1323 static enum mcu_mmps_mode 1324 mt7915_mcu_get_mmps_mode(enum ieee80211_smps_mode smps) 1325 { 1326 switch (smps) { 1327 case IEEE80211_SMPS_OFF: 1328 return MCU_MMPS_DISABLE; 1329 case IEEE80211_SMPS_STATIC: 1330 return MCU_MMPS_STATIC; 1331 case IEEE80211_SMPS_DYNAMIC: 1332 return MCU_MMPS_DYNAMIC; 1333 default: 1334 return MCU_MMPS_DISABLE; 1335 } 1336 } 1337 1338 int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, 1339 struct ieee80211_vif *vif, 1340 struct ieee80211_sta *sta, 1341 void *data, u32 field) 1342 { 1343 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1344 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1345 struct sta_phy *phy = data; 1346 struct sta_rec_ra_fixed *ra; 1347 struct sk_buff *skb; 1348 struct tlv *tlv; 1349 1350 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1351 &msta->wcid); 1352 if (IS_ERR(skb)) 1353 return PTR_ERR(skb); 1354 1355 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); 1356 ra = (struct sta_rec_ra_fixed *)tlv; 1357 1358 switch (field) { 1359 case RATE_PARAM_AUTO: 1360 break; 1361 case RATE_PARAM_FIXED: 1362 case RATE_PARAM_FIXED_MCS: 1363 case RATE_PARAM_FIXED_GI: 1364 case RATE_PARAM_FIXED_HE_LTF: 1365 if (phy) 1366 ra->phy = *phy; 1367 
break; 1368 case RATE_PARAM_MMPS_UPDATE: 1369 ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode); 1370 break; 1371 case RATE_PARAM_SPE_UPDATE: 1372 ra->spe_idx = *(u8 *)data; 1373 break; 1374 default: 1375 break; 1376 } 1377 ra->field = cpu_to_le32(field); 1378 1379 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1380 MCU_EXT_CMD(STA_REC_UPDATE), true); 1381 } 1382 1383 int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1384 struct ieee80211_sta *sta) 1385 { 1386 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1387 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1388 struct wtbl_req_hdr *wtbl_hdr; 1389 struct tlv *sta_wtbl; 1390 struct sk_buff *skb; 1391 int ret; 1392 1393 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1394 &msta->wcid); 1395 if (IS_ERR(skb)) 1396 return PTR_ERR(skb); 1397 1398 sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, 1399 sizeof(struct tlv)); 1400 wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, 1401 WTBL_SET, sta_wtbl, &skb); 1402 if (IS_ERR(wtbl_hdr)) 1403 return PTR_ERR(wtbl_hdr); 1404 1405 mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); 1406 1407 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, 1408 MCU_EXT_CMD(STA_REC_UPDATE), true); 1409 if (ret) 1410 return ret; 1411 1412 return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, NULL, 1413 RATE_PARAM_MMPS_UPDATE); 1414 } 1415 1416 static int 1417 mt7915_mcu_set_spe_idx(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1418 struct ieee80211_sta *sta) 1419 { 1420 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1421 struct mt76_phy *mphy = mvif->phy->mt76; 1422 u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask); 1423 1424 return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &spe_idx, 1425 RATE_PARAM_SPE_UPDATE); 1426 } 1427 1428 static int 1429 mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, 1430 struct ieee80211_vif *vif, 1431 struct ieee80211_sta *sta) 1432 { 1433 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1434 struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; 1435 struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; 1436 enum nl80211_band band = chandef->chan->band; 1437 struct sta_phy phy = {}; 1438 int ret, nrates = 0; 1439 1440 #define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ 1441 do { \ 1442 u8 i, gi = mask->control[band]._gi; \ 1443 gi = (_he) ? 
gi : gi == NL80211_TXRATE_FORCE_SGI; \ 1444 for (i = 0; i <= sta->deflink.bandwidth; i++) { \ 1445 phy.sgi |= gi << (i << (_he)); \ 1446 phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\ 1447 } \ 1448 for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \ 1449 if (!mask->control[band]._mcs[i]) \ 1450 continue; \ 1451 nrates += hweight16(mask->control[band]._mcs[i]); \ 1452 phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \ 1453 if (_ht) \ 1454 phy.mcs += 8 * i; \ 1455 } \ 1456 } while (0) 1457 1458 if (sta->deflink.he_cap.has_he) { 1459 __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); 1460 } else if (sta->deflink.vht_cap.vht_supported) { 1461 __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); 1462 } else if (sta->deflink.ht_cap.ht_supported) { 1463 __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); 1464 } else { 1465 nrates = hweight32(mask->control[band].legacy); 1466 phy.mcs = ffs(mask->control[band].legacy) - 1; 1467 } 1468 #undef __sta_phy_bitrate_mask_check 1469 1470 /* fall back to auto rate control */ 1471 if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && 1472 mask->control[band].he_gi == GENMASK(7, 0) && 1473 mask->control[band].he_ltf == GENMASK(7, 0) && 1474 nrates != 1) 1475 return 0; 1476 1477 /* fixed single rate */ 1478 if (nrates == 1) { 1479 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1480 RATE_PARAM_FIXED_MCS); 1481 if (ret) 1482 return ret; 1483 } 1484 1485 /* fixed GI */ 1486 if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || 1487 mask->control[band].he_gi != GENMASK(7, 0)) { 1488 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1489 u32 addr; 1490 1491 /* firmware updates only TXCMD but doesn't take WTBL into 1492 * account, so driver should update here to reflect the 1493 * actual txrate hardware sends out. 1494 */ 1495 addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); 1496 if (sta->deflink.he_cap.has_he) 1497 mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); 1498 else 1499 mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); 1500 1501 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1502 RATE_PARAM_FIXED_GI); 1503 if (ret) 1504 return ret; 1505 } 1506 1507 /* fixed HE_LTF */ 1508 if (mask->control[band].he_ltf != GENMASK(7, 0)) { 1509 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1510 RATE_PARAM_FIXED_HE_LTF); 1511 if (ret) 1512 return ret; 1513 } 1514 1515 return mt7915_mcu_set_spe_idx(dev, vif, sta); 1516 } 1517 1518 static void 1519 mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, 1520 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1521 { 1522 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1523 struct mt76_phy *mphy = mvif->phy->mt76; 1524 struct cfg80211_chan_def *chandef = &mphy->chandef; 1525 struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; 1526 enum nl80211_band band = chandef->chan->band; 1527 struct sta_rec_ra *ra; 1528 struct tlv *tlv; 1529 u32 supp_rate = sta->deflink.supp_rates[band]; 1530 u32 cap = sta->wme ? 
STA_CAP_WMM : 0; 1531 1532 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); 1533 ra = (struct sta_rec_ra *)tlv; 1534 1535 ra->valid = true; 1536 ra->auto_rate = true; 1537 ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); 1538 ra->channel = chandef->chan->hw_value; 1539 ra->bw = sta->deflink.bandwidth; 1540 ra->phy.bw = sta->deflink.bandwidth; 1541 ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode); 1542 1543 if (supp_rate) { 1544 supp_rate &= mask->control[band].legacy; 1545 ra->rate_len = hweight32(supp_rate); 1546 1547 if (band == NL80211_BAND_2GHZ) { 1548 ra->supp_mode = MODE_CCK; 1549 ra->supp_cck_rate = supp_rate & GENMASK(3, 0); 1550 1551 if (ra->rate_len > 4) { 1552 ra->supp_mode |= MODE_OFDM; 1553 ra->supp_ofdm_rate = supp_rate >> 4; 1554 } 1555 } else { 1556 ra->supp_mode = MODE_OFDM; 1557 ra->supp_ofdm_rate = supp_rate; 1558 } 1559 } 1560 1561 if (sta->deflink.ht_cap.ht_supported) { 1562 ra->supp_mode |= MODE_HT; 1563 ra->af = sta->deflink.ht_cap.ampdu_factor; 1564 ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); 1565 1566 cap |= STA_CAP_HT; 1567 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) 1568 cap |= STA_CAP_SGI_20; 1569 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) 1570 cap |= STA_CAP_SGI_40; 1571 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) 1572 cap |= STA_CAP_TX_STBC; 1573 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) 1574 cap |= STA_CAP_RX_STBC; 1575 if (mvif->cap.ht_ldpc && 1576 (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) 1577 cap |= STA_CAP_LDPC; 1578 1579 mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, 1580 mask->control[band].ht_mcs); 1581 ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs; 1582 } 1583 1584 if (sta->deflink.vht_cap.vht_supported) { 1585 u8 af; 1586 1587 ra->supp_mode |= MODE_VHT; 1588 af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, 1589 sta->deflink.vht_cap.cap); 1590 ra->af = max_t(u8, ra->af, af); 1591 1592 cap |= STA_CAP_VHT; 1593 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) 1594 cap |= STA_CAP_VHT_SGI_80; 1595 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) 1596 cap |= STA_CAP_VHT_SGI_160; 1597 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) 1598 cap |= STA_CAP_VHT_TX_STBC; 1599 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) 1600 cap |= STA_CAP_VHT_RX_STBC; 1601 if (mvif->cap.vht_ldpc && 1602 (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) 1603 cap |= STA_CAP_VHT_LDPC; 1604 1605 mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, 1606 mask->control[band].vht_mcs); 1607 } 1608 1609 if (sta->deflink.he_cap.has_he) { 1610 ra->supp_mode |= MODE_HE; 1611 cap |= STA_CAP_HE; 1612 1613 if (sta->deflink.he_6ghz_capa.capa) 1614 ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, 1615 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); 1616 } 1617 1618 ra->sta_cap = cpu_to_le32(cap); 1619 } 1620 1621 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1622 struct ieee80211_sta *sta, bool changed) 1623 { 1624 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1625 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1626 struct sk_buff *skb; 1627 int ret; 1628 1629 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1630 &msta->wcid); 1631 if (IS_ERR(skb)) 1632 return PTR_ERR(skb); 1633 1634 /* firmware rc algorithm refers to sta_rec_he for HE control. 
1635 * once dev->rc_work changes the settings driver should also 1636 * update sta_rec_he here. 1637 */ 1638 if (changed) 1639 mt7915_mcu_sta_he_tlv(skb, sta, vif); 1640 1641 /* sta_rec_ra accommodates BW, NSS and only MCS range format 1642 * i.e 0-{7,8,9} for VHT. 1643 */ 1644 mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); 1645 1646 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, 1647 MCU_EXT_CMD(STA_REC_UPDATE), true); 1648 if (ret) 1649 return ret; 1650 1651 /* sta_rec_ra_fixed accommodates single rate, (HE)GI and HE_LTE, 1652 * and updates as peer fixed rate parameters, which overrides 1653 * sta_rec_ra and firmware rate control algorithm. 1654 */ 1655 return mt7915_mcu_add_rate_ctrl_fixed(dev, vif, sta); 1656 } 1657 1658 static int 1659 mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1660 struct ieee80211_sta *sta) 1661 { 1662 #define MT_STA_BSS_GROUP 1 1663 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1664 struct mt7915_sta *msta; 1665 struct { 1666 __le32 action; 1667 u8 wlan_idx_lo; 1668 u8 status; 1669 u8 wlan_idx_hi; 1670 u8 rsv0[5]; 1671 __le32 val; 1672 u8 rsv1[8]; 1673 } __packed req = { 1674 .action = cpu_to_le32(MT_STA_BSS_GROUP), 1675 .val = cpu_to_le32(mvif->mt76.idx % 16), 1676 }; 1677 1678 msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; 1679 req.wlan_idx_lo = to_wcid_lo(msta->wcid.idx); 1680 req.wlan_idx_hi = to_wcid_hi(msta->wcid.idx); 1681 1682 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req, 1683 sizeof(req), true); 1684 } 1685 1686 int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1687 struct ieee80211_sta *sta, int conn_state, bool newly) 1688 { 1689 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1690 struct ieee80211_link_sta *link_sta; 1691 struct mt7915_sta *msta; 1692 struct sk_buff *skb; 1693 int ret; 1694 1695 msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; 1696 link_sta = sta ? &sta->deflink : NULL; 1697 1698 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1699 &msta->wcid); 1700 if (IS_ERR(skb)) 1701 return PTR_ERR(skb); 1702 1703 /* starec basic */ 1704 mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta, 1705 conn_state, newly); 1706 /* tag order is in accordance with firmware dependency. 
*/ 1707 if (sta && conn_state != CONN_STATE_DISCONNECT) { 1708 /* starec bfer */ 1709 mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta); 1710 /* starec ht */ 1711 mt7915_mcu_sta_ht_tlv(skb, sta); 1712 /* starec vht */ 1713 mt7915_mcu_sta_vht_tlv(skb, sta); 1714 /* starec uapsd */ 1715 mt76_connac_mcu_sta_uapsd(skb, vif, sta); 1716 } 1717 1718 if (newly || conn_state != CONN_STATE_DISCONNECT) { 1719 ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta); 1720 if (ret) { 1721 dev_kfree_skb(skb); 1722 return ret; 1723 } 1724 } 1725 1726 if (conn_state == CONN_STATE_DISCONNECT) 1727 goto out; 1728 1729 if (sta) { 1730 /* starec amsdu */ 1731 mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta); 1732 /* starec he */ 1733 mt7915_mcu_sta_he_tlv(skb, sta, vif); 1734 /* starec muru */ 1735 mt7915_mcu_sta_muru_tlv(dev, skb, sta, vif); 1736 /* starec bfee */ 1737 mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta); 1738 } 1739 1740 ret = mt7915_mcu_add_group(dev, vif, sta); 1741 if (ret) { 1742 dev_kfree_skb(skb); 1743 return ret; 1744 } 1745 out: 1746 ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb); 1747 if (ret) 1748 return ret; 1749 1750 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1751 MCU_EXT_CMD(STA_REC_UPDATE), true); 1752 } 1753 1754 int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev) 1755 { 1756 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 1757 struct mtk_wed_device *wed = &dev->mt76.mmio.wed; 1758 struct { 1759 __le32 args[2]; 1760 } req = { 1761 .args[0] = cpu_to_le32(1), 1762 .args[1] = cpu_to_le32(6), 1763 }; 1764 1765 return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL, 1766 &req, sizeof(req)); 1767 #else 1768 return 0; 1769 #endif 1770 } 1771 1772 int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, 1773 struct ieee80211_vif *vif, bool enable) 1774 { 1775 struct mt7915_dev *dev = phy->dev; 1776 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1777 struct { 1778 struct req_hdr { 1779 u8 omac_idx; 1780 u8 band_idx; 1781 __le16 tlv_num; 1782 u8 is_tlv_append; 1783 u8 rsv[3]; 1784 } __packed hdr; 1785 struct req_tlv { 1786 __le16 tag; 1787 __le16 len; 1788 u8 active; 1789 u8 band_idx; 1790 u8 omac_addr[ETH_ALEN]; 1791 } __packed tlv; 1792 } data = { 1793 .hdr = { 1794 .omac_idx = mvif->mt76.omac_idx, 1795 .band_idx = mvif->mt76.band_idx, 1796 .tlv_num = cpu_to_le16(1), 1797 .is_tlv_append = 1, 1798 }, 1799 .tlv = { 1800 .tag = cpu_to_le16(DEV_INFO_ACTIVE), 1801 .len = cpu_to_le16(sizeof(struct req_tlv)), 1802 .active = enable, 1803 .band_idx = mvif->mt76.band_idx, 1804 }, 1805 }; 1806 1807 if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) 1808 return mt7915_mcu_muar_config(phy, vif, false, enable); 1809 1810 memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); 1811 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE), 1812 &data, sizeof(data), true); 1813 } 1814 1815 static void 1816 mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb, 1817 struct sk_buff *skb, struct bss_info_bcn *bcn, 1818 struct ieee80211_mutable_offsets *offs) 1819 { 1820 struct bss_info_bcn_cntdwn *info; 1821 struct tlv *tlv; 1822 int sub_tag; 1823 1824 if (!offs->cntdwn_counter_offs[0]) 1825 return; 1826 1827 sub_tag = vif->bss_conf.csa_active ? 
BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC; 1828 tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info), 1829 &bcn->sub_ntlv, &bcn->len); 1830 info = (struct bss_info_bcn_cntdwn *)tlv; 1831 info->cnt = skb->data[offs->cntdwn_counter_offs[0]]; 1832 } 1833 1834 static void 1835 mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, 1836 struct ieee80211_vif *vif, struct bss_info_bcn *bcn, 1837 struct ieee80211_mutable_offsets *offs) 1838 { 1839 struct bss_info_bcn_mbss *mbss; 1840 const struct element *elem; 1841 struct tlv *tlv; 1842 1843 if (!vif->bss_conf.bssid_indicator) 1844 return; 1845 1846 tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID, 1847 sizeof(*mbss), &bcn->sub_ntlv, 1848 &bcn->len); 1849 1850 mbss = (struct bss_info_bcn_mbss *)tlv; 1851 mbss->offset[0] = cpu_to_le16(offs->tim_offset); 1852 mbss->bitmap = cpu_to_le32(1); 1853 1854 for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, 1855 &skb->data[offs->mbssid_off], 1856 skb->len - offs->mbssid_off) { 1857 const struct element *sub_elem; 1858 1859 if (elem->datalen < 2) 1860 continue; 1861 1862 for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) { 1863 const struct ieee80211_bssid_index *idx; 1864 const u8 *idx_ie; 1865 1866 if (sub_elem->id || sub_elem->datalen < 4) 1867 continue; /* not a valid BSS profile */ 1868 1869 /* Find WLAN_EID_MULTI_BSSID_IDX 1870 * in the merged nontransmitted profile 1871 */ 1872 idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, 1873 sub_elem->data, 1874 sub_elem->datalen); 1875 if (!idx_ie || idx_ie[1] < sizeof(*idx)) 1876 continue; 1877 1878 #if defined(__linux__) 1879 idx = (void *)(idx_ie + 2); 1880 #elif defined(__FreeBSD__) 1881 idx = (const void *)(idx_ie + 2); 1882 #endif 1883 if (!idx->bssid_index || idx->bssid_index > 31) 1884 continue; 1885 1886 mbss->offset[idx->bssid_index] = 1887 cpu_to_le16(idx_ie - skb->data); 1888 mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index)); 1889 } 1890 } 1891 } 1892 1893 static void 1894 mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1895 struct sk_buff *rskb, struct sk_buff *skb, 1896 struct bss_info_bcn *bcn, 1897 struct ieee80211_mutable_offsets *offs) 1898 { 1899 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 1900 struct bss_info_bcn_cont *cont; 1901 struct tlv *tlv; 1902 u8 *buf; 1903 int len = sizeof(*cont) + MT_TXD_SIZE + skb->len; 1904 1905 len = (len & 0x3) ? 
((len | 0x3) + 1) : len; 1906 tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT, 1907 len, &bcn->sub_ntlv, &bcn->len); 1908 1909 cont = (struct bss_info_bcn_cont *)tlv; 1910 cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); 1911 cont->tim_ofs = cpu_to_le16(offs->tim_offset); 1912 1913 if (offs->cntdwn_counter_offs[0]) { 1914 u16 offset = offs->cntdwn_counter_offs[0]; 1915 1916 if (vif->bss_conf.csa_active) 1917 cont->csa_ofs = cpu_to_le16(offset - 4); 1918 if (vif->bss_conf.color_change_active) 1919 cont->bcc_ofs = cpu_to_le16(offset - 3); 1920 } 1921 1922 buf = (u8 *)tlv + sizeof(*cont); 1923 mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 1924 0, BSS_CHANGED_BEACON); 1925 memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); 1926 } 1927 1928 int 1929 mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1930 u32 changed) 1931 { 1932 #define OFFLOAD_TX_MODE_SU BIT(0) 1933 #define OFFLOAD_TX_MODE_MU BIT(1) 1934 struct ieee80211_hw *hw = mt76_hw(dev); 1935 struct mt7915_phy *phy = mt7915_hw_phy(hw); 1936 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1937 struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; 1938 enum nl80211_band band = chandef->chan->band; 1939 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 1940 struct bss_info_bcn *bcn; 1941 struct bss_info_inband_discovery *discov; 1942 struct ieee80211_tx_info *info; 1943 struct sk_buff *rskb, *skb = NULL; 1944 struct tlv *tlv, *sub_tlv; 1945 bool ext_phy = phy != &dev->phy; 1946 u8 *buf, interval; 1947 int len; 1948 1949 if (vif->bss_conf.nontransmitted) 1950 return 0; 1951 1952 rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL, 1953 MT7915_MAX_BSS_OFFLOAD_SIZE); 1954 if (IS_ERR(rskb)) 1955 return PTR_ERR(rskb); 1956 1957 tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 1958 bcn = (struct bss_info_bcn *)tlv; 1959 bcn->enable = true; 1960 1961 if (changed & BSS_CHANGED_FILS_DISCOVERY) { 1962 interval = vif->bss_conf.fils_discovery.max_interval; 1963 skb = ieee80211_get_fils_discovery_tmpl(hw, vif); 1964 } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP && 1965 vif->bss_conf.unsol_bcast_probe_resp_interval) { 1966 interval = vif->bss_conf.unsol_bcast_probe_resp_interval; 1967 skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif); 1968 } 1969 1970 if (!skb) { 1971 dev_kfree_skb(rskb); 1972 return -EINVAL; 1973 } 1974 1975 info = IEEE80211_SKB_CB(skb); 1976 info->control.vif = vif; 1977 info->band = band; 1978 info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); 1979 1980 len = sizeof(*discov) + MT_TXD_SIZE + skb->len; 1981 len = (len & 0x3) ? 
((len | 0x3) + 1) : len; 1982 1983 if (skb->len > MT7915_MAX_BEACON_SIZE) { 1984 dev_err(dev->mt76.dev, "inband discovery size limit exceeded\n"); 1985 dev_kfree_skb(rskb); 1986 dev_kfree_skb(skb); 1987 return -EINVAL; 1988 } 1989 1990 sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, 1991 len, &bcn->sub_ntlv, &bcn->len); 1992 discov = (struct bss_info_inband_discovery *)sub_tlv; 1993 discov->tx_mode = OFFLOAD_TX_MODE_SU; 1994 /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */ 1995 discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); 1996 discov->tx_interval = interval; 1997 discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); 1998 discov->enable = !!interval; 1999 2000 buf = (u8 *)sub_tlv + sizeof(*discov); 2001 2002 mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 2003 0, changed); 2004 memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); 2005 2006 dev_kfree_skb(skb); 2007 2008 return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, 2009 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 2010 } 2011 2012 int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2013 int en, u32 changed) 2014 { 2015 struct mt7915_dev *dev = mt7915_hw_dev(hw); 2016 struct mt7915_phy *phy = mt7915_hw_phy(hw); 2017 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 2018 struct ieee80211_mutable_offsets offs; 2019 struct ieee80211_tx_info *info; 2020 struct sk_buff *skb, *rskb; 2021 struct tlv *tlv; 2022 struct bss_info_bcn *bcn; 2023 int len = MT7915_MAX_BSS_OFFLOAD_SIZE; 2024 bool ext_phy = phy != &dev->phy; 2025 2026 if (vif->bss_conf.nontransmitted) 2027 return 0; 2028 2029 rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 2030 NULL, len); 2031 if (IS_ERR(rskb)) 2032 return PTR_ERR(rskb); 2033 2034 tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 2035 bcn = (struct bss_info_bcn *)tlv; 2036 bcn->enable = en; 2037 2038 if (!en) 2039 goto out; 2040 2041 skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); 2042 if (!skb) { 2043 dev_kfree_skb(rskb); 2044 return -EINVAL; 2045 } 2046 2047 if (skb->len > MT7915_MAX_BEACON_SIZE) { 2048 dev_err(dev->mt76.dev, "Beacon size limit exceeded\n"); 2049 dev_kfree_skb(rskb); 2050 dev_kfree_skb(skb); 2051 return -EINVAL; 2052 } 2053 2054 info = IEEE80211_SKB_CB(skb); 2055 info->hw_queue = FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); 2056 2057 mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs); 2058 mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs); 2059 mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); 2060 dev_kfree_skb(skb); 2061 2062 out: 2063 return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, 2064 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 2065 } 2066 2067 static int mt7915_driver_own(struct mt7915_dev *dev, u8 band) 2068 { 2069 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN); 2070 if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band), 2071 MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) { 2072 dev_err(dev->mt76.dev, "Timeout for driver own\n"); 2073 return -EIO; 2074 } 2075 2076 /* clear irq once driver own succeeds */ 2077 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band), 2078 MT_TOP_LPCR_HOST_BAND_STAT); 2079 2080 return 0; 2081 } 2082 2083 static int 2084 mt7915_firmware_state(struct mt7915_dev *dev, bool wa) 2085 { 2086 u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, 2087 wa ?
FW_STATE_RDY : FW_STATE_FW_DOWNLOAD); 2088 2089 if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, 2090 state, 1000)) { 2091 dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); 2092 return -EIO; 2093 } 2094 return 0; 2095 } 2096 2097 static int mt7915_load_firmware(struct mt7915_dev *dev) 2098 { 2099 int ret; 2100 2101 /* make sure fw is download state */ 2102 if (mt7915_firmware_state(dev, false)) { 2103 /* restart firmware once */ 2104 mt76_connac_mcu_restart(&dev->mt76); 2105 ret = mt7915_firmware_state(dev, false); 2106 if (ret) { 2107 dev_err(dev->mt76.dev, 2108 "Firmware is not ready for download\n"); 2109 return ret; 2110 } 2111 } 2112 2113 ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH)); 2114 if (ret) 2115 return ret; 2116 2117 ret = mt76_connac2_load_ram(&dev->mt76, fw_name_var(dev, FIRMWARE_WM), 2118 fw_name(dev, FIRMWARE_WA)); 2119 if (ret) 2120 return ret; 2121 2122 ret = mt7915_firmware_state(dev, true); 2123 if (ret) 2124 return ret; 2125 2126 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); 2127 2128 dev_dbg(dev->mt76.dev, "Firmware init done\n"); 2129 2130 return 0; 2131 } 2132 2133 int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl) 2134 { 2135 struct { 2136 u8 ctrl_val; 2137 u8 pad[3]; 2138 } data = { 2139 .ctrl_val = ctrl 2140 }; 2141 2142 if (type == MCU_FW_LOG_WA) 2143 return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(FW_LOG_2_HOST), 2144 &data, sizeof(data), true); 2145 2146 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data, 2147 sizeof(data), true); 2148 } 2149 2150 int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level) 2151 { 2152 struct { 2153 u8 ver; 2154 u8 pad; 2155 __le16 len; 2156 u8 level; 2157 u8 rsv[3]; 2158 __le32 module_idx; 2159 } data = { 2160 .module_idx = cpu_to_le32(module), 2161 .level = level, 2162 }; 2163 2164 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_DBG_CTRL), &data, 2165 sizeof(data), false); 2166 } 2167 2168 int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled) 2169 { 2170 struct { 2171 __le32 cmd; 2172 u8 enable; 2173 } data = { 2174 .cmd = cpu_to_le32(MURU_SET_TXC_TX_STATS_EN), 2175 .enable = enabled, 2176 }; 2177 2178 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &data, 2179 sizeof(data), false); 2180 } 2181 2182 int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy) 2183 { 2184 struct mt7915_dev *dev = phy->dev; 2185 struct sk_buff *skb; 2186 struct mt7915_mcu_muru_stats *mu_stats; 2187 int ret; 2188 2189 struct { 2190 __le32 cmd; 2191 u8 band_idx; 2192 } req = { 2193 .cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS), 2194 .band_idx = phy->mt76->band_idx, 2195 }; 2196 2197 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), 2198 &req, sizeof(req), true, &skb); 2199 if (ret) 2200 return ret; 2201 2202 mu_stats = (struct mt7915_mcu_muru_stats *)(skb->data); 2203 2204 /* accumulate stats, these are clear-on-read */ 2205 #define __dl_u32(s) phy->mib.dl_##s += le32_to_cpu(mu_stats->dl.s) 2206 #define __ul_u32(s) phy->mib.ul_##s += le32_to_cpu(mu_stats->ul.s) 2207 __dl_u32(cck_cnt); 2208 __dl_u32(ofdm_cnt); 2209 __dl_u32(htmix_cnt); 2210 __dl_u32(htgf_cnt); 2211 __dl_u32(vht_su_cnt); 2212 __dl_u32(vht_2mu_cnt); 2213 __dl_u32(vht_3mu_cnt); 2214 __dl_u32(vht_4mu_cnt); 2215 __dl_u32(he_su_cnt); 2216 __dl_u32(he_2ru_cnt); 2217 __dl_u32(he_2mu_cnt); 2218 __dl_u32(he_3ru_cnt); 2219 __dl_u32(he_3mu_cnt); 2220 __dl_u32(he_4ru_cnt); 2221 __dl_u32(he_4mu_cnt); 2222 __dl_u32(he_5to8ru_cnt); 
2223 __dl_u32(he_9to16ru_cnt); 2224 __dl_u32(he_gtr16ru_cnt); 2225 2226 __ul_u32(hetrig_su_cnt); 2227 __ul_u32(hetrig_2ru_cnt); 2228 __ul_u32(hetrig_3ru_cnt); 2229 __ul_u32(hetrig_4ru_cnt); 2230 __ul_u32(hetrig_5to8ru_cnt); 2231 __ul_u32(hetrig_9to16ru_cnt); 2232 __ul_u32(hetrig_gtr16ru_cnt); 2233 __ul_u32(hetrig_2mu_cnt); 2234 __ul_u32(hetrig_3mu_cnt); 2235 __ul_u32(hetrig_4mu_cnt); 2236 #undef __dl_u32 2237 #undef __ul_u32 2238 2239 dev_kfree_skb(skb); 2240 2241 return 0; 2242 } 2243 2244 static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled) 2245 { 2246 struct { 2247 u8 enable; 2248 u8 _rsv[3]; 2249 } __packed req = { 2250 .enable = enabled 2251 }; 2252 2253 return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(MWDS_SUPPORT), &req, 2254 sizeof(req), false); 2255 } 2256 2257 int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val) 2258 { 2259 struct { 2260 __le32 cmd; 2261 u8 val[4]; 2262 } __packed req = { 2263 .cmd = cpu_to_le32(cmd), 2264 }; 2265 2266 put_unaligned_le32(val, req.val); 2267 2268 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req, 2269 sizeof(req), false); 2270 } 2271 2272 static int 2273 mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev) 2274 { 2275 #define RX_AIRTIME_FEATURE_CTRL 1 2276 #define RX_AIRTIME_BITWISE_CTRL 2 2277 #define RX_AIRTIME_CLEAR_EN 1 2278 struct { 2279 __le16 field; 2280 __le16 sub_field; 2281 __le32 set_status; 2282 __le32 get_status; 2283 u8 _rsv[12]; 2284 2285 bool airtime_en; 2286 bool mibtime_en; 2287 bool earlyend_en; 2288 u8 _rsv1[9]; 2289 2290 bool airtime_clear; 2291 bool mibtime_clear; 2292 u8 _rsv2[98]; 2293 } __packed req = { 2294 .field = cpu_to_le16(RX_AIRTIME_BITWISE_CTRL), 2295 .sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN), 2296 .airtime_clear = true, 2297 }; 2298 int ret; 2299 2300 ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req, 2301 sizeof(req), true); 2302 if (ret) 2303 return ret; 2304 2305 req.field = cpu_to_le16(RX_AIRTIME_FEATURE_CTRL); 2306 req.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN); 2307 req.airtime_en = true; 2308 2309 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req, 2310 sizeof(req), true); 2311 } 2312 2313 static int mt7915_red_set_watermark(struct mt7915_dev *dev) 2314 { 2315 #define RED_GLOBAL_TOKEN_WATERMARK 2 2316 struct { 2317 __le32 args[3]; 2318 u8 cmd; 2319 u8 version; 2320 u8 __rsv1[4]; 2321 __le16 len; 2322 __le16 high_mark; 2323 __le16 low_mark; 2324 u8 __rsv2[12]; 2325 } __packed req = { 2326 .args[0] = cpu_to_le32(MCU_WA_PARAM_RED_SETTING), 2327 .cmd = RED_GLOBAL_TOKEN_WATERMARK, 2328 .len = cpu_to_le16(sizeof(req) - sizeof(req.args)), 2329 .high_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256), 2330 .low_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256 - 1536), 2331 }; 2332 2333 return mt76_mcu_send_msg(&dev->mt76, MCU_WA_PARAM_CMD(SET), &req, 2334 sizeof(req), false); 2335 } 2336 2337 static int mt7915_mcu_set_red(struct mt7915_dev *dev, bool enabled) 2338 { 2339 #define RED_DISABLE 0 2340 #define RED_BY_WA_ENABLE 2 2341 int ret; 2342 u32 red_type = enabled ? 
RED_BY_WA_ENABLE : RED_DISABLE; 2343 __le32 req = cpu_to_le32(red_type); 2344 2345 if (enabled) { 2346 ret = mt7915_red_set_watermark(dev); 2347 if (ret < 0) 2348 return ret; 2349 } 2350 2351 ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RED_ENABLE), &req, 2352 sizeof(req), false); 2353 if (ret < 0) 2354 return ret; 2355 2356 return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), 2357 MCU_WA_PARAM_RED, enabled, 0); 2358 } 2359 2360 int mt7915_mcu_init_firmware(struct mt7915_dev *dev) 2361 { 2362 int ret; 2363 2364 /* force firmware operation mode into normal state, 2365 * which should be set before firmware download stage. 2366 */ 2367 mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE); 2368 2369 ret = mt7915_driver_own(dev, 0); 2370 if (ret) 2371 return ret; 2372 /* set driver own for band1 when two hif exist */ 2373 if (dev->hif2) { 2374 ret = mt7915_driver_own(dev, 1); 2375 if (ret) 2376 return ret; 2377 } 2378 2379 ret = mt7915_load_firmware(dev); 2380 if (ret) 2381 return ret; 2382 2383 set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); 2384 ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0); 2385 if (ret) 2386 return ret; 2387 2388 ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0); 2389 if (ret) 2390 return ret; 2391 2392 mt76_connac_mcu_del_wtbl_all(&dev->mt76); 2393 2394 if ((mtk_wed_device_active(&dev->mt76.mmio.wed) && 2395 is_mt7915(&dev->mt76)) || 2396 !mtk_wed_get_rx_capa(&dev->mt76.mmio.wed)) 2397 mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0); 2398 2399 ret = mt7915_mcu_set_mwds(dev, 1); 2400 if (ret) 2401 return ret; 2402 2403 ret = mt7915_mcu_set_muru_ctrl(dev, MURU_SET_PLATFORM_TYPE, 2404 MURU_PLATFORM_TYPE_PERF_LEVEL_2); 2405 if (ret) 2406 return ret; 2407 2408 ret = mt7915_mcu_init_rx_airtime(dev); 2409 if (ret) 2410 return ret; 2411 2412 return mt7915_mcu_set_red(dev, mtk_wed_device_active(&dev->mt76.mmio.wed)); 2413 } 2414 2415 int mt7915_mcu_init(struct mt7915_dev *dev) 2416 { 2417 static const struct mt76_mcu_ops mt7915_mcu_ops = { 2418 .max_retry = 1, 2419 .headroom = sizeof(struct mt76_connac2_mcu_txd), 2420 .mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message, 2421 .mcu_skb_send_msg = mt7915_mcu_send_message, 2422 .mcu_parse_response = mt7915_mcu_parse_response, 2423 }; 2424 2425 dev->mt76.mcu_ops = &mt7915_mcu_ops; 2426 2427 return mt7915_mcu_init_firmware(dev); 2428 } 2429 2430 void mt7915_mcu_exit(struct mt7915_dev *dev) 2431 { 2432 mt76_connac_mcu_restart(&dev->mt76); 2433 if (mt7915_firmware_state(dev, false)) { 2434 dev_err(dev->mt76.dev, "Failed to exit mcu\n"); 2435 goto out; 2436 } 2437 2438 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN); 2439 if (dev->hif2) 2440 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1), 2441 MT_TOP_LPCR_HOST_FW_OWN); 2442 out: 2443 skb_queue_purge(&dev->mt76.mcu.res_q); 2444 } 2445 2446 static int 2447 mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band) 2448 { 2449 struct { 2450 u8 operation; 2451 u8 count; 2452 u8 _rsv[2]; 2453 u8 index; 2454 u8 enable; 2455 __le16 etype; 2456 } req = { 2457 .operation = 1, 2458 .count = 1, 2459 .enable = 1, 2460 .etype = cpu_to_le16(ETH_P_PAE), 2461 }; 2462 2463 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), 2464 &req, sizeof(req), false); 2465 } 2466 2467 int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, 2468 bool enable, bool hdr_trans) 2469 { 2470 struct { 2471 u8 operation; 2472 u8 enable; 2473 u8 check_bssid; 2474 u8 insert_vlan; 2475 u8 remove_vlan; 2476 u8 tid; 2477 u8 mode; 2478 u8 rsv; 2479 } __packed req_trans = { 
2480 .enable = hdr_trans, 2481 }; 2482 struct { 2483 u8 enable; 2484 u8 band; 2485 u8 rsv[2]; 2486 } __packed req_mac = { 2487 .enable = enable, 2488 .band = band, 2489 }; 2490 int ret; 2491 2492 ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS), 2493 &req_trans, sizeof(req_trans), false); 2494 if (ret) 2495 return ret; 2496 2497 if (hdr_trans) 2498 mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band); 2499 2500 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL), 2501 &req_mac, sizeof(req_mac), true); 2502 } 2503 2504 int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param) 2505 { 2506 struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param; 2507 u8 num = req->total; 2508 size_t len = sizeof(*req) - 2509 (IEEE80211_NUM_ACS - num) * sizeof(struct edca); 2510 2511 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), req, 2512 len, true); 2513 } 2514 2515 int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif) 2516 { 2517 #define TX_CMD_MODE 1 2518 struct mt7915_mcu_tx req = { 2519 .valid = true, 2520 .mode = TX_CMD_MODE, 2521 .total = IEEE80211_NUM_ACS, 2522 }; 2523 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 2524 int ac; 2525 2526 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 2527 struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; 2528 struct edca *e = &req.edca[ac]; 2529 2530 e->set = WMM_PARAM_SET; 2531 e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS; 2532 e->aifs = q->aifs; 2533 e->txop = cpu_to_le16(q->txop); 2534 2535 if (q->cw_min) 2536 e->cw_min = fls(q->cw_min); 2537 else 2538 e->cw_min = 5; 2539 2540 if (q->cw_max) 2541 e->cw_max = cpu_to_le16(fls(q->cw_max)); 2542 else 2543 e->cw_max = cpu_to_le16(10); 2544 } 2545 2546 return mt7915_mcu_update_edca(dev, &req); 2547 } 2548 2549 int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val) 2550 { 2551 struct { 2552 __le32 tag; 2553 __le16 min_lpn; 2554 u8 rsv[2]; 2555 } __packed req = { 2556 .tag = cpu_to_le32(0x1), 2557 .min_lpn = cpu_to_le16(val), 2558 }; 2559 2560 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req, 2561 sizeof(req), true); 2562 } 2563 2564 int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev, 2565 const struct mt7915_dfs_pulse *pulse) 2566 { 2567 struct { 2568 __le32 tag; 2569 2570 __le32 max_width; /* us */ 2571 __le32 max_pwr; /* dbm */ 2572 __le32 min_pwr; /* dbm */ 2573 __le32 min_stgr_pri; /* us */ 2574 __le32 max_stgr_pri; /* us */ 2575 __le32 min_cr_pri; /* us */ 2576 __le32 max_cr_pri; /* us */ 2577 } __packed req = { 2578 .tag = cpu_to_le32(0x3), 2579 2580 #define __req_field(field) .field = cpu_to_le32(pulse->field) 2581 __req_field(max_width), 2582 __req_field(max_pwr), 2583 __req_field(min_pwr), 2584 __req_field(min_stgr_pri), 2585 __req_field(max_stgr_pri), 2586 __req_field(min_cr_pri), 2587 __req_field(max_cr_pri), 2588 #undef __req_field 2589 }; 2590 2591 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req, 2592 sizeof(req), true); 2593 } 2594 2595 int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index, 2596 const struct mt7915_dfs_pattern *pattern) 2597 { 2598 struct { 2599 __le32 tag; 2600 __le16 radar_type; 2601 2602 u8 enb; 2603 u8 stgr; 2604 u8 min_crpn; 2605 u8 max_crpn; 2606 u8 min_crpr; 2607 u8 min_pw; 2608 __le32 min_pri; 2609 __le32 max_pri; 2610 u8 max_pw; 2611 u8 min_crbn; 2612 u8 max_crbn; 2613 u8 min_stgpn; 2614 u8 max_stgpn; 2615 u8 min_stgpr; 2616 u8 rsv[2]; 2617 __le32 min_stgpr_diff; 2618 } __packed req = { 2619 .tag = cpu_to_le32(0x2), 2620 
.radar_type = cpu_to_le16(index), 2621 2622 #define __req_field_u8(field) .field = pattern->field 2623 #define __req_field_u32(field) .field = cpu_to_le32(pattern->field) 2624 __req_field_u8(enb), 2625 __req_field_u8(stgr), 2626 __req_field_u8(min_crpn), 2627 __req_field_u8(max_crpn), 2628 __req_field_u8(min_crpr), 2629 __req_field_u8(min_pw), 2630 __req_field_u32(min_pri), 2631 __req_field_u32(max_pri), 2632 __req_field_u8(max_pw), 2633 __req_field_u8(min_crbn), 2634 __req_field_u8(max_crbn), 2635 __req_field_u8(min_stgpn), 2636 __req_field_u8(max_stgpn), 2637 __req_field_u8(min_stgpr), 2638 __req_field_u32(min_stgpr_diff), 2639 #undef __req_field_u8 2640 #undef __req_field_u32 2641 }; 2642 2643 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req, 2644 sizeof(req), true); 2645 } 2646 2647 static int 2648 mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy, 2649 struct cfg80211_chan_def *chandef, 2650 int cmd) 2651 { 2652 struct mt7915_dev *dev = phy->dev; 2653 struct mt76_phy *mphy = phy->mt76; 2654 struct ieee80211_channel *chan = mphy->chandef.chan; 2655 int freq = mphy->chandef.center_freq1; 2656 struct mt7915_mcu_background_chain_ctrl req = { 2657 .monitor_scan_type = 2, /* simple rx */ 2658 }; 2659 2660 if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP) 2661 return -EINVAL; 2662 2663 if (!cfg80211_chandef_valid(&mphy->chandef)) 2664 return -EINVAL; 2665 2666 switch (cmd) { 2667 case CH_SWITCH_BACKGROUND_SCAN_START: { 2668 req.chan = chan->hw_value; 2669 req.central_chan = ieee80211_frequency_to_channel(freq); 2670 req.bw = mt76_connac_chan_bw(&mphy->chandef); 2671 req.monitor_chan = chandef->chan->hw_value; 2672 req.monitor_central_chan = 2673 ieee80211_frequency_to_channel(chandef->center_freq1); 2674 req.monitor_bw = mt76_connac_chan_bw(chandef); 2675 req.band_idx = phy->mt76->band_idx; 2676 req.scan_mode = 1; 2677 break; 2678 } 2679 case CH_SWITCH_BACKGROUND_SCAN_RUNNING: 2680 req.monitor_chan = chandef->chan->hw_value; 2681 req.monitor_central_chan = 2682 ieee80211_frequency_to_channel(chandef->center_freq1); 2683 req.band_idx = phy->mt76->band_idx; 2684 req.scan_mode = 2; 2685 break; 2686 case CH_SWITCH_BACKGROUND_SCAN_STOP: 2687 req.chan = chan->hw_value; 2688 req.central_chan = ieee80211_frequency_to_channel(freq); 2689 req.bw = mt76_connac_chan_bw(&mphy->chandef); 2690 req.tx_stream = hweight8(mphy->antenna_mask); 2691 req.rx_stream = mphy->antenna_mask; 2692 break; 2693 default: 2694 return -EINVAL; 2695 } 2696 req.band = chandef ? 
chandef->chan->band == NL80211_BAND_5GHZ : 1; 2697 2698 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL), 2699 &req, sizeof(req), false); 2700 } 2701 2702 int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy, 2703 struct cfg80211_chan_def *chandef) 2704 { 2705 struct mt7915_dev *dev = phy->dev; 2706 int err, region; 2707 2708 if (!chandef) { /* disable offchain */ 2709 err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2, 2710 0, 0); 2711 if (err) 2712 return err; 2713 2714 return mt7915_mcu_background_chain_ctrl(phy, NULL, 2715 CH_SWITCH_BACKGROUND_SCAN_STOP); 2716 } 2717 2718 err = mt7915_mcu_background_chain_ctrl(phy, chandef, 2719 CH_SWITCH_BACKGROUND_SCAN_START); 2720 if (err) 2721 return err; 2722 2723 switch (dev->mt76.region) { 2724 case NL80211_DFS_ETSI: 2725 region = 0; 2726 break; 2727 case NL80211_DFS_JP: 2728 region = 2; 2729 break; 2730 case NL80211_DFS_FCC: 2731 default: 2732 region = 1; 2733 break; 2734 } 2735 2736 return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2, 2737 0, region); 2738 } 2739 2740 int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd) 2741 { 2742 static const u8 ch_band[] = { 2743 [NL80211_BAND_2GHZ] = 0, 2744 [NL80211_BAND_5GHZ] = 1, 2745 [NL80211_BAND_6GHZ] = 2, 2746 }; 2747 struct mt7915_dev *dev = phy->dev; 2748 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 2749 int freq1 = chandef->center_freq1; 2750 u8 band = phy->mt76->band_idx; 2751 struct { 2752 u8 control_ch; 2753 u8 center_ch; 2754 u8 bw; 2755 u8 tx_path_num; 2756 u8 rx_path; /* mask or num */ 2757 u8 switch_reason; 2758 u8 band_idx; 2759 u8 center_ch2; /* for 80+80 only */ 2760 __le16 cac_case; 2761 u8 channel_band; 2762 u8 rsv0; 2763 __le32 outband_freq; 2764 u8 txpower_drop; 2765 u8 ap_bw; 2766 u8 ap_center_ch; 2767 u8 rsv1[57]; 2768 } __packed req = { 2769 .control_ch = chandef->chan->hw_value, 2770 .center_ch = ieee80211_frequency_to_channel(freq1), 2771 .bw = mt76_connac_chan_bw(chandef), 2772 .tx_path_num = hweight16(phy->mt76->chainmask), 2773 .rx_path = phy->mt76->chainmask >> (dev->chainshift * band), 2774 .band_idx = band, 2775 .channel_band = ch_band[chandef->chan->band], 2776 }; 2777 2778 #ifdef CONFIG_NL80211_TESTMODE 2779 if (phy->mt76->test.tx_antenna_mask && 2780 mt76_testmode_enabled(phy->mt76)) { 2781 req.tx_path_num = fls(phy->mt76->test.tx_antenna_mask); 2782 req.rx_path = phy->mt76->test.tx_antenna_mask; 2783 } 2784 #endif 2785 2786 if (mt76_connac_spe_idx(phy->mt76->antenna_mask)) 2787 req.tx_path_num = fls(phy->mt76->antenna_mask); 2788 2789 if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR) 2790 req.switch_reason = CH_SWITCH_NORMAL; 2791 else if (phy->mt76->offchannel || 2792 phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE) 2793 req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; 2794 else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, 2795 NL80211_IFTYPE_AP)) 2796 req.switch_reason = CH_SWITCH_DFS; 2797 else 2798 req.switch_reason = CH_SWITCH_NORMAL; 2799 2800 if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH)) 2801 req.rx_path = hweight8(req.rx_path); 2802 2803 if (chandef->width == NL80211_CHAN_WIDTH_80P80) { 2804 int freq2 = chandef->center_freq2; 2805 2806 req.center_ch2 = ieee80211_frequency_to_channel(freq2); 2807 } 2808 2809 return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); 2810 } 2811 2812 static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev) 2813 { 2814 #define MAX_PAGE_IDX_MASK GENMASK(7, 5) 2815 #define PAGE_IDX_MASK GENMASK(4, 2) 2816 #define PER_PAGE_SIZE 0x400 
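/* Hedged sketch of how the buffer-mode upload below pages the EEPROM
 * image; the numbers are illustrative arithmetic on this code, not values
 * taken from a datasheet:
 *
 *	eeprom_size = 3584;			// example 0xe00-byte map
 *	total = DIV_ROUND_UP(3584, 0x400);	// 4 pages
 *	last_len = 3584 % 0x400;		// 0x200 bytes in the last page
 *
 * Each page becomes its own EFUSE_BUFFER_MODE message: req.format packs
 * the last-page index (MAX_PAGE_IDX_MASK) and the current page index
 * (PAGE_IDX_MASK) next to EE_FORMAT_WHOLE, with the page payload appended
 * right after the request header.
 */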
2817 struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER }; 2818 u16 eeprom_size = mt7915_eeprom_size(dev); 2819 u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE); 2820 u8 *eep = (u8 *)dev->mt76.eeprom.data; 2821 int eep_len; 2822 int i; 2823 2824 for (i = 0; i < total; i++, eep += eep_len) { 2825 struct sk_buff *skb; 2826 int ret; 2827 2828 if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE)) 2829 eep_len = eeprom_size % PER_PAGE_SIZE; 2830 else 2831 eep_len = PER_PAGE_SIZE; 2832 2833 skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 2834 sizeof(req) + eep_len); 2835 if (!skb) 2836 return -ENOMEM; 2837 2838 req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) | 2839 FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE; 2840 req.len = cpu_to_le16(eep_len); 2841 2842 skb_put_data(skb, &req, sizeof(req)); 2843 skb_put_data(skb, eep, eep_len); 2844 2845 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, 2846 MCU_EXT_CMD(EFUSE_BUFFER_MODE), true); 2847 if (ret) 2848 return ret; 2849 } 2850 2851 return 0; 2852 } 2853 2854 int mt7915_mcu_set_eeprom(struct mt7915_dev *dev) 2855 { 2856 struct mt7915_mcu_eeprom req = { 2857 .buffer_mode = EE_MODE_EFUSE, 2858 .format = EE_FORMAT_WHOLE, 2859 }; 2860 2861 if (dev->flash_mode) 2862 return mt7915_mcu_set_eeprom_flash(dev); 2863 2864 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE), 2865 &req, sizeof(req), true); 2866 } 2867 2868 int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset) 2869 { 2870 struct mt7915_mcu_eeprom_info req = { 2871 .addr = cpu_to_le32(round_down(offset, 2872 MT7915_EEPROM_BLOCK_SIZE)), 2873 }; 2874 struct mt7915_mcu_eeprom_info *res; 2875 struct sk_buff *skb; 2876 int ret; 2877 u8 *buf; 2878 2879 ret = mt76_mcu_send_and_get_msg(&dev->mt76, 2880 MCU_EXT_QUERY(EFUSE_ACCESS), 2881 &req, sizeof(req), true, &skb); 2882 if (ret) 2883 return ret; 2884 2885 res = (struct mt7915_mcu_eeprom_info *)skb->data; 2886 #if defined(__linux__) 2887 buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr); 2888 #elif defined(__FreeBSD__) 2889 buf = (u8 *)dev->mt76.eeprom.data + le32_to_cpu(res->addr); 2890 #endif 2891 memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE); 2892 dev_kfree_skb(skb); 2893 2894 return 0; 2895 } 2896 2897 int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num) 2898 { 2899 struct { 2900 u8 _rsv; 2901 u8 version; 2902 u8 die_idx; 2903 u8 _rsv2; 2904 } __packed req = { 2905 .version = 1, 2906 }; 2907 struct sk_buff *skb; 2908 int ret; 2909 2910 ret = mt76_mcu_send_and_get_msg(&dev->mt76, 2911 MCU_EXT_QUERY(EFUSE_FREE_BLOCK), 2912 &req, sizeof(req), true, &skb); 2913 if (ret) 2914 return ret; 2915 2916 *block_num = *(u8 *)skb->data; 2917 dev_kfree_skb(skb); 2918 2919 return 0; 2920 } 2921 2922 static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, 2923 u8 *data, u32 len, int cmd) 2924 { 2925 struct { 2926 u8 dir; 2927 u8 valid; 2928 __le16 bitmap; 2929 s8 precal; 2930 u8 action; 2931 u8 band; 2932 u8 idx; 2933 u8 rsv[4]; 2934 __le32 len; 2935 } req = {}; 2936 struct sk_buff *skb; 2937 2938 skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len); 2939 if (!skb) 2940 return -ENOMEM; 2941 2942 req.idx = idx; 2943 req.len = cpu_to_le32(len); 2944 skb_put_data(skb, &req, sizeof(req)); 2945 skb_put_data(skb, data, len); 2946 2947 return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false); 2948 } 2949 2950 int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev) 2951 { 2952 u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data; 2953 u32 total = mt7915_get_cal_group_size(dev); 2954 
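/* Assumed layout of dev->cal, inferred from how this loop and
 * mt7915_mcu_apply_tx_dpd() below walk the buffer (not from a spec):
 *
 *	offset 0 ........ group pre-cal blob, mt7915_get_cal_group_size(dev)
 *	                  bytes, pushed here in MT_EE_CAL_UNIT chunks
 *	group size ...... per-channel DPD/flatness entries, cal_num *
 *	                  MT_EE_CAL_UNIT bytes each, indexed via
 *	                  mt7915_dpd_freq_idx()
 */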
u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2; 2955 2956 if (!(eep[offs] & MT_EE_WIFI_CAL_GROUP)) 2957 return 0; 2958 2959 /* 2960 * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG 2961 * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC 2962 */ 2963 while (total > 0) { 2964 int ret, len; 2965 2966 len = min_t(u32, total, MT_EE_CAL_UNIT); 2967 2968 ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len, 2969 MCU_EXT_CMD(GROUP_PRE_CAL_INFO)); 2970 if (ret) 2971 return ret; 2972 2973 total -= len; 2974 cal += len; 2975 idx++; 2976 } 2977 2978 return 0; 2979 } 2980 2981 static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur) 2982 { 2983 int i; 2984 2985 for (i = 0; i < n_freqs; i++) 2986 if (cur == freqs[i]) 2987 return i; 2988 2989 return -1; 2990 } 2991 2992 static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw) 2993 { 2994 static const u16 freq_list_v1[] = { 2995 5180, 5200, 5220, 5240, 2996 5260, 5280, 5300, 5320, 2997 5500, 5520, 5540, 5560, 2998 5580, 5600, 5620, 5640, 2999 5660, 5680, 5700, 5745, 3000 5765, 5785, 5805, 5825 3001 }; 3002 static const u16 freq_list_v2[] = { 3003 /* 6G BW20*/ 3004 5955, 5975, 5995, 6015, 3005 6035, 6055, 6075, 6095, 3006 6115, 6135, 6155, 6175, 3007 6195, 6215, 6235, 6255, 3008 6275, 6295, 6315, 6335, 3009 6355, 6375, 6395, 6415, 3010 6435, 6455, 6475, 6495, 3011 6515, 6535, 6555, 6575, 3012 6595, 6615, 6635, 6655, 3013 6675, 6695, 6715, 6735, 3014 6755, 6775, 6795, 6815, 3015 6835, 6855, 6875, 6895, 3016 6915, 6935, 6955, 6975, 3017 6995, 7015, 7035, 7055, 3018 7075, 7095, 7115, 3019 /* 6G BW160 */ 3020 6025, 6185, 6345, 6505, 3021 6665, 6825, 6985, 3022 /* 5G BW20 */ 3023 5180, 5200, 5220, 5240, 3024 5260, 5280, 5300, 5320, 3025 5500, 5520, 5540, 5560, 3026 5580, 5600, 5620, 5640, 3027 5660, 5680, 5700, 5720, 3028 5745, 5765, 5785, 5805, 3029 5825, 5845, 5865, 5885, 3030 /* 5G BW160 */ 3031 5250, 5570, 5815 3032 }; 3033 static const u16 freq_list_v2_7981[] = { 3034 /* 5G BW20 */ 3035 5180, 5200, 5220, 5240, 3036 5260, 5280, 5300, 5320, 3037 5500, 5520, 5540, 5560, 3038 5580, 5600, 5620, 5640, 3039 5660, 5680, 5700, 5720, 3040 5745, 5765, 5785, 5805, 3041 5825, 5845, 5865, 5885, 3042 /* 5G BW160 */ 3043 5250, 5570, 5815 3044 }; 3045 const u16 *freq_list = freq_list_v1; 3046 int n_freqs = ARRAY_SIZE(freq_list_v1); 3047 int idx; 3048 3049 if (!is_mt7915(&dev->mt76)) { 3050 if (is_mt7981(&dev->mt76)) { 3051 freq_list = freq_list_v2_7981; 3052 n_freqs = ARRAY_SIZE(freq_list_v2_7981); 3053 } else { 3054 freq_list = freq_list_v2; 3055 n_freqs = ARRAY_SIZE(freq_list_v2); 3056 } 3057 } 3058 3059 if (freq < 4000) { 3060 if (freq < 2432) 3061 return n_freqs; 3062 if (freq < 2457) 3063 return n_freqs + 1; 3064 3065 return n_freqs + 2; 3066 } 3067 3068 if (bw == NL80211_CHAN_WIDTH_80P80) 3069 return -1; 3070 3071 if (bw != NL80211_CHAN_WIDTH_20) { 3072 idx = mt7915_find_freq_idx(freq_list, n_freqs, freq + 10); 3073 if (idx >= 0) 3074 return idx; 3075 3076 idx = mt7915_find_freq_idx(freq_list, n_freqs, freq - 10); 3077 if (idx >= 0) 3078 return idx; 3079 } 3080 3081 return mt7915_find_freq_idx(freq_list, n_freqs, freq); 3082 } 3083 3084 int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy) 3085 { 3086 struct mt7915_dev *dev = phy->dev; 3087 struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 3088 enum nl80211_band band = chandef->chan->band; 3089 u32 offs = is_mt7915(&dev->mt76) ? 
MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2; 3090 u16 center_freq = chandef->center_freq1; 3091 u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data; 3092 u8 dpd_mask, cal_num = is_mt7915(&dev->mt76) ? 2 : 3; 3093 int idx; 3094 3095 switch (band) { 3096 case NL80211_BAND_2GHZ: 3097 dpd_mask = MT_EE_WIFI_CAL_DPD_2G; 3098 break; 3099 case NL80211_BAND_5GHZ: 3100 dpd_mask = MT_EE_WIFI_CAL_DPD_5G; 3101 break; 3102 case NL80211_BAND_6GHZ: 3103 dpd_mask = MT_EE_WIFI_CAL_DPD_6G; 3104 break; 3105 default: 3106 dpd_mask = 0; 3107 break; 3108 } 3109 3110 if (!(eep[offs] & dpd_mask)) 3111 return 0; 3112 3113 idx = mt7915_dpd_freq_idx(dev, center_freq, chandef->width); 3114 if (idx < 0) 3115 return -EINVAL; 3116 3117 /* Items: Tx DPD, Tx Flatness */ 3118 idx = idx * cal_num; 3119 cal += mt7915_get_cal_group_size(dev) + (idx * MT_EE_CAL_UNIT); 3120 3121 while (cal_num--) { 3122 int ret; 3123 3124 ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT, 3125 MCU_EXT_CMD(DPD_PRE_CAL_INFO)); 3126 if (ret) 3127 return ret; 3128 3129 idx++; 3130 cal += MT_EE_CAL_UNIT; 3131 } 3132 3133 return 0; 3134 } 3135 3136 int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch) 3137 { 3138 struct mt76_channel_state *state = phy->mt76->chan_state; 3139 struct mt76_channel_state *state_ts = &phy->state_ts; 3140 struct mt7915_dev *dev = phy->dev; 3141 struct mt7915_mcu_mib *res, req[5]; 3142 struct sk_buff *skb; 3143 static const u32 *offs; 3144 int i, ret, len, offs_cc; 3145 u64 cc_tx; 3146 3147 /* strict order */ 3148 if (is_mt7915(&dev->mt76)) { 3149 static const u32 chip_offs[] = { 3150 MIB_NON_WIFI_TIME, 3151 MIB_TX_TIME, 3152 MIB_RX_TIME, 3153 MIB_OBSS_AIRTIME, 3154 MIB_TXOP_INIT_COUNT, 3155 }; 3156 len = ARRAY_SIZE(chip_offs); 3157 offs = chip_offs; 3158 offs_cc = 20; 3159 } else { 3160 static const u32 chip_offs[] = { 3161 MIB_NON_WIFI_TIME_V2, 3162 MIB_TX_TIME_V2, 3163 MIB_RX_TIME_V2, 3164 MIB_OBSS_AIRTIME_V2 3165 }; 3166 len = ARRAY_SIZE(chip_offs); 3167 offs = chip_offs; 3168 offs_cc = 0; 3169 } 3170 3171 for (i = 0; i < len; i++) { 3172 req[i].band = cpu_to_le32(phy->mt76->band_idx); 3173 req[i].offs = cpu_to_le32(offs[i]); 3174 } 3175 3176 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO), 3177 req, len * sizeof(req[0]), true, &skb); 3178 if (ret) 3179 return ret; 3180 3181 res = (struct mt7915_mcu_mib *)(skb->data + offs_cc); 3182 3183 #define __res_u64(s) le64_to_cpu(res[s].data) 3184 /* subtract Tx backoff time from Tx duration for MT7915 */ 3185 if (is_mt7915(&dev->mt76)) { 3186 u64 backoff = (__res_u64(4) & 0xffff) * 79; /* 16us + 9us * 7 */ 3187 cc_tx = __res_u64(1) - backoff; 3188 } else { 3189 cc_tx = __res_u64(1); 3190 } 3191 3192 if (chan_switch) 3193 goto out; 3194 3195 state->cc_tx += cc_tx - state_ts->cc_tx; 3196 state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx; 3197 state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx; 3198 state->cc_busy += __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3) - 3199 state_ts->cc_busy; 3200 3201 out: 3202 state_ts->cc_tx = cc_tx; 3203 state_ts->cc_bss_rx = __res_u64(2); 3204 state_ts->cc_rx = __res_u64(2) + __res_u64(3); 3205 state_ts->cc_busy = __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3); 3206 #undef __res_u64 3207 3208 dev_kfree_skb(skb); 3209 3210 return 0; 3211 } 3212 3213 int mt7915_mcu_get_temperature(struct mt7915_phy *phy) 3214 { 3215 struct mt7915_dev *dev = phy->dev; 3216 struct { 3217 u8 ctrl_id; 3218 u8 action; 3219 u8 band_idx; 3220 u8 rsv[5]; 3221 } req = { 3222 .ctrl_id = 
THERMAL_SENSOR_TEMP_QUERY, 3223 .band_idx = phy->mt76->band_idx, 3224 }; 3225 3226 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req, 3227 sizeof(req), true); 3228 } 3229 3230 int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state) 3231 { 3232 struct mt7915_dev *dev = phy->dev; 3233 struct mt7915_mcu_thermal_ctrl req = { 3234 .band_idx = phy->mt76->band_idx, 3235 .ctrl_id = THERMAL_PROTECT_DUTY_CONFIG, 3236 }; 3237 int level, ret; 3238 3239 /* set duty cycle and level */ 3240 for (level = 0; level < 4; level++) { 3241 req.duty.duty_level = level; 3242 req.duty.duty_cycle = state; 3243 state /= 2; 3244 3245 ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT), 3246 &req, sizeof(req), false); 3247 if (ret) 3248 return ret; 3249 } 3250 return 0; 3251 } 3252 3253 int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy) 3254 { 3255 struct mt7915_dev *dev = phy->dev; 3256 struct { 3257 struct mt7915_mcu_thermal_ctrl ctrl; 3258 3259 __le32 trigger_temp; 3260 __le32 restore_temp; 3261 __le16 sustain_time; 3262 u8 rsv[2]; 3263 } __packed req = { 3264 .ctrl = { 3265 .band_idx = phy->mt76->band_idx, 3266 .type.protect_type = 1, 3267 .type.trigger_type = 1, 3268 }, 3269 }; 3270 int ret; 3271 3272 req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE; 3273 ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT), 3274 &req, sizeof(req.ctrl), false); 3275 3276 if (ret) 3277 return ret; 3278 3279 /* set high-temperature trigger threshold */ 3280 req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE; 3281 /* add a safety margin ~10 */ 3282 req.restore_temp = cpu_to_le32(phy->throttle_temp[0] - 10); 3283 req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]); 3284 req.sustain_time = cpu_to_le16(10); 3285 3286 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT), 3287 &req, sizeof(req), false); 3288 } 3289 3290 int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower) 3291 { 3292 struct mt7915_dev *dev = phy->dev; 3293 struct { 3294 u8 format_id; 3295 u8 rsv; 3296 u8 band_idx; 3297 s8 txpower_min; 3298 } __packed req = { 3299 .format_id = TX_POWER_LIMIT_FRAME_MIN, 3300 .band_idx = phy->mt76->band_idx, 3301 .txpower_min = txpower * 2, /* 0.5db */ 3302 }; 3303 3304 return mt76_mcu_send_msg(&dev->mt76, 3305 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3306 sizeof(req), true); 3307 } 3308 3309 int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy, 3310 struct ieee80211_vif *vif, 3311 struct ieee80211_sta *sta, s8 txpower) 3312 { 3313 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 3314 struct mt7915_dev *dev = phy->dev; 3315 struct mt76_phy *mphy = phy->mt76; 3316 struct { 3317 u8 format_id; 3318 u8 rsv[3]; 3319 u8 band_idx; 3320 s8 txpower_max; 3321 __le16 wcid; 3322 s8 txpower_offs[48]; 3323 } __packed req = { 3324 .format_id = TX_POWER_LIMIT_FRAME, 3325 .band_idx = phy->mt76->band_idx, 3326 .txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2), 3327 .wcid = cpu_to_le16(msta->wcid.idx), 3328 }; 3329 int ret; 3330 s8 txpower_sku[MT7915_SKU_RATE_NUM]; 3331 3332 ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku)); 3333 if (ret) 3334 return ret; 3335 3336 txpower = mt7915_get_power_bound(phy, txpower); 3337 if (txpower > mphy->txpower_cur || txpower < 0) 3338 return -EINVAL; 3339 3340 if (txpower) { 3341 u32 offs, len, i; 3342 3343 if (sta->deflink.ht_cap.ht_supported) { 3344 const u8 *sku_len = mt7915_sku_group_len; 3345 3346 offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM]; 3347 len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40]; 3348 
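/* The offset walk below assumes the per-rate SKU table is laid out group
 * by group: CCK, OFDM, HT BW20/BW40, four equally sized VHT bandwidth
 * groups, then the HE RU groups with three equally sized small-RU entries
 * ahead of RU242 and the wider allocations. offs ends up at the first
 * rate of the group matching the STA's highest supported mode, and len is
 * the number of per-rate offsets to program; this is inferred from the
 * arithmetic here rather than from a firmware spec.
 */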
3349 if (sta->deflink.vht_cap.vht_supported) { 3350 offs += len; 3351 len = sku_len[SKU_VHT_BW20] * 4; 3352 3353 if (sta->deflink.he_cap.has_he) { 3354 offs += len + sku_len[SKU_HE_RU26] * 3; 3355 len = sku_len[SKU_HE_RU242] * 4; 3356 } 3357 } 3358 } else { 3359 return -EINVAL; 3360 } 3361 3362 for (i = 0; i < len; i++, offs++) 3363 req.txpower_offs[i] = 3364 DIV_ROUND_UP(txpower - txpower_sku[offs], 2); 3365 } 3366 3367 return mt76_mcu_send_msg(&dev->mt76, 3368 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3369 sizeof(req), true); 3370 } 3371 3372 int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) 3373 { 3374 struct mt7915_dev *dev = phy->dev; 3375 struct mt76_phy *mphy = phy->mt76; 3376 struct ieee80211_hw *hw = mphy->hw; 3377 struct mt7915_mcu_txpower_sku req = { 3378 .format_id = TX_POWER_LIMIT_TABLE, 3379 .band_idx = phy->mt76->band_idx, 3380 }; 3381 struct mt76_power_limits limits_array; 3382 s8 *la = (s8 *)&limits_array; 3383 int i, idx; 3384 int tx_power; 3385 3386 tx_power = mt7915_get_power_bound(phy, hw->conf.power_level); 3387 tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, 3388 &limits_array, tx_power); 3389 mphy->txpower_cur = tx_power; 3390 3391 for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { 3392 u8 mcs_num, len = mt7915_sku_group_len[i]; 3393 int j; 3394 3395 if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) { 3396 mcs_num = 10; 3397 3398 if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) 3399 la = (s8 *)&limits_array + 12; 3400 } else { 3401 mcs_num = len; 3402 } 3403 3404 for (j = 0; j < min_t(u8, mcs_num, len); j++) 3405 req.txpower_sku[idx + j] = la[j]; 3406 3407 la += mcs_num; 3408 idx += len; 3409 } 3410 3411 return mt76_mcu_send_msg(&dev->mt76, 3412 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3413 sizeof(req), true); 3414 } 3415 3416 int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len) 3417 { 3418 #define RATE_POWER_INFO 2 3419 struct mt7915_dev *dev = phy->dev; 3420 struct { 3421 u8 format_id; 3422 u8 category; 3423 u8 band_idx; 3424 u8 _rsv; 3425 } __packed req = { 3426 .format_id = TX_POWER_LIMIT_INFO, 3427 .category = RATE_POWER_INFO, 3428 .band_idx = phy->mt76->band_idx, 3429 }; 3430 s8 txpower_sku[MT7915_SKU_RATE_NUM][2]; 3431 struct sk_buff *skb; 3432 int ret, i; 3433 3434 ret = mt76_mcu_send_and_get_msg(&dev->mt76, 3435 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), 3436 &req, sizeof(req), true, &skb); 3437 if (ret) 3438 return ret; 3439 3440 memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku)); 3441 for (i = 0; i < len; i++) 3442 txpower[i] = txpower_sku[i][req.band_idx]; 3443 3444 dev_kfree_skb(skb); 3445 3446 return 0; 3447 } 3448 3449 int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, 3450 u8 en) 3451 { 3452 struct { 3453 u8 test_mode_en; 3454 u8 param_idx; 3455 u8 _rsv[2]; 3456 3457 u8 enable; 3458 u8 _rsv2[3]; 3459 3460 u8 pad[8]; 3461 } __packed req = { 3462 .test_mode_en = test_mode, 3463 .param_idx = param, 3464 .enable = en, 3465 }; 3466 3467 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req, 3468 sizeof(req), false); 3469 } 3470 3471 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable) 3472 { 3473 struct mt7915_dev *dev = phy->dev; 3474 struct mt7915_sku { 3475 u8 format_id; 3476 u8 sku_enable; 3477 u8 band_idx; 3478 u8 rsv; 3479 } __packed req = { 3480 .format_id = TX_POWER_LIMIT_ENABLE, 3481 .band_idx = phy->mt76->band_idx, 3482 .sku_enable = enable, 3483 }; 3484 3485 return mt76_mcu_send_msg(&dev->mt76, 3486 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3487 
sizeof(req), true); 3488 } 3489 3490 int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band) 3491 { 3492 struct { 3493 u8 action; 3494 u8 set; 3495 u8 band; 3496 u8 rsv; 3497 } req = { 3498 .action = action, 3499 .set = set, 3500 .band = band, 3501 }; 3502 3503 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SER_TRIGGER), 3504 &req, sizeof(req), false); 3505 } 3506 3507 int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action) 3508 { 3509 struct { 3510 u8 action; 3511 union { 3512 struct { 3513 u8 snd_mode; 3514 u8 sta_num; 3515 u8 rsv; 3516 u8 wlan_idx[4]; 3517 __le32 snd_period; /* ms */ 3518 } __packed snd; 3519 struct { 3520 bool ebf; 3521 bool ibf; 3522 u8 rsv; 3523 } __packed type; 3524 struct { 3525 u8 bf_num; 3526 u8 bf_bitmap; 3527 u8 bf_sel[8]; 3528 u8 rsv[5]; 3529 } __packed mod; 3530 }; 3531 } __packed req = { 3532 .action = action, 3533 }; 3534 3535 #define MT_BF_PROCESSING 4 3536 switch (action) { 3537 case MT_BF_SOUNDING_ON: 3538 req.snd.snd_mode = MT_BF_PROCESSING; 3539 break; 3540 case MT_BF_TYPE_UPDATE: 3541 req.type.ebf = true; 3542 req.type.ibf = dev->ibf; 3543 break; 3544 case MT_BF_MODULE_UPDATE: 3545 req.mod.bf_num = 2; 3546 req.mod.bf_bitmap = GENMASK(1, 0); 3547 break; 3548 default: 3549 return -EINVAL; 3550 } 3551 3552 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req, 3553 sizeof(req), true); 3554 } 3555 3556 static int 3557 mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val) 3558 { 3559 struct mt7915_dev *dev = phy->dev; 3560 struct mt7915_mcu_sr_ctrl req = { 3561 .action = action, 3562 .argnum = 1, 3563 .band_idx = phy->mt76->band_idx, 3564 .val = cpu_to_le32(val), 3565 }; 3566 3567 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3568 sizeof(req), true); 3569 } 3570 3571 static int 3572 mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy, 3573 struct ieee80211_he_obss_pd *he_obss_pd) 3574 { 3575 struct mt7915_dev *dev = phy->dev; 3576 struct { 3577 struct mt7915_mcu_sr_ctrl ctrl; 3578 struct { 3579 u8 pd_th_non_srg; 3580 u8 pd_th_srg; 3581 u8 period_offs; 3582 u8 rcpi_src; 3583 __le16 obss_pd_min; 3584 __le16 obss_pd_min_srg; 3585 u8 resp_txpwr_mode; 3586 u8 txpwr_restrict_mode; 3587 u8 txpwr_ref; 3588 u8 rsv[3]; 3589 } __packed param; 3590 } __packed req = { 3591 .ctrl = { 3592 .action = SPR_SET_PARAM, 3593 .argnum = 9, 3594 .band_idx = phy->mt76->band_idx, 3595 }, 3596 }; 3597 int ret; 3598 u8 max_th = 82, non_srg_max_th = 62; 3599 3600 /* disable the firmware's dynamic PD adjustment */ 3601 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false); 3602 if (ret) 3603 return ret; 3604 3605 if (he_obss_pd->sr_ctrl & 3606 IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) 3607 req.param.pd_th_non_srg = max_th; 3608 else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) 3609 req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset; 3610 else 3611 req.param.pd_th_non_srg = non_srg_max_th; 3612 3613 if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) 3614 req.param.pd_th_srg = max_th - he_obss_pd->max_offset; 3615 3616 req.param.obss_pd_min = cpu_to_le16(82); 3617 req.param.obss_pd_min_srg = cpu_to_le16(82); 3618 req.param.txpwr_restrict_mode = 2; 3619 req.param.txpwr_ref = 21; 3620 3621 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3622 sizeof(req), true); 3623 } 3624 3625 static int 3626 mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3627 struct ieee80211_he_obss_pd *he_obss_pd) 3628 { 3629 struct
mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3630 struct mt7915_dev *dev = phy->dev; 3631 u8 omac = mvif->mt76.omac_idx; 3632 struct { 3633 struct mt7915_mcu_sr_ctrl ctrl; 3634 struct { 3635 u8 omac; 3636 u8 rsv[3]; 3637 u8 flag[20]; 3638 } __packed siga; 3639 } __packed req = { 3640 .ctrl = { 3641 .action = SPR_SET_SIGA, 3642 .argnum = 1, 3643 .band_idx = phy->mt76->band_idx, 3644 }, 3645 .siga = { 3646 .omac = omac > HW_BSSID_MAX ? omac - 12 : omac, 3647 }, 3648 }; 3649 int ret; 3650 3651 if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED) 3652 req.siga.flag[req.siga.omac] = 0xf; 3653 else 3654 return 0; 3655 3656 /* switch to normal AP mode */ 3657 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0); 3658 if (ret) 3659 return ret; 3660 3661 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3662 sizeof(req), true); 3663 } 3664 3665 static int 3666 mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy, 3667 struct ieee80211_he_obss_pd *he_obss_pd) 3668 { 3669 struct mt7915_dev *dev = phy->dev; 3670 struct { 3671 struct mt7915_mcu_sr_ctrl ctrl; 3672 struct { 3673 __le32 color_l[2]; 3674 __le32 color_h[2]; 3675 __le32 bssid_l[2]; 3676 __le32 bssid_h[2]; 3677 } __packed bitmap; 3678 } __packed req = { 3679 .ctrl = { 3680 .action = SPR_SET_SRG_BITMAP, 3681 .argnum = 4, 3682 .band_idx = phy->mt76->band_idx, 3683 }, 3684 }; 3685 u32 bitmap; 3686 3687 memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap)); 3688 req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3689 3690 memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap)); 3691 req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3692 3693 memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap)); 3694 req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3695 3696 memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap)); 3697 req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3698 3699 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3700 sizeof(req), true); 3701 } 3702 3703 int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3704 struct ieee80211_he_obss_pd *he_obss_pd) 3705 { 3706 int ret; 3707 3708 /* enable firmware scene detection algorithms */ 3709 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect); 3710 if (ret) 3711 return ret; 3712 3713 /* firmware dynamically adjusts PD threshold so skip manual control */ 3714 if (sr_scene_detect && !he_obss_pd->enable) 3715 return 0; 3716 3717 /* enable spatial reuse */ 3718 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable); 3719 if (ret) 3720 return ret; 3721 3722 if (sr_scene_detect || !he_obss_pd->enable) 3723 return 0; 3724 3725 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true); 3726 if (ret) 3727 return ret; 3728 3729 /* set SRG/non-SRG OBSS PD threshold */ 3730 ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd); 3731 if (ret) 3732 return ret; 3733 3734 /* Set SR prohibit */ 3735 ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd); 3736 if (ret) 3737 return ret; 3738 3739 /* set SRG BSS color/BSSID bitmap */ 3740 return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd); 3741 } 3742 3743 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3744 struct ieee80211_sta *sta, struct rate_info *rate) 3745 { 3746 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3747 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 3748 struct mt7915_dev 
*dev = phy->dev; 3749 struct mt76_phy *mphy = phy->mt76; 3750 struct { 3751 u8 category; 3752 u8 band; 3753 __le16 wcid; 3754 } __packed req = { 3755 .category = MCU_PHY_STATE_CONTENTION_RX_RATE, 3756 .band = mvif->mt76.band_idx, 3757 .wcid = cpu_to_le16(msta->wcid.idx), 3758 }; 3759 struct ieee80211_supported_band *sband; 3760 struct mt7915_mcu_phy_rx_info *res; 3761 struct sk_buff *skb; 3762 int ret; 3763 bool cck = false; 3764 3765 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO), 3766 &req, sizeof(req), true, &skb); 3767 if (ret) 3768 return ret; 3769 3770 res = (struct mt7915_mcu_phy_rx_info *)skb->data; 3771 3772 rate->mcs = res->rate; 3773 rate->nss = res->nsts + 1; 3774 3775 switch (res->mode) { 3776 case MT_PHY_TYPE_CCK: 3777 cck = true; 3778 fallthrough; 3779 case MT_PHY_TYPE_OFDM: 3780 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 3781 sband = &mphy->sband_5g.sband; 3782 else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ) 3783 sband = &mphy->sband_6g.sband; 3784 else 3785 sband = &mphy->sband_2g.sband; 3786 3787 rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck); 3788 rate->legacy = sband->bitrates[rate->mcs].bitrate; 3789 break; 3790 case MT_PHY_TYPE_HT: 3791 case MT_PHY_TYPE_HT_GF: 3792 if (rate->mcs > 31) { 3793 ret = -EINVAL; 3794 goto out; 3795 } 3796 3797 rate->flags = RATE_INFO_FLAGS_MCS; 3798 if (res->gi) 3799 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 3800 break; 3801 case MT_PHY_TYPE_VHT: 3802 if (rate->mcs > 9) { 3803 ret = -EINVAL; 3804 goto out; 3805 } 3806 3807 rate->flags = RATE_INFO_FLAGS_VHT_MCS; 3808 if (res->gi) 3809 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 3810 break; 3811 case MT_PHY_TYPE_HE_SU: 3812 case MT_PHY_TYPE_HE_EXT_SU: 3813 case MT_PHY_TYPE_HE_TB: 3814 case MT_PHY_TYPE_HE_MU: 3815 if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) { 3816 ret = -EINVAL; 3817 goto out; 3818 } 3819 rate->he_gi = res->gi; 3820 rate->flags = RATE_INFO_FLAGS_HE_MCS; 3821 break; 3822 default: 3823 ret = -EINVAL; 3824 goto out; 3825 } 3826 3827 switch (res->bw) { 3828 case IEEE80211_STA_RX_BW_160: 3829 rate->bw = RATE_INFO_BW_160; 3830 break; 3831 case IEEE80211_STA_RX_BW_80: 3832 rate->bw = RATE_INFO_BW_80; 3833 break; 3834 case IEEE80211_STA_RX_BW_40: 3835 rate->bw = RATE_INFO_BW_40; 3836 break; 3837 default: 3838 rate->bw = RATE_INFO_BW_20; 3839 break; 3840 } 3841 3842 out: 3843 dev_kfree_skb(skb); 3844 3845 return ret; 3846 } 3847 3848 int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, 3849 struct cfg80211_he_bss_color *he_bss_color) 3850 { 3851 int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_color); 3852 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3853 struct bss_info_color *bss_color; 3854 struct sk_buff *skb; 3855 struct tlv *tlv; 3856 3857 skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 3858 NULL, len); 3859 if (IS_ERR(skb)) 3860 return PTR_ERR(skb); 3861 3862 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, 3863 sizeof(*bss_color)); 3864 bss_color = (struct bss_info_color *)tlv; 3865 bss_color->disable = !he_bss_color->enabled; 3866 bss_color->color = he_bss_color->color; 3867 3868 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 3869 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 3870 } 3871 3872 #define TWT_AGRT_TRIGGER BIT(0) 3873 #define TWT_AGRT_ANNOUNCE BIT(1) 3874 #define TWT_AGRT_PROTECT BIT(2) 3875 3876 int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, 3877 struct mt7915_vif *mvif, 3878 struct mt7915_twt_flow *flow, 3879 int cmd) 
3880 { 3881 struct { 3882 u8 tbl_idx; 3883 u8 cmd; 3884 u8 own_mac_idx; 3885 u8 flowid; /* 0xff for group id */ 3886 __le16 peer_id; /* specify the peer_id (msb=0) 3887 * or group_id (msb=1) 3888 */ 3889 u8 duration; /* 256 us */ 3890 u8 bss_idx; 3891 __le64 start_tsf; 3892 __le16 mantissa; 3893 u8 exponent; 3894 u8 is_ap; 3895 u8 agrt_params; 3896 u8 rsv[23]; 3897 } __packed req = { 3898 .tbl_idx = flow->table_id, 3899 .cmd = cmd, 3900 .own_mac_idx = mvif->mt76.omac_idx, 3901 .flowid = flow->id, 3902 .peer_id = cpu_to_le16(flow->wcid), 3903 .duration = flow->duration, 3904 .bss_idx = mvif->mt76.idx, 3905 .start_tsf = cpu_to_le64(flow->tsf), 3906 .mantissa = flow->mantissa, 3907 .exponent = flow->exp, 3908 .is_ap = true, 3909 }; 3910 3911 if (flow->protection) 3912 req.agrt_params |= TWT_AGRT_PROTECT; 3913 if (!flow->flowtype) 3914 req.agrt_params |= TWT_AGRT_ANNOUNCE; 3915 if (flow->trigger) 3916 req.agrt_params |= TWT_AGRT_TRIGGER; 3917 3918 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE), 3919 &req, sizeof(req), true); 3920 } 3921 3922 int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) 3923 { 3924 struct { 3925 __le32 cmd; 3926 __le32 arg0; 3927 __le32 arg1; 3928 __le16 arg2; 3929 } __packed req = { 3930 .cmd = cpu_to_le32(0x15), 3931 }; 3932 struct mt7915_mcu_wa_tx_stat { 3933 __le16 wcid; 3934 u8 __rsv2[2]; 3935 3936 /* tx_bytes is deprecated since WA byte counter uses u32, 3937 * which easily leads to overflow. 3938 */ 3939 __le32 tx_bytes; 3940 __le32 tx_packets; 3941 } __packed *res; 3942 struct mt76_wcid *wcid; 3943 struct sk_buff *skb; 3944 int ret, len; 3945 u16 ret_wcid; 3946 3947 if (is_mt7915(&dev->mt76)) { 3948 req.arg0 = cpu_to_le32(wlan_idx); 3949 len = sizeof(req) - sizeof(req.arg2); 3950 } else { 3951 req.arg0 = cpu_to_le32(1); 3952 req.arg2 = cpu_to_le16(wlan_idx); 3953 len = sizeof(req); 3954 } 3955 3956 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY), 3957 &req, len, true, &skb); 3958 if (ret) 3959 return ret; 3960 3961 if (!is_mt7915(&dev->mt76)) 3962 skb_pull(skb, 4); 3963 3964 res = (struct mt7915_mcu_wa_tx_stat *)skb->data; 3965 3966 ret_wcid = le16_to_cpu(res->wcid); 3967 if (is_mt7915(&dev->mt76)) 3968 ret_wcid &= 0xff; 3969 3970 if (ret_wcid != wlan_idx) { 3971 ret = -EINVAL; 3972 goto out; 3973 } 3974 3975 rcu_read_lock(); 3976 3977 wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); 3978 if (wcid) 3979 wcid->stats.tx_packets += le32_to_cpu(res->tx_packets); 3980 else 3981 ret = -EINVAL; 3982 3983 rcu_read_unlock(); 3984 out: 3985 dev_kfree_skb(skb); 3986 3987 return ret; 3988 } 3989 3990 int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set) 3991 { 3992 struct { 3993 __le32 idx; 3994 __le32 ofs; 3995 __le32 data; 3996 } __packed req = { 3997 .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 24))), 3998 .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))), 3999 .data = set ? cpu_to_le32(*val) : 0, 4000 }; 4001 struct sk_buff *skb; 4002 int ret; 4003 4004 if (set) 4005 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS), 4006 &req, sizeof(req), false); 4007 4008 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS), 4009 &req, sizeof(req), true, &skb); 4010 if (ret) 4011 return ret; 4012 4013 *val = le32_to_cpu(*(__le32 *)(skb->data + 8)); 4014 dev_kfree_skb(skb); 4015 4016 return 0; 4017 } 4018
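/* Hedged usage sketch for mt7915_mcu_rf_regval() above; "err" and
 * "regidx" are hypothetical locals and the read-modify-write flow is only
 * an example, not the in-tree debugfs caller:
 *
 *	u32 val;
 *	int err;
 *
 *	err = mt7915_mcu_rf_regval(dev, regidx, &val, false);	// read
 *	if (!err) {
 *		val |= BIT(0);
 *		err = mt7915_mcu_rf_regval(dev, regidx, &val, true);	// write back
 *	}
 *
 * regidx packs an index in bits 31:24 and the register offset in bits
 * 23:0, matching the idx/ofs split in the request structure above.
 */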