// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/fs.h>
#include <linux/firmware.h>
#include "mt7921.h"
#include "mcu.h"
#include "../mt76_connac2_mac.h"
#include "../mt792x_trace.h"

#define MT_STA_BFER	BIT(0)
#define MT_STA_BFEE	BIT(1)

static bool mt7921_disable_clc;
module_param_named(disable_clc, mt7921_disable_clc, bool, 0644);
MODULE_PARM_DESC(disable_clc, "disable CLC support");

int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			      struct sk_buff *skb, int seq)
{
	int mcu_cmd = FIELD_GET(__MCU_CMD_FIELD_ID, cmd);
	struct mt76_connac2_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);
		mt792x_reset(mdev);

		return -ETIMEDOUT;
	}

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	if (seq != rxd->seq)
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL) ||
	    cmd == MCU_CMD(PATCH_FINISH_REQ)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else if (cmd == MCU_UNI_CMD(DEV_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(BSS_INFO_UPDATE) ||
		   cmd == MCU_UNI_CMD(STA_REC_UPDATE) ||
		   cmd == MCU_UNI_CMD(HIF_CTRL) ||
		   cmd == MCU_UNI_CMD(OFFLOAD) ||
		   cmd == MCU_UNI_CMD(SUSPEND)) {
		struct mt76_connac_mcu_uni_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_uni_event *)skb->data;
		ret = le32_to_cpu(event->status);
		/* skip invalid event */
		if (mcu_cmd != event->cid)
			ret = -EAGAIN;
	} else if (cmd == MCU_CE_QUERY(REG_READ)) {
		struct mt76_connac_mcu_reg_event *event;

		skb_pull(skb, sizeof(*rxd));
		event = (struct mt76_connac_mcu_reg_event *)skb->data;
		ret = (int)le32_to_cpu(event->val);
	} else {
		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);

static int mt7921_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val)
{
	struct mt7921_mcu_eeprom_info *res, req = {
		.addr = cpu_to_le32(round_down(offset,
				    MT7921_EEPROM_BLOCK_SIZE)),
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7921_mcu_eeprom_info *)skb->data;
	*val = res->data[offset % MT7921_EEPROM_BLOCK_SIZE];
	dev_kfree_skb(skb);

	return 0;
}

#ifdef CONFIG_PM

static int
mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
			      struct ieee80211_vif *vif, bool suspend)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct mt76_connac_arpns_tlv arpns;
	} req = {
		.hdr = {
			.bss_idx = mvif->bss_conf.mt76.idx,
		},
		.arpns = {
			.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
			.len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)),
			.mode = suspend,
		},
	};

	return mt76_mcu_send_msg(dev, MCU_UNI_CMD(OFFLOAD), &req, sizeof(req),
				 true);
}

void mt7921_mcu_set_suspend_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (IS_ENABLED(CONFIG_IPV6)) {
		struct mt76_phy *phy = priv;

		mt7921_mcu_set_ipv6_ns_filter(phy->dev, vif,
					      !test_bit(MT76_STATE_RUNNING,
							&phy->state));
	}

	mt76_connac_mcu_set_suspend_iter(priv, mac, vif);
}

#endif /* CONFIG_PM */

static void
mt7921_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt7921_roc_grant_tlv *grant;
	struct mt76_connac2_mcu_rxd *rxd;
	int duration;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	grant = (struct mt7921_roc_grant_tlv *)(rxd->tlv + 4);

	/* should never happen */
	WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));

	if (grant->reqtype == MT7921_ROC_REQ_ROC)
		ieee80211_ready_on_channel(dev->mt76.phy.hw);

	dev->phy.roc_grant = true;
	wake_up(&dev->phy.roc_wait);
	duration = le32_to_cpu(grant->max_interval);
	mod_timer(&dev->phy.roc_timer,
		  jiffies + msecs_to_jiffies(duration));
}

static void
mt7921_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt792x_phy *phy = mphy->priv;

	spin_lock_bh(&dev->mt76.lock);
	__skb_queue_tail(&phy->scan_event_list, skb);
	spin_unlock_bh(&dev->mt76.lock);

	ieee80211_queue_delayed_work(mphy->hw, &phy->scan_work,
				     MT792x_HW_SCAN_TIMEOUT);
}

static void
mt7921_mcu_connection_loss_iter(void *priv, u8 *mac,
				struct ieee80211_vif *vif)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct mt76_connac_beacon_loss_event *event = priv;

	if (mvif->idx != event->bss_idx)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER) ||
	    vif->type != NL80211_IFTYPE_STATION)
		return;

	ieee80211_connection_loss(vif);
}

static void
mt7921_mcu_connection_loss_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_beacon_loss_event *event;
	struct mt76_phy *mphy = &dev->mt76.phy;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt76_connac_beacon_loss_event *)skb->data;

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_connection_loss_iter, event);
}

static void
mt7921_mcu_debug_msg_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt7921_debug_msg {
		__le16 id;
		u8 type;
		u8 flag;
		__le32 value;
		__le16 len;
		u8 content[512];
	} __packed * msg;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	msg = (struct mt7921_debug_msg *)skb->data;

	if (msg->type == 3) { /* fw log */
		u16 len = min_t(u16, le16_to_cpu(msg->len), 512);
		int i;

		for (i = 0; i < len; i++) {
			if (!msg->content[i])
				msg->content[i] = ' ';
		}
		wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content);
	}
}

static void
mt7921_mcu_low_power_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_lp_event {
		u8 state;
		u8 reserved[3];
	} __packed * event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_lp_event *)skb->data;

	trace_lp_event(dev, event->state);
}

static void
mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt7921_mcu_tx_done_event *event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt7921_mcu_tx_done_event *)skb->data;

	mt7921_mac_add_txs(dev, event->txs);
}

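/* Per-vif iterator for MCU_EVENT_RSSI_NOTIFY: translate the firmware RSSI
 * report for this vif's BSS index into a cfg80211 CQM notification when the
 * interface has CQM RSSI monitoring enabled.
 */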
static void
mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac,
			     struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct mt76_connac_rssi_notify_event *event = priv;
	enum nl80211_cqm_rssi_threshold_event nl_event;
	s32 rssi = le32_to_cpu(event->rssi[mvif->bss_conf.mt76.idx]);

	if (!rssi)
		return;

	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
		return;

	if (rssi > vif->bss_conf.cqm_rssi_thold)
		nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
	else
		nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;

	ieee80211_cqm_rssi_notify(vif, nl_event, rssi, GFP_KERNEL);
}

static void
mt7921_mcu_rssi_monitor_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac_rssi_notify_event *event;

	skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	event = (struct mt76_connac_rssi_notify_event *)skb->data;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
					IEEE80211_IFACE_ITER_RESUME_ALL,
					mt7921_mcu_rssi_monitor_iter, event);
}

static void
mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->eid) {
	case MCU_EVENT_BSS_BEACON_LOSS:
		mt7921_mcu_connection_loss_event(dev, skb);
		break;
	case MCU_EVENT_SCHED_SCAN_DONE:
	case MCU_EVENT_SCAN_DONE:
		mt7921_mcu_scan_event(dev, skb);
		return;
	case MCU_EVENT_DBG_MSG:
		mt7921_mcu_debug_msg_event(dev, skb);
		break;
	case MCU_EVENT_COREDUMP:
		dev->fw_assert = true;
		mt76_connac_mcu_coredump_event(&dev->mt76, skb,
					       &dev->coredump);
		return;
	case MCU_EVENT_LP_INFO:
		mt7921_mcu_low_power_event(dev, skb);
		break;
	case MCU_EVENT_TX_DONE:
		mt7921_mcu_tx_done_event(dev, skb);
		break;
	case MCU_EVENT_RSSI_NOTIFY:
		mt7921_mcu_rssi_monitor_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

static void
mt7921_mcu_uni_rx_unsolicited_event(struct mt792x_dev *dev,
				    struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	switch (rxd->eid) {
	case MCU_UNI_EVENT_ROC:
		mt7921_mcu_uni_roc_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	if (skb_linearize(skb))
		return;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;

	if (rxd->option & MCU_UNI_CMD_UNSOLICITED_EVENT) {
		mt7921_mcu_uni_rx_unsolicited_event(dev, skb);
		return;
	}

	if (rxd->eid == 0x6) {
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	}

	if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
	    rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
	    rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_RSSI_NOTIFY ||
	    rxd->eid == MCU_EVENT_SCAN_DONE ||
	    rxd->eid == MCU_EVENT_TX_DONE ||
	    rxd->eid == MCU_EVENT_DBG_MSG ||
	    rxd->eid == MCU_EVENT_COREDUMP ||
	    rxd->eid == MCU_EVENT_LP_INFO ||
	    !rxd->seq)
		mt7921_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/** starec & wtbl **/
int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;

	if (enable && !params->amsdu)
		msta->deflink.wcid.amsdu = false;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, true);
}

int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
				      MCU_UNI_CMD(STA_REC_UPDATE),
				      enable, false);
}

static int mt7921_load_clc(struct mt792x_dev *dev, const char *fw_name)
{
	const struct mt76_connac2_fw_trailer *hdr;
	const struct mt76_connac2_fw_region *region;
	const struct mt7921_clc *clc;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt792x_phy *phy = &dev->phy;
	const struct firmware *fw;
	int ret, i, len, offset = 0;
	u8 *clc_base = NULL, hw_encap = 0;

	dev->phy.clc_chan_conf = 0xff;
	if (mt7921_disable_clc ||
	    mt76_is_usb(&dev->mt76))
		return 0;

	if (mt76_is_mmio(&dev->mt76)) {
		ret = mt7921_mcu_read_eeprom(dev, MT_EE_HW_TYPE, &hw_encap);
		if (ret)
			return ret;
		hw_encap = u8_get_bits(hw_encap, MT_EE_HW_TYPE_ENCAP);
	}

	ret = request_firmware(&fw, fw_name, mdev->dev);
	if (ret)
		return ret;

	if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
		dev_err(mdev->dev, "Invalid firmware\n");
		ret = -EINVAL;
		goto out;
	}

	hdr = (const void *)(fw->data + fw->size - sizeof(*hdr));
	for (i = 0; i < hdr->n_region; i++) {
		region = (const void *)((const u8 *)hdr -
					(hdr->n_region - i) * sizeof(*region));
		len = le32_to_cpu(region->len);

		/* check if we have valid buffer size */
		if (offset + len > fw->size) {
			dev_err(mdev->dev, "Invalid firmware region\n");
			ret = -EINVAL;
			goto out;
		}

		if ((region->feature_set & FW_FEATURE_NON_DL) &&
		    region->type == FW_TYPE_CLC) {
			clc_base = (u8 *)(fw->data + offset);
			break;
		}
		offset += len;
	}

	if (!clc_base)
		goto out;

	for (offset = 0; offset < len; offset += le32_to_cpu(clc->len)) {
		clc = (const struct mt7921_clc *)(clc_base + offset);

		/* do not init buf again if chip reset triggered */
		if (phy->clc[clc->idx])
			continue;

		/* header content sanity */
		if (clc->idx == MT7921_CLC_POWER &&
		    u8_get_bits(clc->type, MT_EE_HW_TYPE_ENCAP) != hw_encap)
			continue;

		phy->clc[clc->idx] = devm_kmemdup(mdev->dev, clc,
						  le32_to_cpu(clc->len),
						  GFP_KERNEL);

		if (!phy->clc[clc->idx]) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = mt7921_mcu_set_clc(dev, "00", ENVIRON_INDOOR);
out:
	release_firmware(fw);

	return ret;
}

static void mt7921_mcu_parse_tx_resource(struct mt76_dev *dev,
					 struct sk_buff *skb)
{
	struct mt76_sdio *sdio = &dev->sdio;
	struct mt7921_tx_resource {
		__le32 version;
		__le32 pse_data_quota;
		__le32 pse_mcu_quota;
		__le32 ple_data_quota;
		__le32 ple_mcu_quota;
		__le16 pse_page_size;
		__le16 ple_page_size;
		u8 pp_padding;
		u8 pad[3];
	} __packed * tx_res;

	tx_res = (struct mt7921_tx_resource *)skb->data;
	sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
	sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota);
	sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
	sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
	sdio->sched.deficit = tx_res->pp_padding;
}

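/* Decode the PHY capability TLV returned by the NIC capability query:
 * derive the antenna/chain masks from the reported number of spatial
 * streams and the supported bands from the RF path bitmap.
 */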
static void mt7921_mcu_parse_phy_cap(struct mt76_dev *dev,
				     struct sk_buff *skb)
{
	struct mt7921_phy_cap {
		u8 ht;
		u8 vht;
		u8 _5g;
		u8 max_bw;
		u8 nss;
		u8 dbdc;
		u8 tx_ldpc;
		u8 rx_ldpc;
		u8 tx_stbc;
		u8 rx_stbc;
		u8 hw_path;
		u8 he;
	} __packed * cap;

	enum {
		WF0_24G,
		WF0_5G
	};

	cap = (struct mt7921_phy_cap *)skb->data;

	dev->phy.antenna_mask = BIT(cap->nss) - 1;
	dev->phy.chainmask = dev->phy.antenna_mask;
	dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
	dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
}

static int mt7921_mcu_get_nic_capability(struct mt792x_phy *mphy)
{
	struct mt76_connac_cap_hdr {
		__le16 n_element;
		u8 rsv[2];
	} __packed * hdr;
	struct sk_buff *skb;
	struct mt76_phy *phy = mphy->mt76;
	int ret, i;

	ret = mt76_mcu_send_and_get_msg(phy->dev, MCU_CE_CMD(GET_NIC_CAPAB),
					NULL, 0, true, &skb);
	if (ret)
		return ret;

	hdr = (struct mt76_connac_cap_hdr *)skb->data;
	if (skb->len < sizeof(*hdr)) {
		ret = -EINVAL;
		goto out;
	}

	skb_pull(skb, sizeof(*hdr));

	for (i = 0; i < le16_to_cpu(hdr->n_element); i++) {
		struct tlv_hdr {
			__le32 type;
			__le32 len;
		} __packed * tlv = (struct tlv_hdr *)skb->data;
		int len;

		if (skb->len < sizeof(*tlv))
			break;

		skb_pull(skb, sizeof(*tlv));

		len = le32_to_cpu(tlv->len);
		if (skb->len < len)
			break;

		switch (le32_to_cpu(tlv->type)) {
		case MT_NIC_CAP_6G:
			phy->cap.has_6ghz = skb->data[0];
			break;
		case MT_NIC_CAP_MAC_ADDR:
			memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN);
			break;
		case MT_NIC_CAP_PHY:
			mt7921_mcu_parse_phy_cap(phy->dev, skb);
			break;
		case MT_NIC_CAP_TX_RESOURCE:
			if (mt76_is_sdio(phy->dev))
				mt7921_mcu_parse_tx_resource(phy->dev,
							     skb);
			break;
		case MT_NIC_CAP_CHIP_CAP:
			memcpy(&mphy->chip_cap, (void *)skb->data, sizeof(u64));
			break;
		default:
			break;
		}
		skb_pull(skb, len);
	}
out:
	dev_kfree_skb(skb);

	return ret;
}

int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl)
{
	struct {
		u8 ctrl_val;
		u8 pad[3];
	} data = {
		.ctrl_val = ctrl
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(FWLOG_2_HOST),
				 &data, sizeof(data), false);
}

int mt7921_run_firmware(struct mt792x_dev *dev)
{
	int err;

	err = mt792x_load_firmware(dev);
	if (err)
		return err;

	err = mt7921_mcu_get_nic_capability(&dev->phy);
	if (err)
		return err;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	err = mt7921_load_clc(dev, mt792x_ram_name(dev));
	if (err)
		return err;

	return mt7921_mcu_fw_log_2_host(dev, 1);
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);

int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value)
{
	struct {
		u8 ctrlid;
		u8 rsv[3];
	} __packed req = {
		.ctrlid = value,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ID_RADIO_ON_OFF_CTRL),
				 &req, sizeof(req), false);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_radio_led_ctrl);

int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct edca {
		__le16 cw_min;
		__le16 cw_max;
		__le16 txop;
		__le16 aifs;
		u8 guardtime;
		u8 acm;
	} __packed;
	struct mt7921_mcu_tx {
		struct edca edca[IEEE80211_NUM_ACS];
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad;
	} __packed req = {
		.bss_idx = mvif->bss_conf.mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->bss_conf.mt76.wmm_idx,
	};
	struct mu_edca {
		u8 cw_min;
		u8 cw_max;
		u8 aifsn;
		u8 acm;
		u8 timer;
		u8 padding[3];
	};
	struct mt7921_mcu_mu_tx {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 bss_idx;
		u8 qos;
		u8 wmm_idx;
		u8 pad1;
		struct mu_edca edca[IEEE80211_NUM_ACS];
		u8 pad3[32];
	} __packed req_mu = {
		.bss_idx = mvif->bss_conf.mt76.idx,
		.qos = vif->bss_conf.qos,
		.wmm_idx = mvif->bss_conf.mt76.wmm_idx,
	};
	static const int to_aci[] = { 1, 0, 2, 3 };
	int ac, ret;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac];
		struct edca *e = &req.edca[to_aci[ac]];

		e->aifs = cpu_to_le16(q->aifs);
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = cpu_to_le16(q->cw_min);
		else
			e->cw_min = cpu_to_le16(5);

		if (q->cw_max)
			e->cw_max = cpu_to_le16(q->cw_max);
		else
			e->cw_max = cpu_to_le16(10);
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_EDCA_PARMS), &req,
				sizeof(req), false);
	if (ret)
		return ret;

	if (!vif->bss_conf.he_support)
		return 0;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_he_mu_edca_param_ac_rec *q;
		struct mu_edca *e;

		if (!mvif->bss_conf.queue_params[ac].mu_edca)
			break;

		q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec;
		e = &(req_mu.edca[to_aci[ac]]);

		e->cw_min = q->ecw_min_max & 0xf;
		e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
		e->aifsn = q->aifsn;
		e->timer = q->mu_edca_timer;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_MU_EDCA_PARMS),
				 &req_mu, sizeof(req_mu), false);
}

int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
		       struct ieee80211_channel *chan, int duration,
		       enum mt7921_roc_req type, u8 token_id)
{
	int center_ch = ieee80211_frequency_to_channel(chan->center_freq);
	struct mt792x_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_acquire_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 control_channel;
			u8 sco;
			u8 band;
			u8 bw;
			u8 center_chan;
			u8 center_chan2;
			u8 bw_from_ap;
			u8 center_chan_from_ap;
			u8 center_chan2_from_ap;
			u8 reqtype;
			__le32 maxinterval;
			u8 dbdcband;
			u8 rsv[3];
		} __packed roc;
	} __packed req = {
		.roc = {
			.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
			.len = cpu_to_le16(sizeof(struct roc_acquire_tlv)),
			.tokenid = token_id,
			.reqtype = type,
			.maxinterval = cpu_to_le32(duration),
			.bss_idx = vif->bss_conf.mt76.idx,
			.control_channel = chan->hw_value,
			.bw = CMD_CBW_20MHZ,
			.bw_from_ap = CMD_CBW_20MHZ,
			.center_chan = center_ch,
			.center_chan_from_ap = center_ch,
			.dbdcband = 0xff, /* auto */
		},
	};

	if (chan->hw_value < center_ch)
		req.roc.sco = 1; /* SCA */
	else if (chan->hw_value > center_ch)
		req.roc.sco = 3; /* SCB */

	switch (chan->band) {
	case NL80211_BAND_6GHZ:
		req.roc.band = 3;
		break;
	case NL80211_BAND_5GHZ:
		req.roc.band = 2;
		break;
	default:
		req.roc.band = 1;
		break;
	}

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

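/* Cancel a pending remain-on-channel request identified by token_id by
 * sending the UNI_ROC_ABORT TLV for the vif's BSS index.
 */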
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
			 u8 token_id)
{
	struct mt792x_dev *dev = phy->dev;
	struct {
		struct {
			u8 rsv[4];
		} __packed hdr;
		struct roc_abort_tlv {
			__le16 tag;
			__le16 len;
			u8 bss_idx;
			u8 tokenid;
			u8 dbdcband;
			u8 rsv[5];
		} __packed abort;
	} __packed req = {
		.abort = {
			.tag = cpu_to_le16(UNI_ROC_ABORT),
			.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
			.tokenid = token_id,
			.bss_idx = vif->bss_conf.mt76.idx,
			.dbdcband = 0xff, /* auto */
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(ROC),
				 &req, sizeof(req), false);
}

int mt7921_mcu_set_chan_info(struct mt792x_phy *phy, int cmd)
{
	struct mt792x_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_streams_num;
		u8 rx_streams;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch = ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_streams_num = hweight8(phy->mt76->antenna_mask),
		.rx_streams = phy->mt76->antenna_mask,
		.band_idx = phy != &dev->phy,
	};

	if (chandef->chan->band == NL80211_BAND_6GHZ)
		req.channel_band = 2;
	else
		req.channel_band = chandef->chan->band;

	if (cmd == MCU_EXT_CMD(SET_RX_PATH) ||
	    dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (phy->mt76->offchannel)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(dev->mt76.hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_streams = hweight8(req.rx_streams);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

int mt7921_mcu_set_eeprom(struct mt792x_dev *dev)
{
	struct req_hdr {
		u8 buffer_mode;
		u8 format;
		__le16 len;
	} __packed req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}
EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);

int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct ps_tlv {
			__le16 tag;
			__le16 len;
			u8 ps_state; /* 0: device awake
				      * 1: static power save
				      * 2: dynamic power saving
				      * 3: enter TWT power saving
				      * 4: leave TWT power saving
				      */
			u8 pad[3];
		} __packed ps;
	} __packed ps_req = {
		.hdr = {
			.bss_idx = mvif->bss_conf.mt76.idx,
		},
		.ps = {
			.tag = cpu_to_le16(UNI_BSS_INFO_PS),
			.len = cpu_to_le16(sizeof(struct ps_tlv)),
			.ps_state = vif->cfg.ps ? 2 : 0,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return -EOPNOTSUPP;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &ps_req, sizeof(ps_req), true);
}

static int
mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
			 bool enable)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct {
		struct {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcnft_tlv {
			__le16 tag;
			__le16 len;
			__le16 bcn_interval;
			u8 dtim_period;
			u8 pad;
		} __packed bcnft;
	} __packed bcnft_req = {
		.hdr = {
			.bss_idx = mvif->bss_conf.mt76.idx,
		},
		.bcnft = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
			.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
			.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
			.dtim_period = vif->bss_conf.dtim_period,
		},
	};

	if (vif->type != NL80211_IFTYPE_STATION)
		return 0;

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &bcnft_req, sizeof(bcnft_req), true);
}

int
mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
		      bool enable)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct {
		u8 bss_idx;
		u8 dtim_period;
		__le16 aid;
		__le16 bcn_interval;
		__le16 atim_window;
		u8 uapsd;
		u8 bmc_delivered_ac;
		u8 bmc_triggered_ac;
		u8 pad;
	} req = {
		.bss_idx = mvif->bss_conf.mt76.idx,
		.aid = cpu_to_le16(vif->cfg.aid),
		.dtim_period = vif->bss_conf.dtim_period,
		.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
	};
	struct {
		u8 bss_idx;
		u8 pad[3];
	} req_hdr = {
		.bss_idx = mvif->bss_conf.mt76.idx,
	};
	int err;

	err = mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_ABORT),
				&req_hdr, sizeof(req_hdr), false);
	if (err < 0 || !enable)
		return err;

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_BSS_CONNECTED),
				 &req, sizeof(req), false);
}

int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
			  struct ieee80211_vif *vif, bool enable,
			  enum mt76_sta_info_state state)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi);
	struct mt76_sta_cmd_info info = {
		.sta = sta,
		.vif = vif,
		.enable = enable,
		.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
		.state = state,
		.offload_fw = true,
		.rcpi = to_rcpi(rssi),
	};
	struct mt792x_sta *msta;

	msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
	info.wcid = msta ? &msta->deflink.wcid : &mvif->sta.deflink.wcid;
	info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;

	return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}

int mt7921_mcu_set_beacon_filter(struct mt792x_dev *dev,
				 struct ieee80211_vif *vif,
				 bool enable)
{
#define MT7921_FIF_BIT_CLR	BIT(1)
#define MT7921_FIF_BIT_SET	BIT(0)
	int err;

	if (enable) {
		err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
		if (err)
			return err;

		err = mt7921_mcu_set_rxfilter(dev, 0,
					      MT7921_FIF_BIT_SET,
					      MT_WF_RFCR_DROP_OTHER_BEACON);
		if (err)
			return err;

		return 0;
	}

	err = mt7921_mcu_set_bss_pm(dev, vif, false);
	if (err)
		return err;

	err = mt7921_mcu_set_rxfilter(dev, 0,
				      MT7921_FIF_BIT_CLR,
				      MT_WF_RFCR_DROP_OTHER_BEACON);
	if (err)
		return err;

	return 0;
}

int mt7921_get_txpwr_info(struct mt792x_dev *dev, struct mt7921_txpwr *txpwr)
{
	struct mt7921_txpwr_event *event;
	struct mt7921_txpwr_req req = {
		.dbdc_idx = 0,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CE_CMD(GET_TXPWR),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	event = (struct mt7921_txpwr_event *)skb->data;
	WARN_ON(skb->len != le16_to_cpu(event->len));
	memcpy(txpwr, &event->txpwr, sizeof(event->txpwr));

	dev_kfree_skb(skb);

	return 0;
}

int mt7921_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
			   bool enable)
{
	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct sniffer_enable_tlv {
			__le16 tag;
			__le16 len;
			u8 enable;
			u8 pad[3];
		} __packed enable;
	} req = {
		.hdr = {
			.band_idx = mvif->band_idx,
		},
		.enable = {
			.tag = cpu_to_le16(0),
			.len = cpu_to_le16(sizeof(struct sniffer_enable_tlv)),
			.enable = enable,
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(SNIFFER), &req, sizeof(req),
				 true);
}

int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
			      struct ieee80211_chanctx_conf *ctx)
{
	struct cfg80211_chan_def *chandef = &ctx->def;
	int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
	static const u8 ch_band[] = {
		[NL80211_BAND_2GHZ] = 1,
		[NL80211_BAND_5GHZ] = 2,
		[NL80211_BAND_6GHZ] = 3,
	};
	static const u8 ch_width[] = {
		[NL80211_CHAN_WIDTH_20_NOHT] = 0,
		[NL80211_CHAN_WIDTH_20] = 0,
		[NL80211_CHAN_WIDTH_40] = 0,
		[NL80211_CHAN_WIDTH_80] = 1,
		[NL80211_CHAN_WIDTH_160] = 2,
		[NL80211_CHAN_WIDTH_80P80] = 3,
		[NL80211_CHAN_WIDTH_5] = 4,
		[NL80211_CHAN_WIDTH_10] = 5,
		[NL80211_CHAN_WIDTH_320] = 6,
	};
	struct {
		struct {
			u8 band_idx;
			u8 pad[3];
		} __packed hdr;
		struct config_tlv {
			__le16 tag;
			__le16 len;
			u16 aid;
			u8 ch_band;
			u8 bw;
			u8 control_ch;
			u8 sco;
			u8 center_ch;
			u8 center_ch2;
			u8 drop_err;
			u8 pad[3];
		} __packed tlv;
	} __packed req = {
		.hdr = {
			.band_idx = vif->bss_conf.mt76.band_idx,
		},
		.tlv = {
			.tag = cpu_to_le16(1),
			.len = cpu_to_le16(sizeof(req.tlv)),
			.control_ch = chandef->chan->hw_value,
			.center_ch = ieee80211_frequency_to_channel(freq1),
			.drop_err = 1,
		},
	};

	if (chandef->chan->band < ARRAY_SIZE(ch_band))
		req.tlv.ch_band = ch_band[chandef->chan->band];
	if (chandef->width < ARRAY_SIZE(ch_width))
		req.tlv.bw = ch_width[chandef->width];

	if (freq2)
		req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);

	if (req.tlv.control_ch < req.tlv.center_ch)
		req.tlv.sco = 1; /* SCA */
	else if (req.tlv.control_ch > req.tlv.center_ch)
		req.tlv.sco = 3; /* SCB */

	return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
				 &req, sizeof(req), true);
}

int
mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
				  struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  bool enable)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct mt76_wcid *wcid = &dev->mt76.global_wcid;
	struct ieee80211_mutable_offsets offs;
	struct {
		struct req_hdr {
			u8 bss_idx;
			u8 pad[3];
		} __packed hdr;
		struct bcn_content_tlv {
			__le16 tag;
			__le16 len;
			__le16 tim_ie_pos;
			__le16 csa_ie_pos;
			__le16 bcc_ie_pos;
			/* 0: disable beacon offload
			 * 1: enable beacon offload
			 * 2: update probe response offload
			 */
			u8 enable;
			/* 0: legacy format (TXD + payload)
			 * 1: only cap field IE
			 */
			u8 type;
			__le16 pkt_len;
			u8 pkt[512];
		} __packed beacon_tlv;
	} req = {
		.hdr = {
			.bss_idx = mvif->bss_conf.mt76.idx,
		},
		.beacon_tlv = {
			.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
			.len = cpu_to_le16(sizeof(struct bcn_content_tlv)),
			.enable = enable,
		},
	};
	struct sk_buff *skb;

	/* support enable/update process only
	 * disable flow would be handled in bss stop handler automatically
	 */
	if (!enable)
		return -EOPNOTSUPP;

	skb = ieee80211_beacon_get_template(mt76_hw(dev), vif, &offs, 0);
	if (!skb)
		return -EINVAL;

	if (skb->len > 512 - MT_TXD_SIZE) {
		dev_err(dev->mt76.dev, "beacon size limit exceed\n");
		dev_kfree_skb(skb);
		return -EINVAL;
	}

	mt76_connac2_mac_write_txwi(&dev->mt76, (__le32 *)(req.beacon_tlv.pkt),
				    skb, wcid, NULL, 0, 0, BSS_CHANGED_BEACON);
	memcpy(req.beacon_tlv.pkt + MT_TXD_SIZE, skb->data, skb->len);
	req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
	req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);

	if (offs.cntdwn_counter_offs[0]) {
		u16 csa_offs;

		csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
		req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
	}
	dev_kfree_skb(skb);

	return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
				 &req, sizeof(req), true);
}

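/* Push one CLC (country location configuration) blob to the firmware: walk
 * the rules embedded in the CLC data, pick the entries whose alpha2 matches
 * the requested country code and send each one via MCU_CE_CMD(SET_CLC).
 * Returns -ENOENT when the country is not found, so the caller can fall back
 * to the world ("00") configuration.
 */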
static
int __mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
			 enum environment_cap env_cap,
			 struct mt7921_clc *clc,
			 u8 idx)
{
#define CLC_CAP_EVT_EN BIT(0)
#define CLC_CAP_DTS_EN BIT(1)
	struct sk_buff *skb, *ret_skb = NULL;
	struct {
		u8 ver;
		u8 pad0;
		__le16 len;
		u8 idx;
		u8 env;
		u8 acpi_conf;
		u8 cap;
		u8 alpha2[2];
		u8 type[2];
		u8 env_6g;
		u8 mtcl_conf;
		u8 rsvd[62];
	} __packed req = {
		.ver = 1,
		.idx = idx,
		.env = env_cap,
		.env_6g = dev->phy.power_type,
		.acpi_conf = mt792x_acpi_get_flags(&dev->phy),
		.mtcl_conf = mt792x_acpi_get_mtcl_conf(&dev->phy, alpha2),
	};
	int ret, valid_cnt = 0;
	u32 buf_len = 0;
	u8 *pos;

	if (!clc)
		return 0;

	if (dev->phy.chip_cap & MT792x_CHIP_CAP_CLC_EVT_EN)
		req.cap |= CLC_CAP_EVT_EN;
	if (mt76_find_power_limits_node(&dev->mt76))
		req.cap |= CLC_CAP_DTS_EN;

	buf_len = le32_to_cpu(clc->len) - sizeof(*clc);
	pos = clc->data;
	while (buf_len > 16) {
		struct mt7921_clc_rule *rule = (struct mt7921_clc_rule *)pos;
		u16 len = le16_to_cpu(rule->len);
		u16 offset = len + sizeof(*rule);

		pos += offset;
		buf_len -= offset;
		if (rule->alpha2[0] != alpha2[0] ||
		    rule->alpha2[1] != alpha2[1])
			continue;

		memcpy(req.alpha2, rule->alpha2, 2);
		memcpy(req.type, rule->type, 2);

		req.len = cpu_to_le16(sizeof(req) + len);
		skb = __mt76_mcu_msg_alloc(&dev->mt76, &req,
					   le16_to_cpu(req.len),
					   sizeof(req), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		skb_put_data(skb, rule->data, len);

		ret = mt76_mcu_skb_send_and_get_msg(&dev->mt76, skb,
						    MCU_CE_CMD(SET_CLC),
						    !!(req.cap & CLC_CAP_EVT_EN),
						    &ret_skb);
		if (ret < 0)
			return ret;

		if (ret_skb) {
			struct mt7921_clc_info_tlv *info;

			info = (struct mt7921_clc_info_tlv *)(ret_skb->data + 4);
			dev->phy.clc_chan_conf = info->chan_conf;
			dev_kfree_skb(ret_skb);
		}

		valid_cnt++;
	}

	if (!valid_cnt)
		return -ENOENT;

	return 0;
}

int mt7921_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
		       enum environment_cap env_cap)
{
	struct mt792x_phy *phy = (struct mt792x_phy *)&dev->phy;
	int i, ret;

	/* submit all clc config */
	for (i = 0; i < ARRAY_SIZE(phy->clc); i++) {
		ret = __mt7921_mcu_set_clc(dev, alpha2, env_cap,
					   phy->clc[i], i);

		/* If no country found, set "00" as default */
		if (ret == -ENOENT)
			ret = __mt7921_mcu_set_clc(dev, "00",
						   ENVIRON_INDOOR,
						   phy->clc[i], i);
		if (ret < 0)
			return ret;
	}
	return 0;
}

int mt7921_mcu_get_temperature(struct mt792x_phy *phy)
{
	struct mt792x_dev *dev = phy->dev;
	struct {
		u8 ctrl_id;
		u8 action;
		u8 band_idx;
		u8 rsv[5];
	} req = {
		.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
		.band_idx = phy->mt76->band_idx,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
				 sizeof(req), true);
}

int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
			    u8 bit_op, u32 bit_map)
{
	struct {
		u8 rsv[4];
		u8 mode;
		u8 rsv2[3];
		__le32 fif;
		__le32 bit_map; /* bit_* for bitmap update */
		u8 bit_op;
		u8 pad[51];
	} __packed data = {
		.mode = fif ? 1 : 2,
		.fif = cpu_to_le32(fif),
		.bit_map = cpu_to_le32(bit_map),
		.bit_op = bit_op,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
				 &data, sizeof(data), false);
}

int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif)
{
	struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
	struct {
		u8 enable;
		s8 cqm_rssi_high;
		s8 cqm_rssi_low;
		u8 bss_idx;
		u16 duration;
		u8 rsv2[2];
	} __packed data = {
		.enable = vif->cfg.assoc,
		.cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst,
		.cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst,
		.bss_idx = mvif->bss_conf.mt76.idx,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR),
				 &data, sizeof(data), false);
}