/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#if defined(__FreeBSD__)
#include <linux/wait.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#endif
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
#define MT_QFLAG_WED		BIT(5)
#define MT_QFLAG_WED_RRO	BIT(6)
#define MT_QFLAG_WED_RRO_EN	BIT(7)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))

#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;
struct mt76_chanctx;
struct mt76_vif_link;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev)	((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev)	((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev)	((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_BAND1,
	MT_RXQ_BAND1_WA,
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
	MT_RXQ_RRO_BAND0,
	MT_RXQ_RRO_BAND1,
	MT_RXQ_RRO_BAND2,
	MT_RXQ_MSDU_PAGE_BAND0,
	MT_RXQ_MSDU_PAGE_BAND1,
	MT_RXQ_MSDU_PAGE_BAND2,
	MT_RXQ_TXFREE_BAND0,
	MT_RXQ_TXFREE_BAND1,
	MT_RXQ_TXFREE_BAND2,
	MT_RXQ_RRO_IND,
	__MT_RXQ_MAX
};

enum mt76_band_id {
	MT_BAND0,
	MT_BAND1,
	MT_BAND2,
	__MT_MAX_BAND
};
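/*
 * Illustrative sketch (not part of the API): the MT_QFLAG_WED* fields above
 * pack the WED queue type and ring index into mt76_queue::flags, so a queue
 * can be classified later with FIELD_GET(), e.g.:
 *
 *	u16 flags = MT_WED_Q_RX(1);
 *
 *	if ((flags & MT_QFLAG_WED) &&
 *	    FIELD_GET(MT_QFLAG_WED_TYPE, flags) == MT76_WED_Q_RX)
 *		ring = FIELD_GET(MT_QFLAG_WED_RING, flags); // ring == 1
 */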
enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};

#define MT76_RNR_SCAN_MAX_BSSIDS	16
struct mt76_scan_rnr_param {
	u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
	u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
	u8 random_mac[ETH_ALEN];
	u8 seq_num;
	u8 bssid_num;
	u32 sreq_flag;
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len:15,
	    skip_unmap:1;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	dma_addr_t dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	u8 hw_idx;
	u8 ep;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u16 flags;

	struct mtk_wed_device *wed;
	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_pool *page_pool;
};

struct mt76_mcu_ops {
	unsigned int max_retry;
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				   int cmd, int *seq);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
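/*
 * Illustrative sketch (an assumption about the typical flow, not mandated by
 * this header): an skb-based MCU transaction passes through the ops above
 * roughly as
 *
 *	mcu_skb_prepare_msg(dev, skb, cmd, &seq);    // add headroom, seq no
 *	mcu_skb_send_msg(dev, skb, cmd, &seq);       // queue to the MCU ring
 *	// ...wait for the response event...
 *	mcu_parse_response(dev, cmd, rskb, seq);     // match seq, check status
 *
 * Drivers normally go through the mt76_mcu_send_msg()/mt76_mcu_skb_send_msg()
 * wrappers declared later in this file rather than calling the ops directly.
 */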
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	MT_PHY_TYPE_EHT_SU = 13,
	MT_PHY_TYPE_EHT_TRIG,
	MT_PHY_TYPE_EHT_MU,
	__MT_PHY_TYPE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_MAX];
	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	u64 tx_bytes;
	/* WED TX */
	u32 tx_packets;		/* unit: MSDU */
	u32 tx_retries;
	u32 tx_failed;
	/* WED RX */
	u64 rx_bytes;
	u32 rx_packets;
	u32 rx_errors;
	u32 rx_drops;
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 1088

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY	GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;
	unsigned long ampdu_state;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 offchannel:1;
	u8 sta:1;
	u8 sta_disabled:1;
	u8 amsdu:1;
	u8 phy_idx:2;
	u8 link_id:4;
	bool link_valid;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head tx_list;
	struct sk_buff_head tx_pending;
	struct sk_buff_head tx_offchannel;

	struct list_head list;
	struct idr pktid;

	struct mt76_sta_stats stats;

	struct list_head poll_list;

	struct mt76_wcid *def_wcid;
};

struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_wed_rro_ind {
	u32 se_id	: 12;
	u32 rsv		: 4;
	u32 start_sn	: 12;
	u32 ind_reason	: 4;
	u32 ind_cnt	: 13;
	u32 win_sz	: 3;
	u32 rsv2	: 13;
	u32 magic_cnt	: 3;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	union {
		struct sk_buff *skb;
		void *ptr;
	};
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 id;
	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[] __counted_by(size);
};

#define MT_TX_CB_DMA_DONE	BIT(0)
#define MT_TX_CB_TXS_DONE	BIT(1)
#define MT_TX_CB_TXS_FAILED	BIT(2)
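/*
 * Illustrative note (inferred from the flag names, not a normative rule): a
 * frame's status handling is finished once both DMA and TXS completion have
 * been seen, e.g.:
 *
 *	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
 *	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;
 *
 *	if ((cb->flags & done) == done)
 *		; // safe to report tx status to mac80211
 */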
#define MT_PACKET_ID_MASK	GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK	0
#define MT_PACKET_ID_NO_SKB	1
#define MT_PACKET_ID_WED	2
#define MT_PACKET_ID_FIRST	3
#define MT_PACKET_ID_HAS_RATE	BIT(7)
/* Timeout for giving up on a TXS callback. The clock starts when the
 * DMA_DONE callback is seen: the packet has been processed by then, so if
 * the firmware is going to send a TXS callback at all, it should arrive
 * shortly afterwards.
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_REGISTERED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
	MT76_STATE_WED_RESET,
};

enum mt76_sta_event {
	MT76_STA_EVENT_ASSOC,
	MT76_STA_EVENT_AUTHORIZE,
	MT76_STA_EVENT_DISASSOC,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
#define MT_DRV_AMSDU_OFFLOAD		BIT(5)
#define MT_DRV_IGNORE_TXS_FAILED	BIT(6)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	unsigned int link_data_size;

	void (*update_survey)(struct mt76_phy *phy);
	int (*set_channel)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb, u32 *info);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, enum mt76_sta_event ev);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);

	int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct mt76_vif_link *mlink);

	void (*vif_link_remove)(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *link_conf,
				struct mt76_vif_link *mlink);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};
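/*
 * Illustrative sketch (an assumption about consumption, mirroring what
 * mt76_get_survey() reports): the cc_* fields are cumulative airtime
 * counters, so channel load over a sampling window is a simple delta ratio:
 *
 *	u64 busy = state->cc_busy - prev_cc_busy;
 *	u64 active = state->cc_active - prev_cc_active;
 *	u8 load = active ? div64_u64(busy * 100, active) : 0;
 */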
/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;
	struct mt76_worker stat_worker;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	int pse_mcu_quota_max;
	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 phy_idx:2;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:3, bw:4;
	union {
		struct {
			u8 he_ru:3;
			u8 he_gi:2;
			u8 he_dcm:1;
		};
		struct {
			u8 ru:4;
			u8 gi:2;
		} eht;
	};

	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss:5, band:3;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};
struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif_link {
	u8 idx;
	u8 link_idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
	u8 basic_rates_idx;
	u8 mcast_rates_idx;
	u8 beacon_rates_idx;
	bool offchannel;
	struct ieee80211_chanctx_conf *ctx;
	struct mt76_wcid *wcid;
	struct mt76_vif_data *mvif;
	struct rcu_head rcu_head;
};

struct mt76_vif_data {
	struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
	struct mt76_vif_link __rcu *offchannel_link;

	struct mt76_phy *roc_phy;
	u16 valid_links;
	u8 deflink_id;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;
	unsigned int num_sta;
	u8 band_idx;

	spinlock_t tx_lock;
	struct list_head tx_list;
	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct cfg80211_chan_def main_chandef;
	bool offchannel;
	bool radar_enabled;

	struct delayed_work roc_work;
	struct ieee80211_vif *roc_vif;
	struct mt76_vif_link *roc_link;

	struct mt76_chanctx *chanctx;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	u32 aggr_stats[32];

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;

	struct {
		struct led_classdev cdev;
		char name[32];
		bool al; /* active-low */
		u8 pin;
	} leds;
};
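/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * a mt76_phy is the driver-private area of an ieee80211_hw and carries its
 * own private area in turn, so conversions typically look like:
 *
 *	struct mt76_phy *phy = hw->priv;
 *	struct foo_phy *fphy = phy->priv;	// driver-specific state
 *	struct mt76_dev *dev = phy->dev;
 */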
struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];
	struct mt76_phy *band_phys[NUM_NL80211_BANDS];

	struct ieee80211_hw *hw;

	spinlock_t wed_lock;
	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device *napi_dev;
	struct net_device *tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
	struct tasklet_struct irq_tasklet;

	struct list_head txwi_cache;
	struct list_head rxwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	spinlock_t rx_token_lock;
	struct idr rx_token;
	u16 rx_token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct list_head sta_poll_list;
	spinlock_t sta_poll_lock;

	u32 rev;

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	struct mt76_scan_rnr_param rnr;

	u32 debugfs_reg;

	u8 csa_complete;

	u32 rxfilter;

	struct delayed_work scan_work;
	struct {
		struct cfg80211_scan_request *req;
		struct ieee80211_channel *chan;
		struct ieee80211_vif *vif;
		struct mt76_vif_link *mlink;
		struct mt76_phy *phy;
		int chan_idx;
	} scan;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};

	atomic_t bus_hung;
};

/* per-phy stats. */
struct mt76_mib_stats {
	u32 ack_fail_cnt;
	u32 fcs_err_cnt;
	u32 rts_cnt;
	u32 rts_retries_cnt;
	u32 ba_miss_cnt;
	u32 tx_bf_cnt;
	u32 tx_mu_bf_cnt;
	u32 tx_mu_mpdu_cnt;
	u32 tx_mu_acked_mpdu_cnt;
	u32 tx_su_acked_mpdu_cnt;
	u32 tx_bf_ibf_ppdu_cnt;
	u32 tx_bf_ebf_ppdu_cnt;

	u32 tx_bf_rx_fb_all_cnt;
	u32 tx_bf_rx_fb_eht_cnt;
	u32 tx_bf_rx_fb_he_cnt;
	u32 tx_bf_rx_fb_vht_cnt;
	u32 tx_bf_rx_fb_ht_cnt;

	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
	u32 tx_bf_rx_fb_nc_cnt;
	u32 tx_bf_rx_fb_nr_cnt;
	u32 tx_bf_fb_cpl_cnt;
	u32 tx_bf_fb_trig_cnt;

	u32 tx_ampdu_cnt;
	u32 tx_stop_q_empty_cnt;
	u32 tx_mpdu_attempts_cnt;
	u32 tx_mpdu_success_cnt;
	u32 tx_pkt_ebf_cnt;
	u32 tx_pkt_ibf_cnt;

	u32 tx_rwp_fail_cnt;
	u32 tx_rwp_need_cnt;

	/* rx stats */
	u32 rx_fifo_full_cnt;
	u32 channel_idle_cnt;
	u32 primary_cca_busy_time;
	u32 secondary_cca_busy_time;
	u32 primary_energy_detect_time;
	u32 cck_mdrdy_time;
	u32 ofdm_mdrdy_time;
	u32 green_mdrdy_time;
	u32 rx_vector_mismatch_cnt;
	u32 rx_delimiter_fail_cnt;
	u32 rx_mrdy_cnt;
	u32 rx_len_mismatch_cnt;
	u32 rx_mpdu_cnt;
	u32 rx_ampdu_cnt;
	u32 rx_ampdu_bytes_cnt;
	u32 rx_ampdu_valid_subframe_cnt;
	u32 rx_ampdu_valid_subframe_bytes_cnt;
	u32 rx_pfdrop_cnt;
	u32 rx_vec_queue_overflow_drop_cnt;
	u32 rx_ba_cnt;

	u32 tx_amsdu[8];
	u32 tx_amsdu_cnt;

	/* mcu_muru_stats */
	u32 dl_cck_cnt;
	u32 dl_ofdm_cnt;
	u32 dl_htmix_cnt;
	u32 dl_htgf_cnt;
	u32 dl_vht_su_cnt;
	u32 dl_vht_2mu_cnt;
	u32 dl_vht_3mu_cnt;
	u32 dl_vht_4mu_cnt;
	u32 dl_he_su_cnt;
	u32 dl_he_ext_su_cnt;
	u32 dl_he_2ru_cnt;
	u32 dl_he_2mu_cnt;
	u32 dl_he_3ru_cnt;
	u32 dl_he_3mu_cnt;
	u32 dl_he_4ru_cnt;
	u32 dl_he_4mu_cnt;
	u32 dl_he_5to8ru_cnt;
	u32 dl_he_9to16ru_cnt;
	u32 dl_he_gtr16ru_cnt;

	u32 ul_hetrig_su_cnt;
	u32 ul_hetrig_2ru_cnt;
	u32 ul_hetrig_3ru_cnt;
	u32 ul_hetrig_4ru_cnt;
	u32 ul_hetrig_5to8ru_cnt;
	u32 ul_hetrig_9to16ru_cnt;
	u32 ul_hetrig_gtr16ru_cnt;
	u32 ul_hetrig_2mu_cnt;
	u32 ul_hetrig_3mu_cnt;
	u32 ul_hetrig_4mu_cnt;
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
	s8 eht[16][16];
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

struct mt76_chanctx {
	struct mt76_phy *phy;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)		____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
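/*
 * Illustrative sketch (hypothetical register names): the poll helpers spin
 * until (rr(offset) & mask) == val or the timeout expires, e.g. waiting for
 * a DMA busy bit to clear:
 *
 *	if (!mt76_poll(dev, FOO_DMA_CTRL, FOO_DMA_BUSY, 0, 1000))
 *		return -ETIMEDOUT;
 *
 * mt76_poll() takes its timeout in microseconds, mt76_poll_msec() in
 * milliseconds (re-checking every 10 ms by default).
 */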
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
bool mt76_pci_aspm_supported(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	return 0;
}

static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	return 0;
}

static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
				     bool reset)
{
	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)
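/*
 * Illustrative sketch (hypothetical driver code): a frame is queued through
 * the queue_ops wrappers, then the hardware is notified with a kick:
 *
 *	spin_lock_bh(&q->lock);
 *	mt76_tx_queue_skb(dev, q, qid, skb, wcid, sta);
 *	mt76_queue_kick(dev, q);	// ring the cpu_idx doorbell
 *	spin_unlock_bh(&q->lock);
 */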
#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)

#define mt76_dereference(p, dev) \
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))

static inline struct mt76_wcid *
__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
{
	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;
	return rcu_dereference(dev->wcid[idx]);
}

#define mt76_wcid_ptr(dev, idx) __mt76_wcid_ptr(&(dev)->mt76, idx)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_reset_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
				      u8 band_idx);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	if (wcid->def_wcid)
		ptr = wcid->def_wcid;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_path_delta(u8 path)
{
	static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
	u8 idx = path - 1;

	return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
}
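/*
 * Worked example (the 0.5 dB unit is an assumption based on how mt76 stores
 * tx power elsewhere): mt76_tx_power_path_delta(2) returns 6, i.e. a 3 dB
 * back-off when the same signal is sent over two paths; out-of-range inputs
 * (0 or more than 5 paths) return 0.
 */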
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
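/*
 * Illustrative sketch (an assumption about typical driver usage): TX status
 * events are processed under status_lock with a local list that collects
 * completed frames:
 *
 *	struct sk_buff_head list;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);	// reports the queued frames
 */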
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);

s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct ieee80211_scan_request *hw_req);
void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_add_chanctx(struct ieee80211_hw *hw,
		     struct ieee80211_chanctx_conf *conf);
void mt76_remove_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf);
void mt76_change_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf,
			 u32 changed);
int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct ieee80211_chanctx_conf *conf);
void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_bss_conf *link_conf,
			       struct ieee80211_chanctx_conf *conf);
int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif_chanctx_switch *vifs,
			    int n_vifs,
			    enum ieee80211_chanctx_switch_mode mode);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_channel *chan, int duration,
			   enum ieee80211_roc_type type);
int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel);
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
					    struct ieee80211_vif *vif);
void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
			   struct mt76_vif_link *mlink);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
#if defined(__FreeBSD__) && !defined(CONFIG_USB)
	return (0);
#else
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
#endif
}

void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
#if defined(__linux__)
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
#elif defined(__FreeBSD__)
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const u8 *data,
#endif
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);
s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		if (q == &dev->q_rx[i])
			return true;
	}

	return false;
}

static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_WED_RRO;
}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	if (!(q->flags & MT_QFLAG_WED))
		return false;

	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
	       mt76_queue_is_wed_rro_ind(q) ||
	       mt76_queue_is_wed_rro_data(q);
}

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
				allow_direct);
}

static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
	struct page *page;

	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
	if (!page)
		return NULL;

#if defined(__linux__)
	return page_address(page) + *offset;
#elif defined(__FreeBSD__)
	return (void *)((uintptr_t)page_address(page) + *offset);
#endif
}

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
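/*
 * Illustrative sketch (an assumption about the TX token flow): a txwi is
 * registered in the IDR before DMA and looked up again on completion:
 *
 *	int token = mt76_token_get(dev, &txwi);
 *	if (token < 0)
 *		return -ENOMEM;
 *	// ...hardware later reports "token" in a tx-free event...
 *	txwi = mt76_token_put(dev, token);	// reclaim on completion
 */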
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);

static inline void
mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;

	mlink->mvif = mvif;
	rcu_assign_pointer(mvif->link[0], mlink);
}

void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);

static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	if (!link_id)
		return mlink;

	return mt76_dereference(mvif->link[link_id], dev);
}

static inline struct mt76_vif_link *
mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
		   struct ieee80211_bss_conf *link_conf)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	if (link_conf == &vif->bss_conf || !link_conf->link_id)
		return mlink;

	return mt76_dereference(mvif->link[link_conf->link_id], dev);
}

static inline struct mt76_phy *
mt76_vif_link_phy(struct mt76_vif_link *mlink)
{
	struct mt76_chanctx *ctx;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;

	return ctx->phy;
}

#endif