/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
#define MT_QFLAG_WED		BIT(5)
#define MT_QFLAG_WED_RRO	BIT(6)
#define MT_QFLAG_WED_RRO_EN	BIT(7)
#define MT_QFLAG_EMI_EN		BIT(8)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))

#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
#define MT_WED_RRO_Q_RXDMAD_C	__MT_WED_RRO_Q(MT76_WED_RRO_Q_RXDMAD_C, 0)
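/*
 * Example of the encoding above: MT_WED_Q_TX(1) sets MT_QFLAG_WED,
 * stores MT76_WED_Q_TX in the type field (bits 4:2) and ring index 1
 * in the ring field (bits 1:0). The mt76_queue_is_wed_*() helpers at
 * the end of this header decode the fields again with FIELD_GET().
 */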
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;
struct mt76_chanctx;
struct mt76_vif_link;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
	MT76_WED_RRO_Q_RXDMAD_C,
};

enum mt76_hwrro_mode {
	MT76_HWRRO_OFF,
	MT76_HWRRO_V3,
	MT76_HWRRO_V3_1,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_BAND1,
	MT_RXQ_BAND1_WA,
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
	MT_RXQ_RRO_BAND0,
	MT_RXQ_RRO_BAND1,
	MT_RXQ_RRO_BAND2,
	MT_RXQ_MSDU_PAGE_BAND0,
	MT_RXQ_MSDU_PAGE_BAND1,
	MT_RXQ_MSDU_PAGE_BAND2,
	MT_RXQ_TXFREE_BAND0,
	MT_RXQ_TXFREE_BAND1,
	MT_RXQ_TXFREE_BAND2,
	MT_RXQ_RRO_IND,
	MT_RXQ_RRO_RXDMAD_C,
	__MT_RXQ_MAX
};

enum mt76_band_id {
	MT_BAND0,
	MT_BAND1,
	MT_BAND2,
	__MT_MAX_BAND
};

enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};

#define MT76_RNR_SCAN_MAX_BSSIDS	16
struct mt76_scan_rnr_param {
	u8 bssid[MT76_RNR_SCAN_MAX_BSSIDS][ETH_ALEN];
	u8 channel[MT76_RNR_SCAN_MAX_BSSIDS];
	u8 random_mac[ETH_ALEN];
	u8 seq_num;
	u8 bssid_num;
	u32 sreq_flag;
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len:15,
	    skip_unmap:1;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	dma_addr_t dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	u8 hw_idx;
	u8 ep;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u16 flags;
	u8 magic_cnt;

	__le16 *emi_cpu_idx;

	struct mtk_wed_device *wed;
	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_pool *page_pool;
};
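/*
 * Ring bookkeeping convention used by the DMA code: 'head' is the
 * producer index where new descriptors are queued, 'tail' is the
 * consumer index advanced on cleanup, and 'queued' counts in-flight
 * entries. The kick() queue op publishes 'head' to the cpu_idx
 * hardware register.
 */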
struct mt76_mcu_ops {
	unsigned int max_retry;
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				   int cmd, int *seq);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_queue_init)(struct mt76_dev *dev, enum mt76_rxq_id qid,
			      int (*poll)(struct napi_struct *napi, int budget));

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q,
			bool reset_idx);
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	MT_PHY_TYPE_EHT_SU = 13,
	MT_PHY_TYPE_EHT_TRIG,
	MT_PHY_TYPE_EHT_MU,
	__MT_PHY_TYPE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_MAX];
	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	u64 tx_bytes;
	/* WED TX */
	u32 tx_packets;		/* unit: MSDU */
	u32 tx_retries;
	u32 tx_failed;
	/* WED RX */
	u64 rx_bytes;
	u32 rx_packets;
	u32 rx_errors;
	u32 rx_drops;
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 1088

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY	GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;
	unsigned long ampdu_state;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 offchannel:1;
	u8 sta:1;
	u8 sta_disabled:1;
	u8 amsdu:1;
	u8 phy_idx:2;
	u8 link_id:4;
	bool link_valid;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head tx_list;
	struct sk_buff_head tx_pending;
	struct sk_buff_head tx_offchannel;

	struct list_head list;
	struct idr pktid;

	struct mt76_sta_stats stats;

	struct list_head poll_list;

	struct mt76_wcid *def_wcid;
};
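/*
 * A wcid mirrors a hardware station table entry; 'idx' is the slot in
 * mt76_dev::wcid[]. 'sta' is 0 for non-station contexts such as the
 * per-device global_wcid. When 'def_wcid' is set, it points at the
 * wcid embedded in ieee80211_sta::drv_priv, allowing wcid_to_sta() to
 * recover the station from a per-link wcid.
 */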
struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

/* data0 */
#define RRO_IND_DATA0_IND_REASON_MASK	GENMASK(31, 28)
#define RRO_IND_DATA0_START_SEQ_MASK	GENMASK(27, 16)
#define RRO_IND_DATA0_SEQ_ID_MASK	GENMASK(11, 0)
/* data1 */
#define RRO_IND_DATA1_MAGIC_CNT_MASK	GENMASK(31, 29)
#define RRO_IND_DATA1_IND_COUNT_MASK	GENMASK(12, 0)
struct mt76_wed_rro_ind {
	__le32 data0;
	__le32 data1;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	union {
		struct sk_buff *skb;
		void *ptr;
	};

	u8 qid;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 id;
	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[] __counted_by(size);
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_WED		2
#define MT_PACKET_ID_FIRST		3
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* This is the timeout for giving up on waiting for the TXS callback,
 * measured from the time the DMA_DONE callback was seen (the packet
 * was processed by then, so the firmware should not take much longer
 * to send the TXS callback if it is going to do so at all).
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)
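/*
 * IDs from MT_PACKET_ID_FIRST up to MT_PACKET_ID_MASK are allocated
 * from mt76_wcid::pktid when TX status reporting is requested for a
 * frame; the lower values are reserved markers. mt76_is_skb_pktid()
 * below tells the two ranges apart.
 */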
struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_REGISTERED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
	MT76_STATE_WED_RESET,
};

enum mt76_sta_event {
	MT76_STA_EVENT_ASSOC,
	MT76_STA_EVENT_AUTHORIZE,
	MT76_STA_EVENT_DISASSOC,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
#define MT_DRV_AMSDU_OFFLOAD		BIT(5)
#define MT_DRV_IGNORE_TXS_FAILED	BIT(6)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	unsigned int link_data_size;

	void (*update_survey)(struct mt76_phy *phy);
	int (*set_channel)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb, u32 *info);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*rx_rro_ind_process)(struct mt76_dev *dev, void *data);
	int (*rx_rro_add_msdu_page)(struct mt76_dev *dev, struct mt76_queue *q,
				    dma_addr_t p, void *data);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, enum mt76_sta_event ev);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);

	int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct mt76_vif_link *mlink);

	void (*vif_link_remove)(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *link_conf,
				struct mt76_vif_link *mlink);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;
	struct mt76_worker stat_worker;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	int pse_mcu_quota_max;
	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
};
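/*
 * 'irqmask' caches the interrupt enable bits so they can be updated
 * from atomic context under 'irq_lock'; mt76_set_irq_mask() modifies
 * the cached value and writes it back to the given register.
 */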
struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 phy_idx:2;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:3, bw:4;
	union {
		struct {
			u8 he_ru:3;
			u8 he_gi:2;
			u8 he_dcm:1;
		};
		struct {
			u8 ru:4;
			u8 gi:2;
		} eht;
	};

	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss:5, band:3;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif_link {
	u8 idx;
	u8 link_idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
	u8 basic_rates_idx;
	u8 mcast_rates_idx;
	u8 beacon_rates_idx;
	bool offchannel;
	struct ieee80211_chanctx_conf *ctx;
	struct mt76_wcid *wcid;
	struct mt76_vif_data *mvif;
	struct rcu_head rcu_head;
};

struct mt76_vif_data {
	struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
	struct mt76_vif_link __rcu *offchannel_link;

	struct mt76_phy *roc_phy;
	u16 valid_links;
	u8 deflink_id;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;
	unsigned int num_sta;
	u8 band_idx;

	spinlock_t tx_lock;
	struct list_head tx_list;
	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct cfg80211_chan_def main_chandef;
	bool offchannel;
	bool radar_enabled;

	struct delayed_work roc_work;
	struct ieee80211_vif *roc_vif;
	struct mt76_vif_link *roc_link;

	struct mt76_chanctx *chanctx;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	u32 aggr_stats[32];

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;

	struct {
		struct led_classdev cdev;
		char name[32];
		bool al;
		u8 pin;
	} leds;
};
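/*
 * A mt76_phy represents a single MAC/band exposed as one ieee80211_hw;
 * multi-band chips register up to __MT_MAX_BAND of them on top of a
 * shared mt76_dev, which always embeds the primary phy as its first
 * member.
 */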
struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];
	struct mt76_phy *band_phys[NUM_NL80211_BANDS];

	struct ieee80211_hw *hw;

	spinlock_t wed_lock;
	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device *napi_dev;
	struct net_device *tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
	struct tasklet_struct irq_tasklet;

	struct list_head txwi_cache;
	struct list_head rxwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];
	enum mt76_hwrro_mode hwrro_mode;

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	spinlock_t rx_token_lock;
	struct idr rx_token;
	u16 rx_token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect the wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct list_head sta_poll_list;
	spinlock_t sta_poll_lock;

	u32 rev;

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	struct mt76_scan_rnr_param rnr;

	u32 debugfs_reg;

	u8 csa_complete;

	u32 rxfilter;

	struct delayed_work scan_work;
	struct {
		struct cfg80211_scan_request *req;
		struct ieee80211_channel *chan;
		struct ieee80211_vif *vif;
		struct mt76_vif_link *mlink;
		struct mt76_phy *phy;
		int chan_idx;
	} scan;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};

	atomic_t bus_hung;
};
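/*
 * In struct mt76_dev above, wcid[] holds RCU-protected station
 * pointers indexed by wcid idx, while wcid_mask is the matching
 * allocation bitmap; both are bounded by MT76_N_WCIDS.
 */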
/* per-phy stats */
struct mt76_mib_stats {
	u32 ack_fail_cnt;
	u32 fcs_err_cnt;
	u32 rts_cnt;
	u32 rts_retries_cnt;
	u32 ba_miss_cnt;
	u32 tx_bf_cnt;
	u32 tx_mu_bf_cnt;
	u32 tx_mu_mpdu_cnt;
	u32 tx_mu_acked_mpdu_cnt;
	u32 tx_su_acked_mpdu_cnt;
	u32 tx_bf_ibf_ppdu_cnt;
	u32 tx_bf_ebf_ppdu_cnt;

	u32 tx_bf_rx_fb_all_cnt;
	u32 tx_bf_rx_fb_eht_cnt;
	u32 tx_bf_rx_fb_he_cnt;
	u32 tx_bf_rx_fb_vht_cnt;
	u32 tx_bf_rx_fb_ht_cnt;

	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
	u32 tx_bf_rx_fb_nc_cnt;
	u32 tx_bf_rx_fb_nr_cnt;
	u32 tx_bf_fb_cpl_cnt;
	u32 tx_bf_fb_trig_cnt;

	u32 tx_ampdu_cnt;
	u32 tx_stop_q_empty_cnt;
	u32 tx_mpdu_attempts_cnt;
	u32 tx_mpdu_success_cnt;
	u32 tx_pkt_ebf_cnt;
	u32 tx_pkt_ibf_cnt;

	u32 tx_rwp_fail_cnt;
	u32 tx_rwp_need_cnt;

	/* rx stats */
	u32 rx_fifo_full_cnt;
	u32 channel_idle_cnt;
	u32 primary_cca_busy_time;
	u32 secondary_cca_busy_time;
	u32 primary_energy_detect_time;
	u32 cck_mdrdy_time;
	u32 ofdm_mdrdy_time;
	u32 green_mdrdy_time;
	u32 rx_vector_mismatch_cnt;
	u32 rx_delimiter_fail_cnt;
	u32 rx_mrdy_cnt;
	u32 rx_len_mismatch_cnt;
	u32 rx_mpdu_cnt;
	u32 rx_ampdu_cnt;
	u32 rx_ampdu_bytes_cnt;
	u32 rx_ampdu_valid_subframe_cnt;
	u32 rx_ampdu_valid_subframe_bytes_cnt;
	u32 rx_pfdrop_cnt;
	u32 rx_vec_queue_overflow_drop_cnt;
	u32 rx_ba_cnt;

	u32 tx_amsdu[8];
	u32 tx_amsdu_cnt;

	/* mcu_muru_stats */
	u32 dl_cck_cnt;
	u32 dl_ofdm_cnt;
	u32 dl_htmix_cnt;
	u32 dl_htgf_cnt;
	u32 dl_vht_su_cnt;
	u32 dl_vht_2mu_cnt;
	u32 dl_vht_3mu_cnt;
	u32 dl_vht_4mu_cnt;
	u32 dl_he_su_cnt;
	u32 dl_he_ext_su_cnt;
	u32 dl_he_2ru_cnt;
	u32 dl_he_2mu_cnt;
	u32 dl_he_3ru_cnt;
	u32 dl_he_3mu_cnt;
	u32 dl_he_4ru_cnt;
	u32 dl_he_4mu_cnt;
	u32 dl_he_5to8ru_cnt;
	u32 dl_he_9to16ru_cnt;
	u32 dl_he_gtr16ru_cnt;

	u32 ul_hetrig_su_cnt;
	u32 ul_hetrig_2ru_cnt;
	u32 ul_hetrig_3ru_cnt;
	u32 ul_hetrig_4ru_cnt;
	u32 ul_hetrig_5to8ru_cnt;
	u32 ul_hetrig_9to16ru_cnt;
	u32 ul_hetrig_gtr16ru_cnt;
	u32 ul_hetrig_2mu_cnt;
	u32 ul_hetrig_3mu_cnt;
	u32 ul_hetrig_4mu_cnt;
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
	s8 eht[16][16];
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

struct mt76_chanctx {
	struct mt76_phy *phy;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];
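/*
 * hw_value packs the PHY type in the high byte and the rate index in
 * the low byte; e.g. CCK_RATE(0, 10) describes 1 Mbit/s CCK (bitrate
 * is in 100 kbit/s units), with hw_value_short selecting the
 * short-preamble variant at index + 4.
 */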
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw
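/*
 * The __mt76_* accessors above take a struct mt76_dev pointer
 * directly, while the mt76_* variants expect a driver private struct
 * that embeds it as 'mt76' (and its primary phy as 'mphy'), the
 * convention followed by the chip drivers.
 */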
bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)		____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
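/*
 * ____mt76_poll_msec() takes the polling interval in msec as its last
 * argument; __mt76_poll_msec() and mt76_poll_msec() use the variadic
 * tail to default it to 10 ms, while mt76_poll_msec_tick() lets the
 * caller pass an explicit interval.
 */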
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
bool mt76_pci_aspm_supported(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	return 0;
}

static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	return 0;
}

static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
				     bool reset)
{
	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_init(dev, ...)	(dev)->mt76.queue_ops->rx_queue_init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)

#define mt76_dereference(p, dev) \
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))

static inline struct mt76_wcid *
__mt76_wcid_ptr(struct mt76_dev *dev, u16 idx)
{
	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;
	return rcu_dereference(dev->wcid[idx]);
}

#define mt76_wcid_ptr(dev, idx) __mt76_wcid_ptr(&(dev)->mt76, idx)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_reset_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
				      u8 band_idx);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);
struct mt76_phy *mt76_vif_phy(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
int mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}
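/*
 * Note that mt76_dev_phy() above falls back to the primary phy when
 * the requested band has not been registered, so callers may pass any
 * phy index decoded from hardware without a NULL check.
 */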
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
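/*
 * Both wrap-around helpers above assume 'size' is a power of two,
 * since the wrap is done with a mask rather than a modulo.
 */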
u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	if (wcid->def_wcid)
		ptr = wcid->def_wcid;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_path_delta(u8 path)
{
	static const u8 path_delta[5] = { 0, 6, 9, 12, 14 };
	u8 idx = path - 1;

	return (idx < ARRAY_SIZE(path_delta)) ? path_delta[idx] : 0;
}
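/*
 * The deltas above are in 0.5 dB units and approximate the
 * 10 * log10(n) array gain of 2-5 TX paths (3, 4.5, 6 and 7 dB)
 * relative to a single path.
 */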
static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);

s8 mt76_get_power_bound(struct mt76_phy *phy, s8 txpower);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, int radio_idx, u32 *tx_ant,
		     u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct ieee80211_scan_request *hw_req);
void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_add_chanctx(struct ieee80211_hw *hw,
		     struct ieee80211_chanctx_conf *conf);
void mt76_remove_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf);
void mt76_change_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf,
			 u32 changed);
int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct ieee80211_chanctx_conf *conf);
void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_bss_conf *link_conf,
			       struct ieee80211_chanctx_conf *conf);
int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif_chanctx_switch *vifs,
			    int n_vifs,
			    enum ieee80211_chanctx_switch_mode mode);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_channel *chan, int duration,
			   enum ieee80211_roc_type type);
int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
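/*
 * The PHY index for TX status reporting travels in the hw_queue field
 * of the TX info (MT_TX_HW_QUEUE_PHY, bits 3:2); the shift below must
 * match that field's offset.
 */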
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel);
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
void mt76_roc_complete(struct mt76_phy *phy);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
					    struct ieee80211_vif *vif);
void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
			   struct mt76_vif_link *mlink);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
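/*
 * mt76u_bulk_msg() above infers the transfer direction from
 * 'actual_len': reads pass a buffer for the actual length, writes
 * pass NULL, and the IN or OUT pipe is selected accordingly.
 */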
void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}
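/*
 * __mt76_mcu_msg_alloc() reserves mcu_ops->headroom (and tailroom)
 * around the payload; the wrapper above simply uses the payload
 * length for both len and data_len and allocates with GFP_KERNEL.
 */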
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		if (q == &dev->q_rx[i])
			return true;
	}

	return false;
}

static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_WED_RRO;
}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}

static inline bool mt76_queue_is_wed_rro_rxdmad_c(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_RXDMAD_C;
}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA;
}

static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) ==
	       MT76_WED_RRO_Q_MSDU_PG;
}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
}

static inline bool mt76_queue_is_emi(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_EMI_EN;
}

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(pp_page_to_nmdesc(page)->pp, page,
				allow_direct);
}

static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
	struct page *page;

	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
	if (!page)
		return NULL;

	return page_address(page) + *offset;
}

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
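/*
 * TX tokens map in-flight DMA descriptors back to their txwi cache
 * entries: mt76_token_get() stores the txwi in the 'token' IDR
 * (bounded by dev->token_size), and the returned id travels with the
 * frame so the completion path can look the entry up again.
 */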
void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);

static inline void
mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;

	mlink->mvif = mvif;
	rcu_assign_pointer(mvif->link[0], mlink);
}

void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);
u16 mt76_select_links(struct ieee80211_vif *vif, int max_active_links);

static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	if (!link_id)
		return mlink;

	return mt76_dereference(mvif->link[link_id], dev);
}

static inline struct mt76_vif_link *
mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
		   struct ieee80211_bss_conf *link_conf)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	if (link_conf == &vif->bss_conf || !link_conf->link_id)
		return mlink;

	return mt76_dereference(mvif->link[link_conf->link_id], dev);
}

static inline struct mt76_phy *
mt76_vif_link_phy(struct mt76_vif_link *mlink)
{
	struct mt76_chanctx *ctx;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;

	return ctx->phy;
}

#endif