/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
#define MT_QFLAG_WED		BIT(5)
#define MT_QFLAG_WED_RRO	BIT(6)
#define MT_QFLAG_WED_RRO_EN	BIT(7)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))

#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)
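
/*
 * Example (illustrative): the queue flags above pack the WED queue type and
 * ring number into a single field, e.g.
 *
 *	MT_WED_Q_TX(1) == MT_QFLAG_WED |
 *			  FIELD_PREP(MT_QFLAG_WED_TYPE, MT76_WED_Q_TX) |
 *			  FIELD_PREP(MT_QFLAG_WED_RING, 1)
 *		       == 0x21
 *
 * The mt76_queue_is_wed_*() helpers at the end of this header recover the
 * type with FIELD_GET(MT_QFLAG_WED_TYPE, q->flags).
 */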

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;
struct mt76_chanctx;
struct mt76_vif_link;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_BAND1,
	MT_RXQ_BAND1_WA,
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
	MT_RXQ_RRO_BAND0,
	MT_RXQ_RRO_BAND1,
	MT_RXQ_RRO_BAND2,
	MT_RXQ_MSDU_PAGE_BAND0,
	MT_RXQ_MSDU_PAGE_BAND1,
	MT_RXQ_MSDU_PAGE_BAND2,
	MT_RXQ_TXFREE_BAND0,
	MT_RXQ_TXFREE_BAND1,
	MT_RXQ_TXFREE_BAND2,
	MT_RXQ_RRO_IND,
	__MT_RXQ_MAX
};

enum mt76_band_id {
	MT_BAND0,
	MT_BAND1,
	MT_BAND2,
	__MT_MAX_BAND
};

enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len:15,
	    skip_unmap:1;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	dma_addr_t dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	u8 hw_idx;
	u8 ep;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u16 flags;

	struct mtk_wed_device *wed;
	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_pool *page_pool;
};
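
/*
 * Illustrative note on the ring bookkeeping above: q->head is the next
 * descriptor slot the driver fills and is mirrored to regs->cpu_idx when
 * the queue is kicked, while q->tail tracks cleanup of completed entries
 * against the hardware progress reported in regs->dma_idx; q->queued holds
 * the current occupancy, so free slots = q->ndesc - q->queued.
 */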

struct mt76_mcu_ops {
	unsigned int max_retry;
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_prepare_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				   int cmd, int *seq);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	MT_PHY_TYPE_EHT_SU = 13,
	MT_PHY_TYPE_EHT_TRIG,
	MT_PHY_TYPE_EHT_MU,
	__MT_PHY_TYPE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_MAX];
	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	u64 tx_bytes;
	/* WED TX */
	u32 tx_packets;		/* unit: MSDU */
	u32 tx_retries;
	u32 tx_failed;
	/* WED RX */
	u64 rx_bytes;
	u32 rx_packets;
	u32 rx_errors;
	u32 rx_drops;
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 1088

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY		GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;
	unsigned long ampdu_state;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 sta:1;
	u8 sta_disabled:1;
	u8 amsdu:1;
	u8 phy_idx:2;
	u8 link_id:4;
	bool link_valid;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head tx_list;
	struct sk_buff_head tx_pending;
	struct sk_buff_head tx_offchannel;

	struct list_head list;
	struct idr pktid;

	struct mt76_sta_stats stats;

	struct list_head poll_list;

	struct mt76_wcid *def_wcid;
};

struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_wed_rro_ind {
	u32 se_id	: 12;
	u32 rsv		: 4;
	u32 start_sn	: 12;
	u32 ind_reason	: 4;
	u32 ind_cnt	: 13;
	u32 win_sz	: 3;
	u32 rsv2	: 13;
	u32 magic_cnt	: 3;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	union {
		struct sk_buff *skb;
		void *ptr;
	};
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 id;
	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[] __counted_by(size);
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_WED		2
#define MT_PACKET_ID_FIRST		3
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* This is the timeout for waiting on the TXS callback, measured from the
 * time the DMA_DONE callback was seen. The packet has been processed by
 * then, so the firmware should not take long after that to send the TXS
 * callback if it is going to do so.
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_REGISTERED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
	MT76_STATE_WED_RESET,
};

enum mt76_sta_event {
	MT76_STA_EVENT_ASSOC,
	MT76_STA_EVENT_AUTHORIZE,
	MT76_STA_EVENT_DISASSOC,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
#define MT_DRV_AMSDU_OFFLOAD		BIT(5)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	unsigned int link_data_size;

	void (*update_survey)(struct mt76_phy *phy);
	int (*set_channel)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb, u32 *info);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	int (*sta_event)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta, enum mt76_sta_event ev);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);

	int (*vif_link_add)(struct mt76_phy *phy, struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct mt76_vif_link *mlink);

	void (*vif_link_remove)(struct mt76_phy *phy,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *link_conf,
				struct mt76_vif_link *mlink);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_POWER_ON = 0x4,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
	MT_VEND_READ_EXT = 0x63,
	MT_VEND_WRITE_EXT = 0x66,
	MT_VEND_FEATURE_SET = 0x91,
};
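
/*
 * Example (illustrative): MT_VEND_ADDR() tags an address with the target
 * address space, which the USB register access helpers can strip again via
 * MT_VEND_TYPE_MASK, e.g.
 *
 *	MT_VEND_ADDR(EEPROM, 0x100) == MT_VEND_TYPE_EEPROM | 0x100
 *	MT_VEND_ADDR(CFG, 0x14)     == MT_VEND_TYPE_CFG | 0x14
 */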

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;
	struct mt76_worker stat_worker;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	int pse_mcu_quota_max;
	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 phy_idx:2;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:3, bw:4;
	union {
		struct {
			u8 he_ru:3;
			u8 he_gi:2;
			u8 he_dcm:1;
		};
		struct {
			u8 ru:4;
			u8 gi:2;
		} eht;
	};

	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss:5, band:3;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif_link {
	u8 idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
	u8 basic_rates_idx;
	u8 mcast_rates_idx;
	u8 beacon_rates_idx;
	bool offchannel;
	struct ieee80211_chanctx_conf *ctx;
	struct mt76_wcid *wcid;
	struct mt76_vif_data *mvif;
	struct rcu_head rcu_head;
};

struct mt76_vif_data {
	struct mt76_vif_link __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];

	struct mt76_phy *roc_phy;
	u16 valid_links;
	u8 deflink_id;
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;
	unsigned int num_sta;
	u8 band_idx;

	spinlock_t tx_lock;
	struct list_head tx_list;
	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct cfg80211_chan_def main_chandef;
	bool offchannel;
	bool radar_enabled;

	struct delayed_work roc_work;
	struct ieee80211_vif *roc_vif;
	struct mt76_vif_link *roc_link;

	struct mt76_chanctx *chanctx;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	u32 aggr_stats[32];

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;

	struct {
		struct led_classdev cdev;
		char name[32];
		bool al;
		u8 pin;
	} leds;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];
	struct mt76_phy *band_phys[NUM_NL80211_BANDS];

	struct ieee80211_hw *hw;

	spinlock_t wed_lock;
	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device *napi_dev;
	struct net_device *tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
	struct tasklet_struct irq_tasklet;

	struct list_head txwi_cache;
	struct list_head rxwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	spinlock_t rx_token_lock;
	struct idr rx_token;
	u16 rx_token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct list_head sta_poll_list;
	spinlock_t sta_poll_lock;

	u32 rev;

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	u8 csa_complete;

	u32 rxfilter;

	struct delayed_work scan_work;
	struct {
		struct cfg80211_scan_request *req;
		struct ieee80211_channel *chan;
		struct ieee80211_vif *vif;
		struct mt76_vif_link *mlink;
		struct mt76_phy *phy;
		int chan_idx;
	} scan;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};

/* per-phy stats. */
struct mt76_mib_stats {
	u32 ack_fail_cnt;
	u32 fcs_err_cnt;
	u32 rts_cnt;
	u32 rts_retries_cnt;
	u32 ba_miss_cnt;
	u32 tx_bf_cnt;
	u32 tx_mu_bf_cnt;
	u32 tx_mu_mpdu_cnt;
	u32 tx_mu_acked_mpdu_cnt;
	u32 tx_su_acked_mpdu_cnt;
	u32 tx_bf_ibf_ppdu_cnt;
	u32 tx_bf_ebf_ppdu_cnt;

	u32 tx_bf_rx_fb_all_cnt;
	u32 tx_bf_rx_fb_eht_cnt;
	u32 tx_bf_rx_fb_he_cnt;
	u32 tx_bf_rx_fb_vht_cnt;
	u32 tx_bf_rx_fb_ht_cnt;

	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
	u32 tx_bf_rx_fb_nc_cnt;
	u32 tx_bf_rx_fb_nr_cnt;
	u32 tx_bf_fb_cpl_cnt;
	u32 tx_bf_fb_trig_cnt;

	u32 tx_ampdu_cnt;
	u32 tx_stop_q_empty_cnt;
	u32 tx_mpdu_attempts_cnt;
	u32 tx_mpdu_success_cnt;
	u32 tx_pkt_ebf_cnt;
	u32 tx_pkt_ibf_cnt;

	u32 tx_rwp_fail_cnt;
	u32 tx_rwp_need_cnt;

	/* rx stats */
	u32 rx_fifo_full_cnt;
	u32 channel_idle_cnt;
	u32 primary_cca_busy_time;
	u32 secondary_cca_busy_time;
	u32 primary_energy_detect_time;
	u32 cck_mdrdy_time;
	u32 ofdm_mdrdy_time;
	u32 green_mdrdy_time;
	u32 rx_vector_mismatch_cnt;
	u32 rx_delimiter_fail_cnt;
	u32 rx_mrdy_cnt;
	u32 rx_len_mismatch_cnt;
	u32 rx_mpdu_cnt;
	u32 rx_ampdu_cnt;
	u32 rx_ampdu_bytes_cnt;
	u32 rx_ampdu_valid_subframe_cnt;
	u32 rx_ampdu_valid_subframe_bytes_cnt;
	u32 rx_pfdrop_cnt;
	u32 rx_vec_queue_overflow_drop_cnt;
	u32 rx_ba_cnt;

	u32 tx_amsdu[8];
	u32 tx_amsdu_cnt;

	/* mcu_muru_stats */
	u32 dl_cck_cnt;
	u32 dl_ofdm_cnt;
	u32 dl_htmix_cnt;
	u32 dl_htgf_cnt;
	u32 dl_vht_su_cnt;
	u32 dl_vht_2mu_cnt;
	u32 dl_vht_3mu_cnt;
	u32 dl_vht_4mu_cnt;
	u32 dl_he_su_cnt;
	u32 dl_he_ext_su_cnt;
	u32 dl_he_2ru_cnt;
	u32 dl_he_2mu_cnt;
	u32 dl_he_3ru_cnt;
	u32 dl_he_3mu_cnt;
	u32 dl_he_4ru_cnt;
	u32 dl_he_4mu_cnt;
	u32 dl_he_5to8ru_cnt;
	u32 dl_he_9to16ru_cnt;
	u32 dl_he_gtr16ru_cnt;

	u32 ul_hetrig_su_cnt;
	u32 ul_hetrig_2ru_cnt;
	u32 ul_hetrig_3ru_cnt;
	u32 ul_hetrig_4ru_cnt;
	u32 ul_hetrig_5to8ru_cnt;
	u32 ul_hetrig_9to16ru_cnt;
	u32 ul_hetrig_gtr16ru_cnt;
	u32 ul_hetrig_2mu_cnt;
	u32 ul_hetrig_3mu_cnt;
	u32 ul_hetrig_4mu_cnt;
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
	s8 eht[16][16];
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

struct mt76_chanctx {
	struct mt76_phy *phy;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)		____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...)	____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
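
/*
 * Example (illustrative): mt76_poll_msec() waits for a register field to
 * reach a value, rechecking every 10 msec by default, while
 * mt76_poll_msec_tick() lets the caller pass the tick explicitly:
 *
 *	if (!mt76_poll_msec(dev, MT_REG, MT_REG_BUSY, 0, 100))
 *		return -ETIMEDOUT;
 *
 * MT_REG and MT_REG_BUSY are placeholder names for a driver register and
 * field mask.
 */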

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
bool mt76_pci_aspm_supported(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

void mt76_wed_release_rx_buf(struct mtk_wed_device *wed);
void mt76_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_wed_reset_complete(struct mtk_wed_device *wed);
void mt76_wed_dma_reset(struct mt76_dev *dev);
int mt76_wed_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			  struct net_device *netdev, enum tc_setup_type type,
			  void *type_data);
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
int mt76_wed_offload_enable(struct mtk_wed_device *wed);
int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#else
static inline u32 mt76_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
	return 0;
}

static inline int mt76_wed_offload_enable(struct mtk_wed_device *wed)
{
	return 0;
}

static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
				     bool reset)
{
	return 0;
}
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)
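
/*
 * Example (illustrative): mt76_for_each_q_rx() visits only the rx queues
 * that were actually allocated (ndesc != 0):
 *
 *	int i;
 *
 *	mt76_for_each_q_rx(dev, i)
 *		napi_disable(&dev->napi[i]);
 */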

#define mt76_dereference(p, dev) \
	rcu_dereference_protected(p, lockdep_is_held(&(dev)->mutex))

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_radio_phy(struct mt76_dev *dev, unsigned int size,
				      u8 band_idx);
struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 && dev->phys[phy_idx]) ||
	    (phy_idx == MT_BAND2 && dev->phys[phy_idx]))
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}

static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
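
/*
 * Example (illustrative): the wrap-around helpers rely on size being a
 * power of two, so the mask (size - 1) covers all valid ring indices:
 *
 *	mt76_incr(255, 256) == 0
 *	mt76_decr(0, 256)   == 255
 */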

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	if (wcid->def_wcid)
		ptr = wcid->def_wcid;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}
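
/*
 * Example (illustrative): packet IDs below MT_PACKET_ID_FIRST are reserved
 * markers with no tracked skb, and IDs with MT_PACKET_ID_HAS_RATE set are
 * treated as carrying rate information rather than a status-tracked skb:
 *
 *	mt76_is_skb_pktid(MT_PACKET_ID_NO_SKB)       == false
 *	mt76_is_skb_pktid(MT_PACKET_ID_FIRST)        == true
 *	mt76_is_skb_pktid(MT_PACKET_ID_HAS_RATE | 5) == false
 */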

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
	u8 idx = nss - 1;

	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
int mt76_update_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}
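
/*
 * Example (illustrative): the tx status helpers above are used in a
 * lock/get/done/unlock pattern when matching a TXS event to a pending skb:
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	mt76_tx_status_unlock(dev, &list);
 */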

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_phy *phy, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, u8 phy_idx);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     unsigned int link_id, int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
int mt76_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		 struct ieee80211_scan_request *hw_req);
void mt76_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_add_chanctx(struct ieee80211_hw *hw,
		     struct ieee80211_chanctx_conf *conf);
void mt76_remove_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf);
void mt76_change_chanctx(struct ieee80211_hw *hw,
			 struct ieee80211_chanctx_conf *conf,
			 u32 changed);
int mt76_assign_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_bss_conf *link_conf,
			    struct ieee80211_chanctx_conf *conf);
void mt76_unassign_vif_chanctx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_bss_conf *link_conf,
			       struct ieee80211_chanctx_conf *conf);
int mt76_switch_vif_chanctx(struct ieee80211_hw *hw,
			    struct ieee80211_vif_chanctx_switch *vifs,
			    int n_vifs,
			    enum ieee80211_chanctx_switch_mode mode);
int mt76_remain_on_channel(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_channel *chan, int duration,
			   enum ieee80211_roc_type type);
int mt76_cancel_remain_on_channel(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);
int __mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		       bool offchannel);
int mt76_set_channel(struct mt76_phy *phy, struct cfg80211_chan_def *chandef,
		     bool offchannel);
void mt76_scan_work(struct work_struct *work);
void mt76_abort_scan(struct mt76_dev *dev);
void mt76_roc_complete_work(struct work_struct *work);
void mt76_abort_roc(struct mt76_phy *phy);
struct mt76_vif_link *mt76_get_vif_phy_link(struct mt76_phy *phy,
					    struct ieee80211_vif *vif);
void mt76_put_vif_phy_link(struct mt76_phy *phy, struct ieee80211_vif *vif,
			   struct mt76_vif_link *mlink);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp, struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}
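
/*
 * Example (illustrative): most firmware commands go through
 * mt76_mcu_send_msg(); when the response payload is needed, use
 * mt76_mcu_send_and_get_msg() and free the returned skb:
 *
 *	struct sk_buff *skb;
 *	int ret;
 *
 *	ret = mt76_mcu_send_and_get_msg(dev, cmd, &req, sizeof(req),
 *					true, &skb);
 *	if (ret)
 *		return ret;
 *	// ... parse skb->data ...
 *	dev_kfree_skb(skb);
 *
 * Here cmd and req stand in for a driver-specific command ID and request
 * struct.
 */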

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

static inline bool mt76_queue_is_rx(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		if (q == &dev->q_rx[i])
			return true;
	}

	return false;
}

static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_WED_RRO;
}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	if (!(q->flags & MT_QFLAG_WED))
		return false;

	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
}

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(page->pp, page, allow_direct);
}

static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
	struct page *page;

	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
	if (!page)
		return NULL;

	return page_address(page) + *offset;
}

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
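
/*
 * Example (illustrative): a txwi token maps an in-flight tx descriptor to
 * an IDR slot so the completion path can look the txwi up again:
 *
 *	int token = mt76_token_get(dev, &txwi);
 *
 *	if (token < 0)
 *		return -ENOMEM;
 *	// ... hand the token to the hardware in the tx descriptor ...
 *	txwi = mt76_token_put(dev, token);	// on tx completion
 */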

void mt76_wcid_init(struct mt76_wcid *wcid, u8 band_idx);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);
void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid);

static inline void
mt76_vif_init(struct ieee80211_vif *vif, struct mt76_vif_data *mvif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;

	mlink->mvif = mvif;
	rcu_assign_pointer(mvif->link[0], mlink);
}

void mt76_vif_cleanup(struct mt76_dev *dev, struct ieee80211_vif *vif);

static inline struct mt76_vif_link *
mt76_vif_link(struct mt76_dev *dev, struct ieee80211_vif *vif, int link_id)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	return mt76_dereference(mvif->link[link_id], dev);
}

static inline struct mt76_vif_link *
mt76_vif_conf_link(struct mt76_dev *dev, struct ieee80211_vif *vif,
		   struct ieee80211_bss_conf *link_conf)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;

	if (link_conf == &vif->bss_conf)
		return mlink;

	return mt76_dereference(mvif->link[link_conf->link_id], dev);
}

static inline struct mt76_phy *
mt76_vif_link_phy(struct mt76_vif_link *mlink)
{
	struct mt76_chanctx *ctx;

	if (!mlink->ctx)
		return NULL;

	ctx = (struct mt76_chanctx *)mlink->ctx->drv_priv;

	return ctx->phy;
}

#endif