/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <linux/soc/mediatek/mtk_wed.h>
#include <net/mac80211.h>
#include <net/page_pool/helpers.h>
#include "util.h"
#include "testmode.h"

#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		256

#define MT_MAX_NON_AQL_PKT	16
#define MT_TXQ_FREE_THR		32

#define MT76_TOKEN_FREE_THR	64

#define MT_QFLAG_WED_RING	GENMASK(1, 0)
#define MT_QFLAG_WED_TYPE	GENMASK(4, 2)
#define MT_QFLAG_WED		BIT(5)
#define MT_QFLAG_WED_RRO	BIT(6)
#define MT_QFLAG_WED_RRO_EN	BIT(7)

#define __MT_WED_Q(_type, _n)	(MT_QFLAG_WED | \
				 FIELD_PREP(MT_QFLAG_WED_TYPE, _type) | \
				 FIELD_PREP(MT_QFLAG_WED_RING, _n))
#define __MT_WED_RRO_Q(_type, _n)	(MT_QFLAG_WED_RRO | __MT_WED_Q(_type, _n))

#define MT_WED_Q_TX(_n)		__MT_WED_Q(MT76_WED_Q_TX, _n)
#define MT_WED_Q_RX(_n)		__MT_WED_Q(MT76_WED_Q_RX, _n)
#define MT_WED_Q_TXFREE		__MT_WED_Q(MT76_WED_Q_TXFREE, 0)
#define MT_WED_RRO_Q_DATA(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_DATA, _n)
#define MT_WED_RRO_Q_MSDU_PG(_n)	__MT_WED_RRO_Q(MT76_WED_RRO_Q_MSDU_PG, _n)
#define MT_WED_RRO_Q_IND	__MT_WED_RRO_Q(MT76_WED_RRO_Q_IND, 0)

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
struct mt76s_intr;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
	MT76_BUS_SDIO,
};

enum mt76_wed_type {
	MT76_WED_Q_TX,
	MT76_WED_Q_TXFREE,
	MT76_WED_Q_RX,
	MT76_WED_RRO_Q_DATA,
	MT76_WED_RRO_Q_MSDU_PG,
	MT76_WED_RRO_Q_IND,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_mcuq_id {
	MT_MCUQ_WM,
	MT_MCUQ_WA,
	MT_MCUQ_FWDL,
	__MT_MCUQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	MT_RXQ_MCU_WA,
	MT_RXQ_BAND1,
	MT_RXQ_BAND1_WA,
	MT_RXQ_MAIN_WA,
	MT_RXQ_BAND2,
	MT_RXQ_BAND2_WA,
	MT_RXQ_RRO_BAND0,
	MT_RXQ_RRO_BAND1,
	MT_RXQ_RRO_BAND2,
	MT_RXQ_MSDU_PAGE_BAND0,
	MT_RXQ_MSDU_PAGE_BAND1,
	MT_RXQ_MSDU_PAGE_BAND2,
	MT_RXQ_TXFREE_BAND0,
	MT_RXQ_TXFREE_BAND1,
	MT_RXQ_TXFREE_BAND2,
	MT_RXQ_RRO_IND,
	__MT_RXQ_MAX
};

enum mt76_band_id {
	MT_BAND0,
	MT_BAND1,
	MT_BAND2,
	__MT_MAX_BAND
};

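/*
 * Illustrative sketch (not part of the driver API): the MT_WED_Q_*
 * helpers above pack a WED queue type and ring index into the queue
 * flags word. Assuming ring 1 of a WED TX queue:
 *
 *	u16 flags = MT_WED_Q_TX(1);
 *	// FIELD_GET(MT_QFLAG_WED_TYPE, flags) == MT76_WED_Q_TX
 *	// FIELD_GET(MT_QFLAG_WED_RING, flags) == 1
 *	// flags & MT_QFLAG_WED is set
 *
 * mt76_queue_is_wed_tx_free() and friends near the end of this header
 * decode the type field the same way.
 */
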
enum mt76_cipher_type {
	MT_CIPHER_NONE,
	MT_CIPHER_WEP40,
	MT_CIPHER_TKIP,
	MT_CIPHER_TKIP_NO_MIC,
	MT_CIPHER_AES_CCMP,
	MT_CIPHER_WEP104,
	MT_CIPHER_BIP_CMAC_128,
	MT_CIPHER_WEP128,
	MT_CIPHER_WAPI,
	MT_CIPHER_CCMP_CCX,
	MT_CIPHER_CCMP_256,
	MT_CIPHER_GCMP,
	MT_CIPHER_GCMP_256,
};

enum mt76_dfs_state {
	MT_DFS_STATE_UNKNOWN,
	MT_DFS_STATE_DISABLED,
	MT_DFS_STATE_CAC,
	MT_DFS_STATE_ACTIVE,
};

struct mt76_queue_buf {
	dma_addr_t addr;
	u16 len;
	bool skip_unmap;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
		int buf_sz;
	};
	dma_addr_t dma_addr[2];
	u16 dma_len[2];
	u16 wcid;
	bool skip_buf0:1;
	bool skip_buf1:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	spinlock_t cleanup_lock;
	struct mt76_queue_entry *entry;
	struct mt76_rro_desc *rro_desc;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;
	bool blocked;

	u8 buf_offset;
	u8 hw_idx;
	u16 flags;

	struct mtk_wed_device *wed;
	u32 wed_regs;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_pool *page_pool;
};

struct mt76_mcu_ops {
	u32 headroom;
	u32 tailroom;

	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
				int cmd, int *seq);
	int (*mcu_parse_response)(struct mt76_dev *dev, int cmd,
				  struct sk_buff *skb, int seq);
	u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
	void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev,
		    int (*poll)(struct napi_struct *napi, int budget));

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    enum mt76_txq_id qid, struct sk_buff *skb,
			    struct mt76_wcid *wcid, struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, struct mt76_queue *q,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q,
			   bool flush);

	void (*rx_cleanup)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);

	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
};

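/*
 * Illustrative sketch: struct mt76_queue_ops is the per-bus ring vtable.
 * Core code dispatches through dev->queue_ops; the mt76_queue_*() macros
 * further down in this header hide the indirection. A hypothetical
 * caller kicking the DMA engine after filling a TX ring would do:
 *
 *	dev->queue_ops->kick(dev, q);
 *
 * which is what the mt76_queue_kick() wrapper expands to (modulo the
 * &dev->mt76 dereference used by the driver-level macros).
 */
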
enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
	MT_PHY_TYPE_HE_SU = 8,
	MT_PHY_TYPE_HE_EXT_SU,
	MT_PHY_TYPE_HE_TB,
	MT_PHY_TYPE_HE_MU,
	MT_PHY_TYPE_EHT_SU = 13,
	MT_PHY_TYPE_EHT_TRIG,
	MT_PHY_TYPE_EHT_MU,
	__MT_PHY_TYPE_MAX,
};

struct mt76_sta_stats {
	u64 tx_mode[__MT_PHY_TYPE_MAX];
	u64 tx_bw[5];		/* 20, 40, 80, 160, 320 */
	u64 tx_nss[4];		/* 1, 2, 3, 4 */
	u64 tx_mcs[16];		/* mcs idx */
	u64 tx_bytes;
	/* WED TX */
	u32 tx_packets;		/* unit: MSDU */
	u32 tx_retries;
	u32 tx_failed;
	/* WED RX */
	u64 rx_bytes;
	u32 rx_packets;
	u32 rx_errors;
	u32 rx_drops;
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
	MT_WCID_FLAG_4ADDR,
	MT_WCID_FLAG_HDR_TRANS,
};

#define MT76_N_WCIDS 1088

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY		GENMASK(3, 2)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	atomic_t non_aql_packets;
	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	struct rate_info rate;
	unsigned long ampdu_state;

	u16 idx;
	u8 hw_key_idx;
	u8 hw_key_idx2;

	u8 sta:1;
	u8 amsdu:1;
	u8 phy_idx:2;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	struct list_head tx_list;
	struct sk_buff_head tx_pending;

	struct list_head list;
	struct idr pktid;

	struct mt76_sta_stats stats;

	struct list_head poll_list;
};

struct mt76_txq {
	u16 wcid;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_wed_rro_ind {
	u32 se_id	: 12;
	u32 rsv		: 4;
	u32 start_sn	: 12;
	u32 ind_reason	: 4;
	u32 ind_cnt	: 13;
	u32 win_sz	: 3;
	u32 rsv2	: 13;
	u32 magic_cnt	: 3;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	union {
		struct sk_buff *skb;
		void *ptr;
	};
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 id;
	u16 head;
	u16 size;
	u16 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[] __counted_by(size);
};

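/*
 * Illustrative sketch: struct mt76_rx_tid backs the software RX reorder
 * window. A driver reacting to an ADDBA request would typically size the
 * window from the BA parameters and call (see the prototypes below):
 *
 *	err = mt76_rx_aggr_start(dev, wcid, tid, ssn, ba_win_size);
 *	...
 *	mt76_rx_aggr_stop(dev, wcid, tid);
 *
 * reorder_buf[] then holds out-of-order frames until the window head
 * advances or reorder_work times them out.
 */
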
#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_WED		2
#define MT_PACKET_ID_FIRST		3
#define MT_PACKET_ID_HAS_RATE		BIT(7)
/* Timeout for giving up on a TXS callback, measured from the time the
 * DMA_DONE callback was seen. Once the packet has been processed by the
 * hardware, the firmware should not take long to send the TXS callback
 * if it is going to send one at all.
 */
#define MT_TX_STATUS_SKB_TIMEOUT	(HZ / 4)

struct mt76_tx_cb {
	unsigned long jiffies;
	u16 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_REGISTERED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_HW_SCANNING,
	MT76_HW_SCHED_SCANNING,
	MT76_RESTART,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
	MT76_STATE_POWER_OFF,
	MT76_STATE_SUSPEND,
	MT76_STATE_ROC,
	MT76_STATE_PM,
	MT76_STATE_WED_RESET,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
	bool has_6ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)
#define MT_DRV_HW_MGMT_TXQ		BIT(4)
#define MT_DRV_AMSDU_OFFLOAD		BIT(5)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;
	u16 token_size;
	u8 mcs_rates;

	void (*update_survey)(struct mt76_phy *phy);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	bool (*rx_check)(struct mt76_dev *dev, void *data, int len);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb, u32 *info);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
	MT_VEND_FEATURE_SET =	0x91,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;
	int timeout;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

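/*
 * Illustrative sketch: MCU requests funnel through struct mt76_mcu_ops,
 * with struct mt76_mcu tracking the sequence number and response queue.
 * A hypothetical command (MCU_CMD_FOO and struct foo_req stand in for a
 * chip-specific command ID and payload) would be issued as:
 *
 *	struct foo_req req = { .param = cpu_to_le32(val) };
 *
 *	err = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_FOO, &req,
 *				sizeof(req), true);
 *
 * With wait_resp == true the call sleeps on mcu.wait until the matching
 * response is queued on mcu.res_q or mcu.timeout expires.
 */
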
#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 *data;
	u16 data_len;

	struct mt76_worker status_worker;
	struct mt76_worker rx_worker;

	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
	} mcu;
};

#define MT76S_XMIT_BUF_SZ	0x3fe00
#define MT76S_NUM_TX_ENTRIES	256
#define MT76S_NUM_RX_ENTRIES	512
struct mt76_sdio {
	struct mt76_worker txrx_worker;
	struct mt76_worker status_worker;
	struct mt76_worker net_worker;
	struct mt76_worker stat_worker;

	u8 *xmit_buf;
	u32 xmit_buf_sz;

	struct sdio_func *func;
	void *intr_data;
	u8 hw_ver;
	wait_queue_head_t wait;

	struct {
		int pse_data_quota;
		int ple_data_quota;
		int pse_mcu_quota;
		int pse_page_size;
		int deficit;
	} sched;

	int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;

	struct mtk_wed_device wed;
	struct mtk_wed_device wed_hif2;
	struct completion wed_reset;
	struct completion wed_reset_complete;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u16 wcid_idx;
	};

	u32 reorder_time;

	u32 ampdu_ref;
	u32 timestamp;

	u8 iv[6];

	u8 phy_idx:2;
	u8 aggr:1;
	u8 qos_ctl;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:3, bw:4;
	union {
		struct {
			u8 he_ru:3;
			u8 he_gi:2;
			u8 he_dcm:1;
		};
		struct {
			u8 ru:4;
			u8 gi:2;
		} eht;
	};

	u8 amsdu:1, first_amsdu:1, last_amsdu:1;
	u8 rate_idx;
	u8 nss:5, band:3;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_freq_range_power {
	const struct cfg80211_sar_freq_ranges *range;
	s8 power;
};

struct mt76_testmode_ops {
	int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
	int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
			  enum mt76_testmode_state new_state);
	int (*dump_stats)(struct mt76_phy *phy, struct sk_buff *msg);
};

struct mt76_testmode_data {
	enum mt76_testmode_state state;

	u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
	struct sk_buff *tx_skb;

	u32 tx_count;
	u16 tx_mpdu_len;

	u8 tx_rate_mode;
	u8 tx_rate_idx;
	u8 tx_rate_nss;
	u8 tx_rate_sgi;
	u8 tx_rate_ldpc;
	u8 tx_rate_stbc;
	u8 tx_ltf;

	u8 tx_antenna_mask;
	u8 tx_spe_idx;

	u8 tx_duty_cycle;
	u32 tx_time;
	u32 tx_ipg;

	u32 freq_offset;

	u8 tx_power[4];
	u8 tx_power_control;

	u8 addr[3][ETH_ALEN];

	u32 tx_pending;
	u32 tx_queued;
	u16 tx_queued_limit;
	u32 tx_done;
	struct {
		u64 packets[__MT_RXQ_MAX];
		u64 fcs_error[__MT_RXQ_MAX];
	} rx_stats;
};

struct mt76_vif {
	u8 idx;
	u8 omac_idx;
	u8 band_idx;
	u8 wmm_idx;
	u8 scan_seq_num;
	u8 cipher;
	u8 basic_rates_idx;
	u8 mcast_rates_idx;
	u8 beacon_rates_idx;
	struct ieee80211_chanctx_conf *ctx;
};

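/*
 * Illustrative sketch (driver convention, not enforced by this header):
 * chip drivers typically embed struct mt76_vif at the start of their
 * ieee80211_vif drv_priv area, mirroring how mtxq_to_txq() and
 * wcid_to_sta() below recover objects from drv_priv storage:
 *
 *	struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
 *
 * where "vif" is the mac80211 struct ieee80211_vif.
 */
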
struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;
	u8 band_idx;

	spinlock_t tx_lock;
	struct list_head tx_list;
	struct mt76_queue *q_tx[__MT_TXQ_MAX];

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	enum mt76_dfs_state dfs_state;
	ktime_t survey_time;

	u32 aggr_stats[32];

	struct mt76_hw_cap cap;
	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct mt76_sband sband_6g;

	u8 macaddr[ETH_ALEN];

	int txpower_cur;
	u8 antenna_mask;
	u16 chainmask;

#ifdef CONFIG_NL80211_TESTMODE
	struct mt76_testmode_data test;
#endif

	struct delayed_work mac_work;
	u8 mac_work_count;

	struct {
		struct sk_buff *head;
		struct sk_buff **tail;
		u16 seqno;
	} rx_amsdu[__MT_RXQ_MAX];

	struct mt76_freq_range_power *frp;

	struct {
		struct led_classdev cdev;
		char name[32];
		bool al;
		u8 pin;
	} leds;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */
	struct mt76_phy *phys[__MT_MAX_BAND];

	struct ieee80211_hw *hw;

	spinlock_t wed_lock;
	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;
	struct device *dma_dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	struct net_device tx_napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];
	struct tasklet_struct irq_tasklet;

	struct list_head txwi_cache;
	struct list_head rxwi_cache;
	struct mt76_queue *q_mcu[__MT_MCUQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct mt76_worker tx_worker;
	struct napi_struct tx_napi;

	spinlock_t token_lock;
	struct idr token;
	u16 wed_token_count;
	u16 token_count;
	u16 token_size;

	spinlock_t rx_token_lock;
	struct idr rx_token;
	u16 rx_token_size;

	wait_queue_head_t tx_wait;
	/* spinlock used to protect wcid pktid linked list */
	spinlock_t status_lock;

	u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
	u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];

	u64 vif_mask;

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
	struct list_head wcid_list;

	struct list_head sta_poll_list;
	spinlock_t sta_poll_lock;

	u32 rev;

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;

	char alpha2[3];
	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	u8 csa_complete;

	u32 rxfilter;

#ifdef CONFIG_NL80211_TESTMODE
	const struct mt76_testmode_ops *test_ops;
	struct {
		const char *name;
		u32 offset;
	} test_mtd;
#endif
	struct workqueue_struct *wq;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
		struct mt76_sdio sdio;
	};
};

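/*
 * Illustrative sketch: chip drivers wrap struct mt76_dev in their own
 * device structure with the mt76 member first, which is what the
 * register access macros below rely on when they expand to
 * (dev)->mt76.bus->rr(...). A hypothetical driver would look like:
 *
 *	struct my_chip_dev {
 *		struct mt76_dev mt76;	// must come first
 *		u32 chip_private_state;
 *	};
 *
 *	u32 val = mt76_rr(mydev, 0x1000);	// reads via mydev->mt76.bus
 *
 * "my_chip_dev" and the 0x1000 offset are placeholders, not real chip
 * definitions.
 */
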
/* per-phy stats */
struct mt76_mib_stats {
	u32 ack_fail_cnt;
	u32 fcs_err_cnt;
	u32 rts_cnt;
	u32 rts_retries_cnt;
	u32 ba_miss_cnt;
	u32 tx_bf_cnt;
	u32 tx_mu_bf_cnt;
	u32 tx_mu_mpdu_cnt;
	u32 tx_mu_acked_mpdu_cnt;
	u32 tx_su_acked_mpdu_cnt;
	u32 tx_bf_ibf_ppdu_cnt;
	u32 tx_bf_ebf_ppdu_cnt;

	u32 tx_bf_rx_fb_all_cnt;
	u32 tx_bf_rx_fb_eht_cnt;
	u32 tx_bf_rx_fb_he_cnt;
	u32 tx_bf_rx_fb_vht_cnt;
	u32 tx_bf_rx_fb_ht_cnt;

	u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
	u32 tx_bf_rx_fb_nc_cnt;
	u32 tx_bf_rx_fb_nr_cnt;
	u32 tx_bf_fb_cpl_cnt;
	u32 tx_bf_fb_trig_cnt;

	u32 tx_ampdu_cnt;
	u32 tx_stop_q_empty_cnt;
	u32 tx_mpdu_attempts_cnt;
	u32 tx_mpdu_success_cnt;
	u32 tx_pkt_ebf_cnt;
	u32 tx_pkt_ibf_cnt;

	u32 tx_rwp_fail_cnt;
	u32 tx_rwp_need_cnt;

	/* rx stats */
	u32 rx_fifo_full_cnt;
	u32 channel_idle_cnt;
	u32 primary_cca_busy_time;
	u32 secondary_cca_busy_time;
	u32 primary_energy_detect_time;
	u32 cck_mdrdy_time;
	u32 ofdm_mdrdy_time;
	u32 green_mdrdy_time;
	u32 rx_vector_mismatch_cnt;
	u32 rx_delimiter_fail_cnt;
	u32 rx_mrdy_cnt;
	u32 rx_len_mismatch_cnt;
	u32 rx_mpdu_cnt;
	u32 rx_ampdu_cnt;
	u32 rx_ampdu_bytes_cnt;
	u32 rx_ampdu_valid_subframe_cnt;
	u32 rx_ampdu_valid_subframe_bytes_cnt;
	u32 rx_pfdrop_cnt;
	u32 rx_vec_queue_overflow_drop_cnt;
	u32 rx_ba_cnt;

	u32 tx_amsdu[8];
	u32 tx_amsdu_cnt;

	/* mcu_muru_stats */
	u32 dl_cck_cnt;
	u32 dl_ofdm_cnt;
	u32 dl_htmix_cnt;
	u32 dl_htgf_cnt;
	u32 dl_vht_su_cnt;
	u32 dl_vht_2mu_cnt;
	u32 dl_vht_3mu_cnt;
	u32 dl_vht_4mu_cnt;
	u32 dl_he_su_cnt;
	u32 dl_he_ext_su_cnt;
	u32 dl_he_2ru_cnt;
	u32 dl_he_2mu_cnt;
	u32 dl_he_3ru_cnt;
	u32 dl_he_3mu_cnt;
	u32 dl_he_4ru_cnt;
	u32 dl_he_4mu_cnt;
	u32 dl_he_5to8ru_cnt;
	u32 dl_he_9to16ru_cnt;
	u32 dl_he_gtr16ru_cnt;

	u32 ul_hetrig_su_cnt;
	u32 ul_hetrig_2ru_cnt;
	u32 ul_hetrig_3ru_cnt;
	u32 ul_hetrig_4ru_cnt;
	u32 ul_hetrig_5to8ru_cnt;
	u32 ul_hetrig_9to16ru_cnt;
	u32 ul_hetrig_gtr16ru_cnt;
	u32 ul_hetrig_2mu_cnt;
	u32 ul_hetrig_3mu_cnt;
	u32 ul_hetrig_4mu_cnt;
};

struct mt76_power_limits {
	s8 cck[4];
	s8 ofdm[8];
	s8 mcs[4][10];
	s8 ru[7][12];
	s8 eht[16][16];
};

struct mt76_ethtool_worker_info {
	u64 *data;
	int idx;
	int initial_stat_idx;
	int worker_stat_count;
	int sta_count;
};

#define CCK_RATE(_idx, _rate) {					\
	.bitrate = _rate,					\
	.flags = IEEE80211_RATE_SHORT_PREAMBLE,			\
	.hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx),	\
}

#define OFDM_RATE(_idx, _rate) {				\
	.bitrate = _rate,					\
	.hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx),		\
	.hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx),	\
}

extern struct ieee80211_rate mt76_rates[12];

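/*
 * Illustrative sketch: CCK_RATE()/OFDM_RATE() encode the PHY type in the
 * high byte of hw_value and the hardware rate index in the low byte, so
 * a rate table entry can be decoded as:
 *
 *	u8 phy_type = rate->hw_value >> 8;	// MT_PHY_TYPE_CCK/OFDM
 *	u8 rate_idx = rate->hw_value & 0xff;
 *
 * e.g. a hypothetical OFDM_RATE(11, 60) entry (6 Mbit/s) would have
 * hw_value == (MT_PHY_TYPE_OFDM << 8) | 11.
 */
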
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mphy.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
			int timeout, int kick);
#define __mt76_poll_msec(...)	____mt76_poll_msec(__VA_ARGS__, 10)
#define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
int mt76_net_setup_tc(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      struct net_device *netdev, enum tc_setup_type type,
		      void *type_data);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
u32 mt76_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size);
void mt76_mmio_wed_release_rx_buf(struct mtk_wed_device *wed);
int mt76_mmio_wed_offload_enable(struct mtk_wed_device *wed);
void mt76_mmio_wed_offload_disable(struct mtk_wed_device *wed);
void mt76_mmio_wed_reset_complete(struct mtk_wed_device *wed);
#endif /* CONFIG_NET_MEDIATEK_SOC_WED */

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

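/*
 * Illustrative sketch: the field helpers combine FIELD_PREP/FIELD_GET
 * with the bus accessors, and mt76_poll_msec() appends a default 10 ms
 * polling tick via the trailing macro argument. With hypothetical
 * MT_FOO_CTRL register and MT_FOO_CTRL_* field definitions:
 *
 *	mt76_rmw_field(dev, MT_FOO_CTRL, MT_FOO_CTRL_STATE, 1);
 *	if (!mt76_poll_msec(dev, MT_FOO_CTRL, MT_FOO_CTRL_READY,
 *			    MT_FOO_CTRL_READY, 100))
 *		return -ETIMEDOUT;
 *
 * The register names are placeholders; real offsets live in the
 * chip-specific regs.h headers.
 */
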
#define mt76_init_queues(dev, ...)	(dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...)	(dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_reset(dev, ...)	(dev)->mt76.queue_ops->reset_q(&((dev)->mt76), __VA_ARGS__)

#define mt76_for_each_q_rx(dev, i)	\
	for (i = 0; i < ARRAY_SIZE((dev)->q_rx); i++)	\
		if ((dev)->q_rx[i].ndesc)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops,
				u8 band_idx);
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates);

struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
					  const struct file_operations *ops);
static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
{
	return mt76_register_debugfs_fops(&dev->phy, NULL);
}

int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_phy *phy);
int mt76_get_of_data_from_mtd(struct mt76_dev *dev, void *eep, int offset, int len);
int mt76_get_of_data_from_nvmem(struct mt76_dev *dev, void *eep,
				const char *cell_name, int len);

struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
		int ring_base, void *wed, u32 flags);
u16 mt76_calculate_default_rate(struct mt76_phy *phy,
				struct ieee80211_vif *vif, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
				     int n_desc, int ring_base, void *wed,
				     u32 flags)
{
	struct mt76_queue *q;

	q = mt76_init_queue(phy->dev, qid, idx, n_desc, ring_base, wed, flags);
	if (IS_ERR(q))
		return PTR_ERR(q);

	phy->q_tx[qid] = q;

	return 0;
}

static inline int mt76_init_mcu_queue(struct mt76_dev *dev, int qid, int idx,
				      int n_desc, int ring_base)
{
	struct mt76_queue *q;

	q = mt76_init_queue(dev, qid, idx, n_desc, ring_base, NULL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->q_mcu[qid] = q;

	return 0;
}

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, u8 phy_idx)
{
	if ((phy_idx == MT_BAND1 || phy_idx == MT_BAND2) &&
	    dev->phys[phy_idx])
		return dev->phys[phy_idx];

	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, u8 phy_idx)
{
	return mt76_dev_phy(dev, phy_idx)->hw;
}

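/*
 * Illustrative sketch: during init a driver allocates one TX ring per
 * access category and lets mt76_init_tx_queue() hook it into
 * phy->q_tx[]. MT_TX_RING_BASE and MT76_TX_RING_SIZE below stand in for
 * chip-specific constants:
 *
 *	for (i = 0; i <= MT_TXQ_PSD; i++) {
 *		err = mt76_init_tx_queue(phy, i, i, MT76_TX_RING_SIZE,
 *					 MT_TX_RING_BASE, NULL, 0);
 *		if (err)
 *			return err;
 *	}
 */
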
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

static inline void *mt76_skb_get_hdr(struct sk_buff *skb)
{
	struct mt76_rx_status mstat;
	u8 *data = skb->data;

	/* Alignment concerns */
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
	BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);

	mstat = *((struct mt76_rx_status *)skb->cb);

	if (mstat.flag & RX_FLAG_RADIOTAP_HE)
		data += sizeof(struct ieee80211_radiotap_he);
	if (mstat.flag & RX_FLAG_RADIOTAP_HE_MU)
		data += sizeof(struct ieee80211_radiotap_he_mu);

	return data;
}

static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

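/*
 * Worked example for mt76_insert_hdr_pad() above: a QoS data header is
 * 26 bytes, which is not a multiple of 4. The helper pushes 2 bytes of
 * headroom, moves the 802.11 header down, and zeroes the 2-byte gap
 * between header and payload, so the frame body stays 4-byte aligned
 * for the DMA engine:
 *
 *	before: | 26-byte hdr | payload |
 *	after:  | 26-byte hdr | 00 00 | payload |
 */
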
static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };
	u8 idx = nss - 1;

	return (idx < ARRAY_SIZE(nss_delta)) ? nss_delta[idx] : 0;
}

static inline bool mt76_testmode_enabled(struct mt76_phy *phy)
{
#ifdef CONFIG_NL80211_TESTMODE
	return phy->test.state != MT76_TM_STATE_OFF;
#else
	return false;
#endif
}

static inline bool mt76_is_testmode_skb(struct mt76_dev *dev,
					struct sk_buff *skb,
					struct ieee80211_hw **hw)
{
#ifdef CONFIG_NL80211_TESTMODE
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];

		if (phy && skb == phy->test.tx_skb) {
			*hw = dev->phys[i]->hw;
			return true;
		}
	}
	return false;
#else
	return false;
#endif
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_worker_run(struct mt76_dev *dev);
void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_phy *phy);
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
int mt76_rx_signal(u8 chain_mask, s8 *chain_signal);
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u16 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb,
			    struct list_head *free_list);
static inline void
mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
{
	__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}

void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
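
/*
 * Illustrative sketch: a driver's TXS handler matches a status report to
 * a queued skb by packet id under status_lock, using the helpers above:
 *
 *	struct sk_buff_head list;
 *	struct sk_buff *skb;
 *
 *	mt76_tx_status_lock(mdev, &list);
 *	skb = mt76_tx_status_skb_get(mdev, wcid, pktid, &list);
 *	if (skb)
 *		mt76_tx_status_skb_done(mdev, skb, &list);
 *	mt76_tx_status_unlock(mdev, &list);
 *
 * Completed skbs collected on "list" are handed back to mac80211 when
 * the lock is dropped.
 */
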
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);
int mt76_init_sar_power(struct ieee80211_hw *hw,
			const struct cfg80211_sar_specs *sar);
int mt76_get_sar_power(struct mt76_phy *phy,
		       struct ieee80211_channel *chan,
		       int power);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy);
int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      void *data, int len);
int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
		       struct netlink_callback *cb, void *data, int len);
int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state);
int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len);

static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable)
{
#ifdef CONFIG_NL80211_TESTMODE
	enum mt76_testmode_state state = MT76_TM_STATE_IDLE;

	if (disable || phy->test.state == MT76_TM_STATE_OFF)
		state = MT76_TM_STATE_OFF;

	mt76_testmode_set_state(phy, state);
#endif
}

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	struct ieee80211_hw *hw = mt76_phy_hw(dev, phy_idx);

	info->hw_queue &= ~MT_TX_HW_QUEUE_PHY;

	return hw;
}

void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_phy *phy);
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

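/*
 * Illustrative sketch: mt76u_urb_error() filters out the expected
 * -ECONNRESET/-ESHUTDOWN/-ENOENT codes seen when URBs are killed during
 * teardown, so a completion callback only logs real failures:
 *
 *	static void my_complete(struct urb *urb)	// hypothetical
 *	{
 *		if (mt76u_urb_error(urb))
 *			dev_err(dev->dev, "rx urb failed: %d\n",
 *				urb->status);
 *		...
 *	}
 */
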
static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
			 struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
			   u16 val, u16 offset, void *buf, size_t len);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u8 req_type, u32 addr);
void ___mt76u_wr(struct mt76_dev *dev, u8 req, u8 req_type,
		 u32 addr, u32 val);
int __mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
		 struct mt76_bus_ops *ops);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops);
int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
void mt76s_sdio_irq(struct sdio_func *func);
void mt76s_txrx_worker(struct mt76_sdio *sdio);
bool mt76s_txqs_empty(struct mt76_dev *dev);
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
		  int hw_ver);
u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
u32 mt76s_read_pcr(struct mt76_dev *dev);
void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
		      const void *data, int len);
void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
		     void *data, int len);
int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
		const struct mt76_reg_pair *data,
		int len);
int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
		struct mt76_reg_pair *data, int len);

struct sk_buff *
__mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		     int len, int data_len, gfp_t gfp);
static inline struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
		   int data_len)
{
	return __mt76_mcu_msg_alloc(dev, data, data_len, data_len, GFP_KERNEL);
}

void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);
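
/*
 * Illustrative sketch: for variable-length commands a driver builds the
 * message skb itself (MCU headroom/tailroom are reserved per
 * dev->mcu_ops) and sends it with the skb-based helper declared below.
 * "req", "tlv_data"/"tlv_len" and MCU_CMD_FOO are placeholders for
 * chip-specific definitions:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __mt76_mcu_msg_alloc(mdev, &req, sizeof(req) + tlv_len,
 *				   sizeof(req), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put_data(skb, tlv_data, tlv_len);	// append extra payload
 *	return mt76_mcu_skb_send_msg(mdev, skb, MCU_CMD_FOO, true);
 */
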
int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
			      int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
				  int cmd, bool wait_resp,
				  struct sk_buff **ret);
int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
			     int len, int max_len);
static inline int
mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
		       int len)
{
	int max_len = 4096 - dev->mcu_ops->headroom;

	return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
}

static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
		  bool wait_resp)
{
	return mt76_mcu_send_and_get_msg(dev, cmd, data, len, wait_resp, NULL);
}

static inline int
mt76_mcu_skb_send_msg(struct mt76_dev *dev, struct sk_buff *skb, int cmd,
		      bool wait_resp)
{
	return mt76_mcu_skb_send_and_get_msg(dev, skb, cmd, wait_resp, NULL);
}

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

struct device_node *
mt76_find_power_limits_node(struct mt76_dev *dev);
struct device_node *
mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan);

s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
			      struct ieee80211_channel *chan,
			      struct mt76_power_limits *dest,
			      s8 target_power);

static inline bool mt76_queue_is_wed_tx_free(struct mt76_queue *q)
{
	return (q->flags & MT_QFLAG_WED) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_TXFREE;
}

static inline bool mt76_queue_is_wed_rro(struct mt76_queue *q)
{
	return q->flags & MT_QFLAG_WED_RRO;
}

static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_IND;
}

static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
{
	return mt76_queue_is_wed_rro(q) &&
	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
}

static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
{
	if (!(q->flags & MT_QFLAG_WED))
		return false;

	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
	       mt76_queue_is_wed_rro_ind(q) ||
	       mt76_queue_is_wed_rro_data(q);
}

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *r, dma_addr_t phys);
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
	struct page *page = virt_to_head_page(buf);

	page_pool_put_full_page(page->pp, page, allow_direct);
}

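/*
 * Illustrative sketch: RX buffers come from the per-queue page pool and
 * must be returned to it rather than freed. A hypothetical RX refill
 * path pairs the two helpers defined around this point in the header:
 *
 *	u32 offset;
 *	void *buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
 *
 *	if (!buf)
 *		break;
 *	...
 *	mt76_put_page_pool_buf(buf, allow_direct);	// on error/cleanup
 */
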
static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
	struct page *page;

	page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
	if (!page)
		return NULL;

	return page_address(page) + *offset;
}

static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	spin_lock_bh(&dev->token_lock);
	__mt76_set_tx_blocked(dev, blocked);
	spin_unlock_bh(&dev->token_lock);
}

static inline int
mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);
	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return token;
}

static inline struct mt76_txwi_cache *
mt76_token_put(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);
	txwi = idr_remove(&dev->token, token);
	spin_unlock_bh(&dev->token_lock);

	return txwi;
}

void mt76_wcid_init(struct mt76_wcid *wcid);
void mt76_wcid_cleanup(struct mt76_dev *dev, struct mt76_wcid *wcid);

#endif