/* SPDX-License-Identifier: ISC */
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048
#define MT_SKB_HEAD_LEN		128

struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
			   int len);
	void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
			  int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
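
/*
 * Illustrative sketch (not part of this header): an MMIO backend would
 * typically implement the mt76_bus_ops accessors with readl()/writel()
 * against dev->mmio.regs and derive rmw from rr + wr. The function
 * names below are hypothetical.
 *
 *	static u32 example_mmio_rr(struct mt76_dev *dev, u32 offset)
 *	{
 *		return readl(dev->mmio.regs + offset);
 *	}
 *
 *	static void example_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
 *	{
 *		writel(val, dev->mmio.regs + offset);
 *	}
 *
 *	static u32 example_mmio_rmw(struct mt76_dev *dev, u32 offset,
 *				    u32 mask, u32 val)
 *	{
 *		val |= example_mmio_rr(dev, offset) & ~mask;
 *		example_mmio_wr(dev, offset, val);
 *		return val;
 *	}
 */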

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	MT_TXQ_FWDL,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_tx_info {
	struct mt76_queue_buf buf[32];
	struct sk_buff *skb;
	int nbuf;
	u32 info;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct urb *urb;
	};
	enum mt76_txq_id qid;
	bool skip_buf0:1;
	bool schedule:1;
	bool done:1;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
};

struct mt76_sw_queue {
	struct mt76_queue *q;

	struct list_head swq;
	int swq_queued;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base);

	int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_EXT_PHY		BIT(3)

DECLARE_EWMA(signal, 10, 8);

#define MT_WCID_TX_INFO_RATE		GENMASK(15, 0)
#define MT_WCID_TX_INFO_NSS		GENMASK(17, 16)
#define MT_WCID_TX_INFO_TXPWR_ADJ	GENMASK(25, 18)
#define MT_WCID_TX_INFO_SET		BIT(31)

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;
	u8 ext_phy:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
	u16 cipher;

	u32 tx_info;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct mt76_sw_queue *swq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	struct list_head list;
	dma_addr_t dma_addr;

	struct sk_buff *skb;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 num;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2
#define MT_PACKET_ID_HAS_RATE		BIT(7)

#define MT_TX_STATUS_SKB_TIMEOUT	HZ
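
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * a driver allocating status packet IDs would keep them within
 * MT_PACKET_ID_MASK and skip the reserved MT_PACKET_ID_NO_ACK /
 * MT_PACKET_ID_NO_SKB values:
 *
 *	static u8 example_next_pktid(struct mt76_wcid *wcid)
 *	{
 *		u8 pid = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
 *
 *		if (pid < MT_PACKET_ID_FIRST)
 *			pid = MT_PACKET_ID_FIRST;
 *		wcid->packet_id = pid;
 *		return pid;
 *	}
 */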

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_MCU_RESET,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

#define MT_DRV_TXWI_NO_FREE		BIT(0)
#define MT_DRV_TX_ALIGNED4_SKBS		BIT(1)
#define MT_DRV_SW_RX_AIRTIME		BIT(2)
#define MT_DRV_RX_DMA_HDR		BIT(3)

struct mt76_driver_ops {
	u32 drv_flags;
	u32 survey_flags;
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      enum mt76_txq_id qid, struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta,
			      struct mt76_tx_info *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct mt76_queue_entry *e);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
	u64 cc_rx;
	u64 cc_bss_rx;
	u64 cc_tx;

	s8 noise;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE =	0x1,
	MT_VEND_WRITE =		0x2,
	MT_VEND_POWER_ON =	0x4,
	MT_VEND_MULTI_WRITE =	0x6,
	MT_VEND_MULTI_READ =	0x7,
	MT_VEND_READ_EEPROM =	0x9,
	MT_VEND_WRITE_FCE =	0x42,
	MT_VEND_WRITE_CFG =	0x46,
	MT_VEND_READ_CFG =	0x47,
	MT_VEND_READ_EXT =	0x63,
	MT_VEND_WRITE_EXT =	0x66,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

struct mt76_mcu {
	struct mutex mutex;
	u32 msg_seq;

	struct sk_buff_head res_q;
	wait_queue_head_t wait;
};

#define MT_TX_SG_MAX_SIZE	8
#define MT_RX_SG_MAX_SIZE	4
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	__le32 reg_val;
	u8 *data;
	u16 data_len;

	struct tasklet_struct rx_tasklet;
	struct workqueue_struct *wq;
	struct work_struct stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u8 in_ep[__MT_EP_IN_MAX];
	bool sg_en;

	struct mt76u_mcu {
		u8 *data;
		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_rx_status {
	union {
		struct mt76_wcid *wcid;
		u8 wcid_idx;
	};

	unsigned long reorder_time;

	u32 ampdu_ref;

	u8 iv[6];

	u8 ext_phy:1;
	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

struct mt76_phy {
	struct ieee80211_hw *hw;
	struct mt76_dev *dev;
	void *priv;

	unsigned long state;

	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	struct mt76_channel_state *chan_state;
	ktime_t survey_time;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;

	int txpower_cur;
	u8 antenna_mask;
};

struct mt76_dev {
	struct mt76_phy phy; /* must be first */

	struct mt76_phy *phy2;

	struct ieee80211_hw *hw;

	spinlock_t lock;
	spinlock_t cc_lock;

	u32 cur_cc_bss_rx;

	struct mt76_rx_status rx_ampdu_status;
	u32 rx_ampdu_len;
	u32 rx_ampdu_ref;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct mt76_mcu mcu;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	struct tasklet_struct tx_tasklet;
	struct napi_struct tx_napi;
	struct delayed_work mac_work;

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
	unsigned long wcid_phy_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;

	u32 aggr_stats[32];

	struct tasklet_struct pre_tbtt_tasklet;
	int beacon_int;
	u8 beacon_mask;

	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;

	enum nl80211_dfs_regions region;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->write_copy((dev), __VA_ARGS__)
#define __mt76_rr_copy(dev, ...)	(dev)->bus->read_copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_rr_copy(dev, ...)	(dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
#define __mt76_mcu_send_msg(dev, ...)	(dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
#define mt76_mcu_restart(dev, ...)	(dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
#define __mt76_mcu_restart(dev, ...)	(dev)->mcu_ops->mcu_restart((dev))

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
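
/*
 * Illustrative sketch: the field helpers above combine FIELD_GET()/
 * FIELD_PREP() with the bus accessors so a driver can read or update a
 * single bitfield of a register. The register and field names below
 * are hypothetical placeholders.
 *
 *	u8 q = mt76_get_field(dev, EXAMPLE_REG, EXAMPLE_QUEUE_FIELD);
 *
 *	mt76_rmw_field(dev, EXAMPLE_REG, EXAMPLE_QUEUE_FIELD, q + 1);
 *	// expands to:
 *	// mt76_rmw(dev, EXAMPLE_REG, EXAMPLE_QUEUE_FIELD,
 *	//	    FIELD_PREP(EXAMPLE_QUEUE_FIELD, q + 1));
 */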

#define mt76_hw(dev) (dev)->mphy.hw

static inline struct ieee80211_hw *
mt76_wcid_hw(struct mt76_dev *dev, u8 wcid)
{
	if (wcid < MT76_N_WCIDS &&
	    mt76_wcid_mask_test(dev->wcid_phy_mask, wcid))
		return dev->phy2->hw;

	return dev->phy.hw;
}

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
void mt76_free_device(struct mt76_dev *dev);
void mt76_unregister_phy(struct mt76_phy *phy);

struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
				const struct ieee80211_ops *ops);
int mt76_register_phy(struct mt76_phy *phy);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct mt76_phy *
mt76_dev_phy(struct mt76_dev *dev, bool phy_ext)
{
	if (phy_ext && dev->phy2)
		return dev->phy2;
	return &dev->phy;
}

static inline struct ieee80211_hw *
mt76_phy_hw(struct mt76_dev *dev, bool phy_ext)
{
	return mt76_dev_phy(dev, phy_ext)->hw;
}

/* the hardware txwi is laid out directly before its cache struct */
static inline u8 *
mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	return (u8 *)t - dev->drv->txwi_size;
}

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
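
/*
 * Illustrative sketch: mt76_incr()/mt76_decr() assume a power-of-two
 * ring size, so the masked add/subtract wraps around. E.g. with
 * size = 256, mt76_incr(255, 256) == 0 and mt76_decr(0, 256) == 255.
 * A consumer loop over a ring could look like this (process_entry()
 * is hypothetical):
 *
 *	while (q->queued) {
 *		process_entry(&q->entry[q->tail]);
 *		q->tail = mt76_incr(q->tail, q->ndesc);
 *		q->queued--;
 *	}
 */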

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

/* the tx cb lives in mac80211's status_driver_data area of the skb */
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

/*
 * Pad the 802.11 header to a multiple of 4 bytes; the two zeroed pad
 * bytes end up between the header and the payload.
 */
static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
{
	int len = ieee80211_get_hdrlen_from_skb(skb);

	if (len % 4 == 0)
		return;

	skb_push(skb, 2);
	memmove(skb->data, skb->data + 2, len);

	skb->data[len] = 0;
	skb->data[len + 1] = 0;
}
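
/*
 * Worked example for mt76_insert_hdr_pad(): a QoS data header is
 * 26 bytes, which is not a multiple of 4. skb_push(skb, 2) grows the
 * head, memmove() shifts the 26 header bytes back to the new front,
 * and bytes 26..27 are zeroed, so the payload that follows stays
 * 4-byte aligned while the header still starts at skb->data.
 */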

static inline bool mt76_is_skb_pktid(u8 pktid)
{
	if (pktid & MT_PACKET_ID_HAS_RATE)
		return false;

	return pktid >= MT_PACKET_ID_FIRST;
}

static inline u8 mt76_tx_power_nss_delta(u8 nss)
{
	static const u8 nss_delta[4] = { 0, 6, 9, 12 };

	return nss_delta[nss - 1];
}

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
void mt76_tx_tasklet(unsigned long data);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
bool mt76_has_tx_pending(struct mt76_phy *phy);
void mt76_set_channel(struct mt76_phy *phy);
void mt76_update_survey(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
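
/*
 * Illustrative sketch of the status-list API above (driver-side usage;
 * the surrounding function and variable names are hypothetical):
 *
 *	static void example_handle_txs(struct mt76_dev *dev,
 *				       struct mt76_wcid *wcid, int pktid)
 *	{
 *		struct sk_buff_head list;
 *		struct sk_buff *skb;
 *
 *		mt76_tx_status_lock(dev, &list);
 *		skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *		if (skb) {
 *			// fill in the tx status of the skb here
 *			mt76_tx_status_skb_done(dev, skb, &list);
 *		}
 *		mt76_tx_status_unlock(dev, &list);
 *	}
 */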

int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);
void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta);

int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck);
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac);
void mt76_sw_scan_complete(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif);

/* internal */
static inline struct ieee80211_hw *
mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw = dev->phy.hw;

	if ((info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY) && dev->phy2)
		hw = dev->phy2->hw;

	info->hw_queue &= ~MT_TX_HW_QUEUE_EXT_PHY;

	return hw;
}

void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout, int ep)
{
	struct usb_interface *uintf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(uintf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[ep]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[ep]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
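
/*
 * Illustrative sketch: mt76u_bulk_msg() treats a NULL actual_len as a
 * send (OUT pipe) and a non-NULL one as a receive (IN pipe). E.g. a
 * write on the inband command endpoint, with a hypothetical 1s
 * timeout:
 *
 *	err = mt76u_bulk_msg(dev, skb->data, skb->len, NULL,
 *			     1000, MT_EP_OUT_INBAND_CMD);
 */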

int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
	       bool ext);
int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_tx(struct mt76_dev *dev);
void mt76u_stop_rx(struct mt76_dev *dev);
int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);

#endif