1 /* 2 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> 3 * 4 * Permission to use, copy, modify, and/or distribute this software for any 5 * purpose with or without fee is hereby granted, provided that the above 6 * copyright notice and this permission notice appear in all copies. 7 * 8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 */ 16 17 #ifndef __MT76_H 18 #define __MT76_H 19 20 #include <linux/kernel.h> 21 #include <linux/io.h> 22 #include <linux/spinlock.h> 23 #include <linux/skbuff.h> 24 #include <linux/leds.h> 25 #include <linux/usb.h> 26 #include <linux/average.h> 27 #include <net/mac80211.h> 28 #include "util.h" 29 30 #define MT_TX_RING_SIZE 256 31 #define MT_MCU_RING_SIZE 32 32 #define MT_RX_BUF_SIZE 2048 33 34 struct mt76_dev; 35 struct mt76_wcid; 36 37 struct mt76_reg_pair { 38 u32 reg; 39 u32 value; 40 }; 41 42 enum mt76_bus_type { 43 MT76_BUS_MMIO, 44 MT76_BUS_USB, 45 }; 46 47 struct mt76_bus_ops { 48 u32 (*rr)(struct mt76_dev *dev, u32 offset); 49 void (*wr)(struct mt76_dev *dev, u32 offset, u32 val); 50 u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val); 51 void (*copy)(struct mt76_dev *dev, u32 offset, const void *data, 52 int len); 53 int (*wr_rp)(struct mt76_dev *dev, u32 base, 54 const struct mt76_reg_pair *rp, int len); 55 int (*rd_rp)(struct mt76_dev *dev, u32 base, 56 struct mt76_reg_pair *rp, int len); 57 enum mt76_bus_type type; 58 }; 59 60 #define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB) 61 #define mt76_is_mmio(dev) ((dev)->mt76.bus->type == 
MT76_BUS_MMIO) 62 63 enum mt76_txq_id { 64 MT_TXQ_VO = IEEE80211_AC_VO, 65 MT_TXQ_VI = IEEE80211_AC_VI, 66 MT_TXQ_BE = IEEE80211_AC_BE, 67 MT_TXQ_BK = IEEE80211_AC_BK, 68 MT_TXQ_PSD, 69 MT_TXQ_MCU, 70 MT_TXQ_BEACON, 71 MT_TXQ_CAB, 72 __MT_TXQ_MAX 73 }; 74 75 enum mt76_rxq_id { 76 MT_RXQ_MAIN, 77 MT_RXQ_MCU, 78 __MT_RXQ_MAX 79 }; 80 81 struct mt76_queue_buf { 82 dma_addr_t addr; 83 int len; 84 }; 85 86 struct mt76u_buf { 87 struct mt76_dev *dev; 88 struct urb *urb; 89 size_t len; 90 bool done; 91 }; 92 93 struct mt76_queue_entry { 94 union { 95 void *buf; 96 struct sk_buff *skb; 97 }; 98 union { 99 struct mt76_txwi_cache *txwi; 100 struct mt76u_buf ubuf; 101 }; 102 bool schedule; 103 }; 104 105 struct mt76_queue_regs { 106 u32 desc_base; 107 u32 ring_size; 108 u32 cpu_idx; 109 u32 dma_idx; 110 } __packed __aligned(4); 111 112 struct mt76_queue { 113 struct mt76_queue_regs __iomem *regs; 114 115 spinlock_t lock; 116 struct mt76_queue_entry *entry; 117 struct mt76_desc *desc; 118 119 struct list_head swq; 120 int swq_queued; 121 122 u16 first; 123 u16 head; 124 u16 tail; 125 int ndesc; 126 int queued; 127 int buf_size; 128 129 u8 buf_offset; 130 u8 hw_idx; 131 132 dma_addr_t desc_dma; 133 struct sk_buff *rx_head; 134 struct page_frag_cache rx_page; 135 spinlock_t rx_page_lock; 136 }; 137 138 struct mt76_mcu_ops { 139 int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data, 140 int len, bool wait_resp); 141 int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, 142 const struct mt76_reg_pair *rp, int len); 143 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, 144 struct mt76_reg_pair *rp, int len); 145 }; 146 147 struct mt76_queue_ops { 148 int (*init)(struct mt76_dev *dev); 149 150 int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q); 151 152 int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q, 153 struct mt76_queue_buf *buf, int nbufs, u32 info, 154 struct sk_buff *skb, void *txwi); 155 156 int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue 
*q, 157 struct sk_buff *skb, struct mt76_wcid *wcid, 158 struct ieee80211_sta *sta); 159 160 void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush, 161 int *len, u32 *info, bool *more); 162 163 void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid); 164 165 void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid, 166 bool flush); 167 168 void (*kick)(struct mt76_dev *dev, struct mt76_queue *q); 169 }; 170 171 enum mt76_wcid_flags { 172 MT_WCID_FLAG_CHECK_PS, 173 MT_WCID_FLAG_PS, 174 }; 175 176 #define MT76_N_WCIDS 128 177 178 DECLARE_EWMA(signal, 10, 8); 179 180 struct mt76_wcid { 181 struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS]; 182 183 struct work_struct aggr_work; 184 185 unsigned long flags; 186 187 struct ewma_signal rssi; 188 int inactive_count; 189 190 u8 idx; 191 u8 hw_key_idx; 192 193 u8 sta:1; 194 195 u8 rx_check_pn; 196 u8 rx_key_pn[IEEE80211_NUM_TIDS][6]; 197 198 __le16 tx_rate; 199 bool tx_rate_set; 200 u8 tx_rate_nss; 201 s8 max_txpwr_adj; 202 bool sw_iv; 203 204 u8 packet_id; 205 }; 206 207 struct mt76_txq { 208 struct list_head list; 209 struct mt76_queue *hwq; 210 struct mt76_wcid *wcid; 211 212 struct sk_buff_head retry_q; 213 214 u16 agg_ssn; 215 bool send_bar; 216 bool aggr; 217 }; 218 219 struct mt76_txwi_cache { 220 u32 txwi[8]; 221 dma_addr_t dma_addr; 222 struct list_head list; 223 }; 224 225 226 struct mt76_rx_tid { 227 struct rcu_head rcu_head; 228 229 struct mt76_dev *dev; 230 231 spinlock_t lock; 232 struct delayed_work reorder_work; 233 234 u16 head; 235 u8 size; 236 u8 nframes; 237 238 u8 started:1, stopped:1, timer_pending:1; 239 240 struct sk_buff *reorder_buf[]; 241 }; 242 243 #define MT_TX_CB_DMA_DONE BIT(0) 244 #define MT_TX_CB_TXS_DONE BIT(1) 245 #define MT_TX_CB_TXS_FAILED BIT(2) 246 247 #define MT_PACKET_ID_MASK GENMASK(7, 0) 248 #define MT_PACKET_ID_NO_ACK 0 249 #define MT_PACKET_ID_NO_SKB 1 250 #define MT_PACKET_ID_FIRST 2 251 252 #define MT_TX_STATUS_SKB_TIMEOUT HZ 253 254 struct 
mt76_tx_cb { 255 unsigned long jiffies; 256 u8 wcid; 257 u8 pktid; 258 u8 flags; 259 }; 260 261 enum { 262 MT76_STATE_INITIALIZED, 263 MT76_STATE_RUNNING, 264 MT76_STATE_MCU_RUNNING, 265 MT76_SCANNING, 266 MT76_RESET, 267 MT76_OFFCHANNEL, 268 MT76_REMOVED, 269 MT76_READING_STATS, 270 }; 271 272 struct mt76_hw_cap { 273 bool has_2ghz; 274 bool has_5ghz; 275 }; 276 277 struct mt76_driver_ops { 278 u16 txwi_size; 279 280 void (*update_survey)(struct mt76_dev *dev); 281 282 int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr, 283 struct sk_buff *skb, struct mt76_queue *q, 284 struct mt76_wcid *wcid, 285 struct ieee80211_sta *sta, u32 *tx_info); 286 287 void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q, 288 struct mt76_queue_entry *e, bool flush); 289 290 bool (*tx_status_data)(struct mt76_dev *dev, u8 *update); 291 292 void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q, 293 struct sk_buff *skb); 294 295 void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q); 296 297 void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta, 298 bool ps); 299 300 int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif, 301 struct ieee80211_sta *sta); 302 303 void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif, 304 struct ieee80211_sta *sta); 305 }; 306 307 struct mt76_channel_state { 308 u64 cc_active; 309 u64 cc_busy; 310 }; 311 312 struct mt76_sband { 313 struct ieee80211_supported_band sband; 314 struct mt76_channel_state *chan; 315 }; 316 317 struct mt76_rate_power { 318 union { 319 struct { 320 s8 cck[4]; 321 s8 ofdm[8]; 322 s8 stbc[10]; 323 s8 ht[16]; 324 s8 vht[10]; 325 }; 326 s8 all[48]; 327 }; 328 }; 329 330 /* addr req mask */ 331 #define MT_VEND_TYPE_EEPROM BIT(31) 332 #define MT_VEND_TYPE_CFG BIT(30) 333 #define MT_VEND_TYPE_MASK (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG) 334 335 #define MT_VEND_ADDR(type, n) (MT_VEND_TYPE_##type | (n)) 336 enum mt_vendor_req { 337 MT_VEND_DEV_MODE = 0x1, 338 MT_VEND_WRITE = 
0x2, 339 MT_VEND_MULTI_WRITE = 0x6, 340 MT_VEND_MULTI_READ = 0x7, 341 MT_VEND_READ_EEPROM = 0x9, 342 MT_VEND_WRITE_FCE = 0x42, 343 MT_VEND_WRITE_CFG = 0x46, 344 MT_VEND_READ_CFG = 0x47, 345 }; 346 347 enum mt76u_in_ep { 348 MT_EP_IN_PKT_RX, 349 MT_EP_IN_CMD_RESP, 350 __MT_EP_IN_MAX, 351 }; 352 353 enum mt76u_out_ep { 354 MT_EP_OUT_INBAND_CMD, 355 MT_EP_OUT_AC_BK, 356 MT_EP_OUT_AC_BE, 357 MT_EP_OUT_AC_VI, 358 MT_EP_OUT_AC_VO, 359 MT_EP_OUT_HCCA, 360 __MT_EP_OUT_MAX, 361 }; 362 363 #define MT_SG_MAX_SIZE 8 364 #define MT_NUM_TX_ENTRIES 256 365 #define MT_NUM_RX_ENTRIES 128 366 #define MCU_RESP_URB_SIZE 1024 367 struct mt76_usb { 368 struct mutex usb_ctrl_mtx; 369 u8 data[32]; 370 371 struct tasklet_struct rx_tasklet; 372 struct tasklet_struct tx_tasklet; 373 struct delayed_work stat_work; 374 375 u8 out_ep[__MT_EP_OUT_MAX]; 376 u16 out_max_packet; 377 u8 in_ep[__MT_EP_IN_MAX]; 378 u16 in_max_packet; 379 380 struct mt76u_mcu { 381 struct mutex mutex; 382 struct completion cmpl; 383 struct mt76u_buf res; 384 u32 msg_seq; 385 386 /* multiple reads */ 387 struct mt76_reg_pair *rp; 388 int rp_len; 389 u32 base; 390 bool burst; 391 } mcu; 392 }; 393 394 struct mt76_mmio { 395 struct mt76e_mcu { 396 struct mutex mutex; 397 398 wait_queue_head_t wait; 399 struct sk_buff_head res_q; 400 401 u32 msg_seq; 402 } mcu; 403 void __iomem *regs; 404 spinlock_t irq_lock; 405 u32 irqmask; 406 }; 407 408 struct mt76_dev { 409 struct ieee80211_hw *hw; 410 struct cfg80211_chan_def chandef; 411 struct ieee80211_channel *main_chan; 412 413 spinlock_t lock; 414 spinlock_t cc_lock; 415 416 struct mutex mutex; 417 418 const struct mt76_bus_ops *bus; 419 const struct mt76_driver_ops *drv; 420 const struct mt76_mcu_ops *mcu_ops; 421 struct device *dev; 422 423 struct net_device napi_dev; 424 spinlock_t rx_lock; 425 struct napi_struct napi[__MT_RXQ_MAX]; 426 struct sk_buff_head rx_skb[__MT_RXQ_MAX]; 427 428 struct list_head txwi_cache; 429 struct mt76_queue q_tx[__MT_TXQ_MAX]; 430 struct 
mt76_queue q_rx[__MT_RXQ_MAX]; 431 const struct mt76_queue_ops *queue_ops; 432 int tx_dma_idx[4]; 433 434 wait_queue_head_t tx_wait; 435 struct sk_buff_head status_list; 436 437 unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG]; 438 439 struct mt76_wcid global_wcid; 440 struct mt76_wcid __rcu *wcid[MT76_N_WCIDS]; 441 442 u8 macaddr[ETH_ALEN]; 443 u32 rev; 444 unsigned long state; 445 446 u8 antenna_mask; 447 u16 chainmask; 448 449 struct mt76_sband sband_2g; 450 struct mt76_sband sband_5g; 451 struct debugfs_blob_wrapper eeprom; 452 struct debugfs_blob_wrapper otp; 453 struct mt76_hw_cap cap; 454 455 struct mt76_rate_power rate_power; 456 int txpower_conf; 457 int txpower_cur; 458 459 u32 debugfs_reg; 460 461 struct led_classdev led_cdev; 462 char led_name[32]; 463 bool led_al; 464 u8 led_pin; 465 466 u8 csa_complete; 467 468 u32 rxfilter; 469 470 union { 471 struct mt76_mmio mmio; 472 struct mt76_usb usb; 473 }; 474 }; 475 476 enum mt76_phy_type { 477 MT_PHY_TYPE_CCK, 478 MT_PHY_TYPE_OFDM, 479 MT_PHY_TYPE_HT, 480 MT_PHY_TYPE_HT_GF, 481 MT_PHY_TYPE_VHT, 482 }; 483 484 struct mt76_rx_status { 485 struct mt76_wcid *wcid; 486 487 unsigned long reorder_time; 488 489 u8 iv[6]; 490 491 u8 aggr:1; 492 u8 tid; 493 u16 seqno; 494 495 u16 freq; 496 u32 flag; 497 u8 enc_flags; 498 u8 encoding:2, bw:3; 499 u8 rate_idx; 500 u8 nss; 501 u8 band; 502 s8 signal; 503 u8 chains; 504 s8 chain_signal[IEEE80211_MAX_CHAINS]; 505 }; 506 507 #define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__) 508 #define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__) 509 #define __mt76_rmw(dev, ...) (dev)->bus->rmw((dev), __VA_ARGS__) 510 #define __mt76_wr_copy(dev, ...) (dev)->bus->copy((dev), __VA_ARGS__) 511 512 #define __mt76_set(dev, offset, val) __mt76_rmw(dev, offset, 0, val) 513 #define __mt76_clear(dev, offset, val) __mt76_rmw(dev, offset, val, 0) 514 515 #define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__) 516 #define mt76_wr(dev, ...) 
(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__) 517 #define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__) 518 #define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__) 519 #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) 520 #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) 521 522 #define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__) 523 524 #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) 525 #define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0) 526 527 #define mt76_get_field(_dev, _reg, _field) \ 528 FIELD_GET(_field, mt76_rr(dev, _reg)) 529 530 #define mt76_rmw_field(_dev, _reg, _field, _val) \ 531 mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) 532 533 #define __mt76_rmw_field(_dev, _reg, _field, _val) \ 534 __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val)) 535 536 #define mt76_hw(dev) (dev)->mt76.hw 537 538 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, 539 int timeout); 540 541 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__) 542 543 bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, 544 int timeout); 545 546 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__) 547 548 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs); 549 550 static inline u16 mt76_chip(struct mt76_dev *dev) 551 { 552 return dev->rev >> 16; 553 } 554 555 static inline u16 mt76_rev(struct mt76_dev *dev) 556 { 557 return dev->rev & 0xffff; 558 } 559 560 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) 561 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) 562 563 #define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) 564 #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) 565 #define mt76_queue_add_buf(dev, ...) 
(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__) 566 #define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__) 567 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__) 568 #define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__) 569 570 static inline struct mt76_channel_state * 571 mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c) 572 { 573 struct mt76_sband *msband; 574 int idx; 575 576 if (c->band == NL80211_BAND_2GHZ) 577 msband = &dev->sband_2g; 578 else 579 msband = &dev->sband_5g; 580 581 idx = c - &msband->sband.channels[0]; 582 return &msband->chan[idx]; 583 } 584 585 struct mt76_dev *mt76_alloc_device(unsigned int size, 586 const struct ieee80211_ops *ops); 587 int mt76_register_device(struct mt76_dev *dev, bool vht, 588 struct ieee80211_rate *rates, int n_rates); 589 void mt76_unregister_device(struct mt76_dev *dev); 590 591 struct dentry *mt76_register_debugfs(struct mt76_dev *dev); 592 void mt76_seq_puts_array(struct seq_file *file, const char *str, 593 s8 *val, int len); 594 595 int mt76_eeprom_init(struct mt76_dev *dev, int len); 596 void mt76_eeprom_override(struct mt76_dev *dev); 597 598 /* increment with wrap-around */ 599 static inline int mt76_incr(int val, int size) 600 { 601 return (val + 1) & (size - 1); 602 } 603 604 /* decrement with wrap-around */ 605 static inline int mt76_decr(int val, int size) 606 { 607 return (val - 1) & (size - 1); 608 } 609 610 u8 mt76_ac_to_hwq(u8 ac); 611 612 static inline struct ieee80211_txq * 613 mtxq_to_txq(struct mt76_txq *mtxq) 614 { 615 void *ptr = mtxq; 616 617 return container_of(ptr, struct ieee80211_txq, drv_priv); 618 } 619 620 static inline struct ieee80211_sta * 621 wcid_to_sta(struct mt76_wcid *wcid) 622 { 623 void *ptr = wcid; 624 625 if (!wcid || !wcid->sta) 626 return NULL; 627 628 return container_of(ptr, struct ieee80211_sta, drv_priv); 629 } 
/*
 * Per-skb TX status bookkeeping, stored inside mac80211's TX info area;
 * the BUILD_BUG_ON proves struct mt76_tx_cb fits in status_driver_data.
 */
static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* status_list lock helpers; __acquires/__releases annotate for sparse */
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
/*
 * True when the URB finished with a real error, i.e. any status other
 * than the expected unlink/shutdown completion codes.
 */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* +1 skips OUT endpoint 0 (MT_EP_OUT_INBAND_CMD, see enum mt76u_out_ep) */
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/*
 * Whether scatter-gather URBs can be used on this device's USB bus:
 * the host controller must advertise a non-zero sg_tablesize and either
 * have no SG size constraint or be a wireless-USB link.
 */
static inline bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

void mt76u_mcu_complete_urb(struct urb *urb);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
void mt76u_mcu_deinit(struct mt76_dev *dev);

#endif