/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <linux/average.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

enum mt76_bus_type {
	MT76_BUS_MMIO,
	MT76_BUS_USB,
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
	enum mt76_bus_type type;
};

#define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
#define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	void *buf;
	bool done;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;
	bool stopped;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
	spinlock_t rx_page_lock;
};

struct mt76_mcu_ops {
	int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
			    int len, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
	int (*mcu_restart)(struct mt76_dev *dev);
};
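
/*
 * Usage sketch (illustrative only; the foo_* symbols and command id are
 * hypothetical): a chip driver fills an mt76_mcu_ops table and installs
 * it in its mt76_dev, after which firmware commands go through the
 * mt76_mcu_send_msg() wrapper defined further down in this header:
 *
 *	static const struct mt76_mcu_ops foo_mcu_ops = {
 *		.mcu_send_msg = foo_mcu_send_msg,
 *		.mcu_restart = foo_mcu_restart,
 *	};
 *
 *	dev->mt76.mcu_ops = &foo_mcu_ops;
 *	ret = mt76_mcu_send_msg(dev, FOO_CMD_FW_START, &req, sizeof(req),
 *				true);
 */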

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
				struct sk_buff *skb, u32 tx_info);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

DECLARE_EWMA(signal, 10, 8);

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	struct ewma_signal rssi;
	int inactive_count;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;

	u8 packet_id;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

#define MT_TX_CB_DMA_DONE		BIT(0)
#define MT_TX_CB_TXS_DONE		BIT(1)
#define MT_TX_CB_TXS_FAILED		BIT(2)

#define MT_PACKET_ID_MASK		GENMASK(7, 0)
#define MT_PACKET_ID_NO_ACK		0
#define MT_PACKET_ID_NO_SKB		1
#define MT_PACKET_ID_FIRST		2

#define MT_TX_STATUS_SKB_TIMEOUT	HZ

struct mt76_tx_cb {
	unsigned long jiffies;
	u8 wcid;
	u8 pktid;
	u8 flags;
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);

	int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

	void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			  struct ieee80211_sta *sta);

	void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta);
};
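
/*
 * A minimal sketch of how a chip driver wires up mt76_driver_ops and
 * hands it to the core (the foo_* names are hypothetical; only
 * txwi_size and the callbacks the chip actually needs must be filled
 * in):
 *
 *	static const struct mt76_driver_ops foo_drv_ops = {
 *		.txwi_size = sizeof(struct foo_txwi),
 *		.tx_prepare_skb = foo_tx_prepare_skb,
 *		.tx_complete_skb = foo_tx_complete_skb,
 *		.rx_skb = foo_queue_rx_skb,
 *	};
 *
 *	mdev = mt76_alloc_device(&pdev->dev, sizeof(struct foo_dev),
 *				 &foo_mac80211_ops, &foo_drv_ops);
 */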

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};

/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))
enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024
struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;
	bool sg_en;

	struct mt76u_mcu {
		struct mutex mutex;
		u8 *data;
		u32 msg_seq;

		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;
	int tx_dma_idx[4];

	wait_queue_head_t tx_wait;
	struct sk_buff_head status_list;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	u8 antenna_mask;
	u16 chainmask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;
	int txpower_conf;
	int txpower_cur;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u8 csa_complete;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};
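
/*
 * Chip drivers embed struct mt76_dev as the first member of their
 * private device structure; the (dev)->mt76 dereference used by the
 * register and queue macros below depends on that layout. Sketch
 * (struct foo_dev is hypothetical):
 *
 *	struct foo_dev {
 *		struct mt76_dev mt76;	// must be first
 *		...
 *	};
 */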

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	s8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};

#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...)	(dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
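
/*
 * Register access sketch using the accessors above (MT_FOO_CFG and its
 * MT_FOO_CFG_* fields are hypothetical; the field macros expect a
 * FIELD_PREP/FIELD_GET-compatible contiguous mask):
 *
 *	u32 val = mt76_rr(dev, MT_FOO_CFG);
 *	mt76_set(dev, MT_FOO_CFG, MT_FOO_CFG_EN);
 *	mt76_rmw_field(dev, MT_FOO_CFG, MT_FOO_CFG_RATE, 2);
 *	if (!mt76_poll(dev, MT_FOO_CFG, MT_FOO_CFG_BUSY, 0, 100))
 *		return -ETIMEDOUT;
 */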

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
				   const struct ieee80211_ops *ops,
				   const struct mt76_driver_ops *drv_ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}
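
/*
 * mt76_incr()/mt76_decr() assume a power-of-two size, so the
 * wrap-around reduces to a mask. A typical (illustrative) use is
 * advancing a ring index:
 *
 *	q->head = mt76_incr(q->head, q->ndesc);
 */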

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
		     sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
	return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		  __acquires(&dev->status_list.lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		  __releases(&dev->status_list.lock);

int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
			   struct sk_buff *skb);
struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
				       struct mt76_wcid *wcid, int pktid,
				       struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			     struct sk_buff_head *list);
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
			  bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta,
		   enum ieee80211_sta_state old_state,
		   enum ieee80211_sta_state new_state);
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

int mt76_get_min_avg_rssi(struct mt76_dev *dev);

int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     int *dbm);

void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
	       int timeout)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct mt76_usb *usb = &dev->usb;
	unsigned int pipe;

	if (actual_len)
		pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
	else
		pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);

	return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
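
/*
 * Endpoint lookup sketch: q2ep() shifts the hardware queue index by one
 * because out_ep[0] is the in-band command endpoint (see enum
 * mt76u_out_ep above). An URB for a data queue would be built roughly
 * as follows (udev/urb setup omitted; illustrative only):
 *
 *	u8 ep = dev->usb.out_ep[q2ep(q->hw_idx)];
 *	pipe = usb_sndbulkpipe(udev, ep);
 */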

struct sk_buff *
mt76_mcu_msg_alloc(const void *data, int head_len,
		   int data_len, int tail_len);
void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
				      unsigned long expires);

#endif