/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <linux/usb.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE		256
#define MT_MCU_RING_SIZE	32
#define MT_RX_BUF_SIZE		2048

struct mt76_dev;
struct mt76_wcid;

struct mt76_reg_pair {
	u32 reg;
	u32 value;
};

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
	int (*wr_rp)(struct mt76_dev *dev, u32 base,
		     const struct mt76_reg_pair *rp, int len);
	int (*rd_rp)(struct mt76_dev *dev, u32 base,
		     struct mt76_reg_pair *rp, int len);
};

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76u_buf {
	struct mt76_dev *dev;
	struct urb *urb;
	size_t len;
	bool done;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	union {
		struct mt76_txwi_cache *txwi;
		struct mt76u_buf ubuf;
	};
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 first;
	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
	struct page_frag_cache rx_page;
	spinlock_t rx_page_lock;
};

struct mt76_mcu_ops {
	struct sk_buff *(*mcu_msg_alloc)(const void *data, int len);
	int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
			    int cmd, bool wait_resp);
	int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
			 const struct mt76_reg_pair *rp, int len);
	int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
			 struct mt76_reg_pair *rp, int len);
};
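
/*
 * MCU command flow (an illustrative sketch, not a fixed API contract):
 * a message is built from raw data, then handed to the transport
 * specific send hook, optionally waiting for the firmware response:
 *
 *	struct sk_buff *skb;
 *
 *	skb = dev->mcu_ops->mcu_msg_alloc(data, len);
 *	if (!skb)
 *		return -ENOMEM;
 *	return dev->mcu_ops->mcu_send_msg(dev, skb, cmd, true);
 */
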
struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
			    struct sk_buff *skb, struct mt76_wcid *wcid,
			    struct ieee80211_sta *sta);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

enum mt76_wcid_flags {
	MT_WCID_FLAG_CHECK_PS,
	MT_WCID_FLAG_PS,
};

#define MT76_N_WCIDS 128

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	unsigned long flags;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};

struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_STATE_MCU_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
	MT76_OFFCHANNEL,
	MT76_REMOVED,
	MT76_READING_STATS,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);

	void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
		       bool ps);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 stbc[10];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[48];
	};
};
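
/*
 * The "all" member aliases the per-modulation arrays (4 + 8 + 10 + 16 +
 * 10 == 48 entries), so a global cap can be applied in a single pass.
 * An illustrative sketch, assuming a hypothetical mt76_rate_power
 * pointer "rp" and limit variable:
 *
 *	int i;
 *
 *	for (i = 0; i < ARRAY_SIZE(rp->all); i++)
 *		rp->all[i] = min_t(s8, rp->all[i], limit);
 */
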
/* addr req mask */
#define MT_VEND_TYPE_EEPROM	BIT(31)
#define MT_VEND_TYPE_CFG	BIT(30)
#define MT_VEND_TYPE_MASK	(MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)

#define MT_VEND_ADDR(type, n)	(MT_VEND_TYPE_##type | (n))

enum mt_vendor_req {
	MT_VEND_DEV_MODE = 0x1,
	MT_VEND_WRITE = 0x2,
	MT_VEND_MULTI_WRITE = 0x6,
	MT_VEND_MULTI_READ = 0x7,
	MT_VEND_READ_EEPROM = 0x9,
	MT_VEND_WRITE_FCE = 0x42,
	MT_VEND_WRITE_CFG = 0x46,
	MT_VEND_READ_CFG = 0x47,
};

enum mt76u_in_ep {
	MT_EP_IN_PKT_RX,
	MT_EP_IN_CMD_RESP,
	__MT_EP_IN_MAX,
};

enum mt76u_out_ep {
	MT_EP_OUT_INBAND_CMD,
	MT_EP_OUT_AC_BK,
	MT_EP_OUT_AC_BE,
	MT_EP_OUT_AC_VI,
	MT_EP_OUT_AC_VO,
	MT_EP_OUT_HCCA,
	__MT_EP_OUT_MAX,
};

#define MT_SG_MAX_SIZE		8
#define MT_NUM_TX_ENTRIES	256
#define MT_NUM_RX_ENTRIES	128
#define MCU_RESP_URB_SIZE	1024

struct mt76_usb {
	struct mutex usb_ctrl_mtx;
	u8 data[32];

	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct delayed_work stat_work;

	u8 out_ep[__MT_EP_OUT_MAX];
	u16 out_max_packet;
	u8 in_ep[__MT_EP_IN_MAX];
	u16 in_max_packet;

	struct mt76u_mcu {
		struct mutex mutex;
		struct completion cmpl;
		struct mt76u_buf res;
		u32 msg_seq;

		/* multiple reads */
		struct mt76_reg_pair *rp;
		int rp_len;
		u32 base;
		bool burst;
	} mcu;
};

struct mt76_mmio {
	struct mt76e_mcu {
		struct mutex mutex;

		wait_queue_head_t wait;
		struct sk_buff_head res_q;

		u32 msg_seq;
	} mcu;
	void __iomem *regs;
	spinlock_t irq_lock;
	u32 irqmask;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;

	struct mutex mutex;

	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	const struct mt76_mcu_ops *mcu_ops;
	struct device *dev;

	struct net_device napi_dev;
	spinlock_t rx_lock;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	wait_queue_head_t tx_wait;

	unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];

	struct mt76_wcid global_wcid;
	struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	u8 antenna_mask;
	u16 chainmask;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	struct mt76_rate_power rate_power;
	int txpower_conf;
	int txpower_cur;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;

	u32 rxfilter;

	union {
		struct mt76_mmio mmio;
		struct mt76_usb usb;
	};
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
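
/*
 * Chip drivers are expected to embed struct mt76_dev as a member named
 * "mt76" in their private device structure; the mt76_rr()/mt76_wr()
 * style wrappers below rely on that naming.  A minimal sketch, using a
 * hypothetical driver struct:
 *
 *	struct mt76foo_dev {
 *		struct mt76_dev mt76;	// must be named "mt76"
 *		// chip specific state follows
 *	};
 */
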
#define __mt76_rr(dev, ...)	(dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...)	(dev)->bus->wr((dev), __VA_ARGS__)
#define __mt76_rmw(dev, ...)	(dev)->bus->rmw((dev), __VA_ARGS__)
#define __mt76_wr_copy(dev, ...)	(dev)->bus->copy((dev), __VA_ARGS__)

#define __mt76_set(dev, offset, val)	__mt76_rmw(dev, offset, 0, val)
#define __mt76_clear(dev, offset, val)	__mt76_rmw(dev, offset, val, 0)

#define mt76_rr(dev, ...)	(dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...)	(dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...)	(dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...)	(dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_rp(dev, ...)	(dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_rd_rp(dev, ...)	(dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)

#define mt76_mcu_msg_alloc(dev, ...)	(dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...)	(dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val)	mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val)	mt76_rmw(dev, offset, val, 0)

/* use the macro parameter, not whatever "dev" happens to be in scope */
#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define __mt76_rmw_field(_dev, _reg, _field, _val)	\
	__mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev)		(dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...)	(dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...)	(dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...)	(dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...)	(dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...)	(dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
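
/*
 * Field access sketch for the helpers above (illustrative; the register
 * and field names are hypothetical): given a mask defined with
 * GENMASK(), mt76_rmw_field() read-modify-writes just that field and
 * mt76_get_field() extracts it:
 *
 *	#define MT_FOO_CFG		0x1234
 *	#define MT_FOO_CFG_RATE		GENMASK(7, 4)
 *
 *	mt76_rmw_field(dev, MT_FOO_CFG, MT_FOO_CFG_RATE, 3);
 *	rate = mt76_get_field(dev, MT_FOO_CFG, MT_FOO_CFG_RATE);
 */
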
static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

struct mt76_dev *mt76_alloc_device(unsigned int size,
				   const struct ieee80211_ops *ops);
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
			 s8 *val, int len);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

/* increment with wrap-around; size must be a power of two */
static inline int mt76_incr(int val, int size)
{
	return (val + 1) & (size - 1);
}

/* decrement with wrap-around; size must be a power of two */
static inline int mt76_decr(int val, int size)
{
	return (val - 1) & (size - 1);
}

u8 mt76_ac_to_hwq(u8 ac);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}

int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);
void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
			   struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
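
/*
 * Typical RX hand-off (a sketch, not mandated by this header): the bus
 * backend feeds each frame to the core with mt76_rx(), then ends the
 * poll cycle with mt76_rx_poll_complete(), which releases the batch
 * (including reordered A-MPDU frames) to mac80211:
 *
 *	mt76_rx(dev, MT_RXQ_MAIN, skb);
 *	...
 *	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, napi);
 */
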
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
{
	return urb->status &&
	       urb->status != -ECONNRESET &&
	       urb->status != -ESHUTDOWN &&
	       urb->status != -ENOENT;
}

/* Map hardware queues to usb endpoints */
static inline u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

static inline bool mt76u_check_sg(struct mt76_dev *dev)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);

	return (udev->bus->sg_tablesize > 0 &&
		(udev->bus->no_sg_constraint ||
		 udev->speed == USB_SPEED_WIRELESS));
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len);
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val);
u32 mt76u_rr(struct mt76_dev *dev, u32 addr);
void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
void mt76u_deinit(struct mt76_dev *dev);
int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp);
void mt76u_buf_free(struct mt76u_buf *buf);
int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context);
int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
void mt76u_stop_queues(struct mt76_dev *dev);
void mt76u_stop_stat_wk(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);

void mt76u_mcu_complete_urb(struct urb *urb);
int mt76u_mcu_init_rx(struct mt76_dev *dev);
void mt76u_mcu_deinit(struct mt76_dev *dev);

#endif