/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76_H
#define __MT76_H

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "util.h"

#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048

struct mt76_dev;

struct mt76_bus_ops {
	u32 (*rr)(struct mt76_dev *dev, u32 offset);
	void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
	u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
	void (*copy)(struct mt76_dev *dev, u32 offset, const void *data,
		     int len);
};

enum mt76_txq_id {
	MT_TXQ_VO = IEEE80211_AC_VO,
	MT_TXQ_VI = IEEE80211_AC_VI,
	MT_TXQ_BE = IEEE80211_AC_BE,
	MT_TXQ_BK = IEEE80211_AC_BK,
	MT_TXQ_PSD,
	MT_TXQ_MCU,
	MT_TXQ_BEACON,
	MT_TXQ_CAB,
	__MT_TXQ_MAX
};

enum mt76_rxq_id {
	MT_RXQ_MAIN,
	MT_RXQ_MCU,
	__MT_RXQ_MAX
};

struct mt76_queue_buf {
	dma_addr_t addr;
	int len;
};

struct mt76_queue_entry {
	union {
		void *buf;
		struct sk_buff *skb;
	};
	struct mt76_txwi_cache *txwi;
	bool schedule;
};

struct mt76_queue_regs {
	u32 desc_base;
	u32 ring_size;
	u32 cpu_idx;
	u32 dma_idx;
} __packed __aligned(4);

struct mt76_queue {
	struct mt76_queue_regs __iomem *regs;

	spinlock_t lock;
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;

	struct list_head swq;
	int swq_queued;

	u16 head;
	u16 tail;
	int ndesc;
	int queued;
	int buf_size;

	u8 buf_offset;
	u8 hw_idx;

	dma_addr_t desc_dma;
	struct sk_buff *rx_head;
};

struct mt76_queue_ops {
	int (*init)(struct mt76_dev *dev);

	int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);

	int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
		       struct mt76_queue_buf *buf, int nbufs, u32 info,
		       struct sk_buff *skb, void *txwi);

	void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
			 int *len, u32 *info, bool *more);

	void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);

	void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
			   bool flush);

	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
};

struct mt76_wcid {
	struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];

	struct work_struct aggr_work;

	u8 idx;
	u8 hw_key_idx;

	u8 sta:1;

	u8 rx_check_pn;
	u8 rx_key_pn[IEEE80211_NUM_TIDS][6];

	__le16 tx_rate;
	bool tx_rate_set;
	u8 tx_rate_nss;
	s8 max_txpwr_adj;
	bool sw_iv;
};
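/*
 * Driver-private per-queue state. A mt76_txq lives in the drv_priv area
 * of a struct ieee80211_txq (see mtxq_to_txq() below), so its storage
 * and lifetime are owned by mac80211.
 */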
struct mt76_txq {
	struct list_head list;
	struct mt76_queue *hwq;
	struct mt76_wcid *wcid;

	struct sk_buff_head retry_q;

	u16 agg_ssn;
	bool send_bar;
	bool aggr;
};

struct mt76_txwi_cache {
	u32 txwi[8];
	dma_addr_t dma_addr;
	struct list_head list;
};

struct mt76_rx_tid {
	struct rcu_head rcu_head;

	struct mt76_dev *dev;

	spinlock_t lock;
	struct delayed_work reorder_work;

	u16 head;
	u8 size;
	u8 nframes;

	u8 started:1, stopped:1, timer_pending:1;

	struct sk_buff *reorder_buf[];
};

enum {
	MT76_STATE_INITIALIZED,
	MT76_STATE_RUNNING,
	MT76_SCANNING,
	MT76_RESET,
};

struct mt76_hw_cap {
	bool has_2ghz;
	bool has_5ghz;
};

struct mt76_driver_ops {
	u16 txwi_size;

	void (*update_survey)(struct mt76_dev *dev);

	int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
			      struct sk_buff *skb, struct mt76_queue *q,
			      struct mt76_wcid *wcid,
			      struct ieee80211_sta *sta, u32 *tx_info);

	void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
				struct mt76_queue_entry *e, bool flush);

	void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
		       struct sk_buff *skb);

	void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
};

struct mt76_channel_state {
	u64 cc_active;
	u64 cc_busy;
};

struct mt76_sband {
	struct ieee80211_supported_band sband;
	struct mt76_channel_state *chan;
};

struct mt76_dev {
	struct ieee80211_hw *hw;
	struct cfg80211_chan_def chandef;
	struct ieee80211_channel *main_chan;

	spinlock_t lock;
	spinlock_t cc_lock;
	const struct mt76_bus_ops *bus;
	const struct mt76_driver_ops *drv;
	void __iomem *regs;
	struct device *dev;

	struct net_device napi_dev;
	struct napi_struct napi[__MT_RXQ_MAX];
	struct sk_buff_head rx_skb[__MT_RXQ_MAX];

	struct list_head txwi_cache;
	struct mt76_queue q_tx[__MT_TXQ_MAX];
	struct mt76_queue q_rx[__MT_RXQ_MAX];
	const struct mt76_queue_ops *queue_ops;

	u8 macaddr[ETH_ALEN];
	u32 rev;
	unsigned long state;

	struct mt76_sband sband_2g;
	struct mt76_sband sband_5g;
	struct debugfs_blob_wrapper eeprom;
	struct debugfs_blob_wrapper otp;
	struct mt76_hw_cap cap;

	u32 debugfs_reg;

	struct led_classdev led_cdev;
	char led_name[32];
	bool led_al;
	u8 led_pin;
};

enum mt76_phy_type {
	MT_PHY_TYPE_CCK,
	MT_PHY_TYPE_OFDM,
	MT_PHY_TYPE_HT,
	MT_PHY_TYPE_HT_GF,
	MT_PHY_TYPE_VHT,
};

struct mt76_rate_power {
	union {
		struct {
			s8 cck[4];
			s8 ofdm[8];
			s8 ht[16];
			s8 vht[10];
		};
		s8 all[38];
	};
};

struct mt76_rx_status {
	struct mt76_wcid *wcid;

	unsigned long reorder_time;

	u8 iv[6];

	u8 aggr:1;
	u8 tid;
	u16 seqno;

	u16 freq;
	u32 flag;
	u8 enc_flags;
	u8 encoding:2, bw:3;
	u8 rate_idx;
	u8 nss;
	u8 band;
	u8 signal;
	u8 chains;
	s8 chain_signal[IEEE80211_MAX_CHAINS];
};
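/*
 * Register access helpers. The wrappers below take the chip driver's own
 * device struct, which is expected to embed struct mt76_dev as a member
 * named "mt76", and dispatch through the bus ops (see mt76_mmio_init()
 * below).
 *
 * A minimal usage sketch; MT_EXAMPLE_REG and MT_EXAMPLE_FIELD are
 * hypothetical register definitions, not part of this header:
 *
 *	u32 val = mt76_rr(dev, MT_EXAMPLE_REG);
 *	mt76_set(dev, MT_EXAMPLE_REG, BIT(0));
 *	mt76_rmw_field(dev, MT_EXAMPLE_REG, MT_EXAMPLE_FIELD, 0x3);
 */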
#define mt76_rr(dev, ...) (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr(dev, ...) (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
#define mt76_rmw(dev, ...) (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
#define mt76_wr_copy(dev, ...) (dev)->mt76.bus->copy(&((dev)->mt76), __VA_ARGS__)

#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)

#define mt76_get_field(_dev, _reg, _field)		\
	FIELD_GET(_field, mt76_rr(_dev, _reg))

#define mt76_rmw_field(_dev, _reg, _field, _val)	\
	mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))

#define mt76_hw(dev) (dev)->mt76.hw

bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		 int timeout);

#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)

bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
		      int timeout);

#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)

void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);

static inline u16 mt76_chip(struct mt76_dev *dev)
{
	return dev->rev >> 16;
}

static inline u16 mt76_rev(struct mt76_dev *dev)
{
	return dev->rev & 0xffff;
}

#define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
#define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))

#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_add_buf(dev, ...) (dev)->mt76.queue_ops->add_buf(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)

static inline struct mt76_channel_state *
mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
{
	struct mt76_sband *msband;
	int idx;

	if (c->band == NL80211_BAND_2GHZ)
		msband = &dev->sband_2g;
	else
		msband = &dev->sband_5g;

	idx = c - &msband->sband.channels[0];
	return &msband->chan[idx];
}

int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);

struct dentry *mt76_register_debugfs(struct mt76_dev *dev);

int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);

static inline struct ieee80211_txq *
mtxq_to_txq(struct mt76_txq *mtxq)
{
	void *ptr = mtxq;

	return container_of(ptr, struct ieee80211_txq, drv_priv);
}

static inline struct ieee80211_sta *
wcid_to_sta(struct mt76_wcid *wcid)
{
	void *ptr = wcid;

	if (!wcid || !wcid->sta)
		return NULL;

	return container_of(ptr, struct ieee80211_sta, drv_priv);
}
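/*
 * TX/RX entry points shared with the chip drivers. Note that mt76_rx()
 * is expected to only queue the frame on dev->rx_skb[]; delivery to
 * mac80211 happens later from the RX NAPI poll path via
 * mt76_rx_poll_complete() (see the internal helpers at the end of this
 * header).
 */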
int mt76_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta);

void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	     struct mt76_wcid *wcid, struct sk_buff *skb);
void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar);
void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
				  struct ieee80211_sta *sta,
				  u16 tids, int nframes,
				  enum ieee80211_frame_release_type reason,
				  bool more_data);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey);

int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
		       u16 ssn, u8 size);
void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);

void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
			 struct ieee80211_key_conf *key);

/* internal */
void mt76_tx_free(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      int queue);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);

#endif