/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef __MT76x02_H
#define __MT76x02_H

#include <linux/kfifo.h>

#include "mt76.h"
#include "mt76x02_regs.h"
#include "mt76x02_mac.h"
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"

#define MT_CALIBRATE_INTERVAL	HZ

#define MT_WATCHDOG_TIME	(HZ / 10)
#define MT_TX_HANG_TH		10

#define MT_MAX_CHAINS		2
struct mt76x02_rx_freq_cal {
	s8 high_gain[MT_MAX_CHAINS];
	s8 rssi_offset[MT_MAX_CHAINS];
	s8 lna_gain;
	u32 mcu_gain;
	s16 temp_offset;
	u8 freq_offset;
};

struct mt76x02_calibration {
	struct mt76x02_rx_freq_cal rx;

	u8 agc_gain_init[MT_MAX_CHAINS];
	u8 agc_gain_cur[MT_MAX_CHAINS];

	u16 false_cca;
	s8 avg_rssi_all;
	s8 agc_gain_adjust;
	s8 low_gain;

	s8 temp_vco;
	s8 temp;

	bool init_cal_done;
	bool tssi_cal_done;
	bool tssi_comp_pending;
	bool dpd_cal_done;
	bool channel_cal_done;
	bool gain_init_done;

	int tssi_target;
	s8 tssi_dc;
};

struct mt76x02_dev {
	struct mt76_dev mt76; /* must be first */

	struct mac_address macaddr_list[8];

	struct mutex phy_mutex;

	u8 txdone_seq;
	DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);

	struct sk_buff *rx_head;

	struct tasklet_struct tx_tasklet;
	struct tasklet_struct pre_tbtt_tasklet;
	struct delayed_work cal_work;
	struct delayed_work mac_work;
	struct delayed_work wdt_work;

	u32 aggr_stats[32];

	struct sk_buff *beacons[8];
	u8 beacon_mask;
	u8 beacon_data_mask;

	u8 tbtt_count;
	u16 beacon_int;

	u32 tx_hang_reset;
	u8 tx_hang_check;

	struct mt76x02_calibration cal;

	s8 target_power;
	s8 target_power_delta[2];
	bool enable_tpc;

	bool no_2ghz;

	u8 coverage_class;
	u8 slottime;

	struct mt76x02_dfs_pattern_detector dfs_pd;

	/* edcca monitor */
	bool ed_tx_blocked;
	bool ed_monitor;
	u8 ed_trigger;
	u8 ed_silent;
};

extern struct ieee80211_rate mt76x02_rates[12];

void mt76x02_init_device(struct mt76x02_dev *dev);
void mt76x02_configure_filter(struct ieee80211_hw *hw,
			      unsigned int changed_flags,
			      unsigned int *total_flags, u64 multicast);
int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta);
void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta);

void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev);
void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
		      unsigned int idx);
int mt76x02_add_interface(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif);
void mt76x02_remove_interface(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);

int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 struct ieee80211_ampdu_params *params);
int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		    struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		    struct ieee80211_key_conf *key);
int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    u16 queue, const struct ieee80211_tx_queue_params *params);
void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta);
s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev,
				const struct ieee80211_tx_rate *rate);
s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr,
			    s8 max_txpwr_adj);
void mt76x02_wdt_work(struct work_struct *work);
void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr);
void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
				s16 coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			  struct sk_buff *skb);
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		struct sk_buff *skb);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
			   struct sk_buff *skb, struct mt76_queue *q,
			   struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			   u32 *tx_info);
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif);
void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps);
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_bss_conf *info, u32 changed);

extern const u16 mt76x02_beacon_offsets[16];
void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
void mt76x02_mac_start(struct mt76x02_dev *dev);

void mt76x02_init_debugfs(struct mt76x02_dev *dev);

static inline bool is_mt76x2(struct mt76x02_dev *dev)
{
	return mt76_chip(&dev->mt76) == 0x7612 ||
	       mt76_chip(&dev->mt76) == 0x7662 ||
	       mt76_chip(&dev->mt76) == 0x7602;
}

static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
	mt76x02_set_irq_mask(dev, 0, mask);
}

static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
{
	mt76x02_set_irq_mask(dev, mask, 0);
}

static inline bool
mt76x02_wait_for_txrx_idle(struct mt76_dev *dev)
{
	return __mt76_poll_msec(dev, MT_MAC_STATUS,
				MT_MAC_STATUS_TX | MT_MAC_STATUS_RX,
				0, 100);
}

static inline struct mt76x02_sta *
mt76x02_rx_get_sta(struct mt76_dev *dev, u8 idx)
{
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->wcid))
		return NULL;

	wcid = rcu_dereference(dev->wcid[idx]);
	if (!wcid)
		return NULL;

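	/* struct mt76_wcid is embedded in struct mt76x02_sta; recover the
	 * containing station entry from the RCU-protected wcid pointer.
	 */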
	return container_of(wcid, struct mt76x02_sta, wcid);
}

static inline struct mt76_wcid *
mt76x02_rx_get_sta_wcid(struct mt76x02_sta *sta, bool unicast)
{
	if (!sta)
		return NULL;

	if (unicast)
		return &sta->wcid;
	else
		return &sta->vif->group_wcid;
}

#endif /* __MT76x02_H */