1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2023 MediaTek Inc. */
3
4 #include <linux/module.h>
5 #if defined(__FreeBSD__)
6 #include <linux/delay.h>
7 #endif
8
9 #include "mt792x.h"
10 #include "mt792x_regs.h"
11
mt792x_mac_work(struct work_struct * work)12 void mt792x_mac_work(struct work_struct *work)
13 {
14 struct mt792x_phy *phy;
15 struct mt76_phy *mphy;
16
17 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
18 mac_work.work);
19 phy = mphy->priv;
20
21 mt792x_mutex_acquire(phy->dev);
22
23 mt76_update_survey(mphy);
24 if (++mphy->mac_work_count == 2) {
25 mphy->mac_work_count = 0;
26
27 mt792x_mac_update_mib_stats(phy);
28 }
29
30 mt792x_mutex_release(phy->dev);
31
32 mt76_tx_status_check(mphy->dev, false);
33 ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
34 MT792x_WATCHDOG_TIME);
35 }
36 EXPORT_SYMBOL_GPL(mt792x_mac_work);
37
/* Program band-0 MAC timing: PLCP/CCA timeouts for CCK and OFDM, EIFS,
 * RIFS, SIFS and slot time, plus the CF-End rate.  Each timeout is
 * stretched by the configured coverage class to account for propagation
 * delay.  TX/RX are briefly disabled while the registers are rewritten.
 * (Function name keeps the historical "timeing" spelling; it is an
 * exported symbol, so it must not be renamed here.)
 */
void mt792x_mac_set_timeing(struct mt792x_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt792x_dev *dev = phy->dev;
	u32 val, reg_offset;
	/* base timeouts: CCK 231us PLCP / 48us CCA, OFDM 60us PLCP / 28us CCA */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
	/* 802.11 SIFS: 10us on 2.4 GHz, 16us on 5/6 GHz */
	int sifs = is_2ghz ? 10 : 16, offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* quiesce the MAC while timing registers are updated */
	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* coverage class adds 3us of slack per class to both timeouts */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* short slot or non-2.4GHz band: use the default (OFDM) CF-End rate */
	if (phy->slottime < 20 || !is_2ghz)
		val = MT792x_CFEND_RATE_DEFAULT;
	else
		val = MT792x_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	/* re-enable TX/RX now that timing is consistent */
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
EXPORT_SYMBOL_GPL(mt792x_mac_set_timeing);
79
/* Harvest the band-0 hardware MIB counters and accumulate them into the
 * software mirror in phy->mib.  All additions assume the hardware
 * counters are clear-on-read -- NOTE(review): presumed from the
 * accumulate-only pattern here and in mt792x_mac_reset_counters();
 * confirm against the MT792x datasheet.
 */
void mt792x_mac_update_mib_stats(struct mt792x_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt792x_dev *dev = phy->dev;
	int i, aggr0 = 0, aggr1;
	u32 val;

	/* error/control-frame counters */
	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	/* TX A-MPDU / MPDU counters */
	mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0));
	mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0));
	mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0));

	/* beamformee packet counts (explicit/implicit BF) */
	val = mt76_rr(dev, MT_MIB_SDR32(0));
	mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val);
	mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val);

	/* beamformed PPDU counts */
	val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0));
	mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val);
	mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val);

	/* beamforming feedback received, per PHY mode */
	val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0));
	mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val);
	mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val);
	mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val);
	mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val);

	/* RX MPDU / A-MPDU / block-ack counters */
	mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0));
	mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0));
	mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0));
	mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0));

	/* per-bucket A-MSDU packing histogram plus running total */
	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += val;
		mib->tx_amsdu_cnt += val;
	}

	/* TX aggregation-size histogram: each 32-bit register packs two
	 * 16-bit buckets; MT_TX_AGG_CNT feeds slots 0-7 and
	 * MT_TX_AGG_CNT2 feeds slots 8-15 of aggr_stats.
	 */
	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
		u32 val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
		phy->mt76->aggr_stats[aggr0++] += val >> 16;
		phy->mt76->aggr_stats[aggr1++] += val2 & 0xffff;
		phy->mt76->aggr_stats[aggr1++] += val2 >> 16;
	}
}
EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats);
140
/* Resolve the wcid table entry for an RX frame.  Unicast frames map
 * directly to their wcid slot; non-unicast frames are redirected to the
 * owning vif's default link station so multicast traffic is accounted
 * against the BSS.  Returns NULL when no valid station can be derived.
 * Must be called under RCU read lock (rcu_dereference on the table).
 */
struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
				     bool unicast)
{
	struct mt792x_link_sta *link_sta;
	struct mt76_wcid *wcid;
	struct mt792x_sta *msta;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	link_sta = container_of(wcid, struct mt792x_link_sta, wcid);
	msta = link_sta->sta;
	if (!msta->vif)
		return NULL;

	return &msta->vif->sta.deflink.wcid;
}
EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid);
166
167 static void
mt792x_mac_rssi_iter(void * priv,u8 * mac,struct ieee80211_vif * vif)168 mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
169 {
170 struct sk_buff *skb = priv;
171 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
172 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
173 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
174
175 if (status->signal > 0)
176 return;
177
178 if (!ether_addr_equal(vif->addr, hdr->addr1))
179 return;
180
181 ewma_rssi_add(&mvif->bss_conf.rssi, -status->signal);
182 }
183
mt792x_mac_assoc_rssi(struct mt792x_dev * dev,struct sk_buff * skb)184 void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb)
185 {
186 struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
187
188 if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
189 !ieee80211_is_auth(hdr->frame_control))
190 return;
191
192 ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
193 IEEE80211_IFACE_ITER_RESUME_ALL,
194 mt792x_mac_rssi_iter, skb);
195 }
196 EXPORT_SYMBOL_GPL(mt792x_mac_assoc_rssi);
197
/* Reset band-0 statistics: clear the hardware counters and the software
 * aggregation histogram, and restart the survey time base.  The bare
 * mt76_rr() calls are not dead code -- the reads discard the value but
 * clear the (presumably clear-on-read) hardware counters; confirm
 * against the datasheet before removing any of them.
 */
void mt792x_mac_reset_counters(struct mt792x_phy *phy)
{
	struct mt792x_dev *dev = phy->dev;
	int i;

	/* drain the TX aggregation histogram registers */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	/* pulse the RX-time clear bits for both MIB time blocks */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}
EXPORT_SYMBOL_GPL(mt792x_mac_reset_counters);
220
/* Noise-floor read-out is not implemented for this family; always
 * report 0 so the EWMA in mt792x_phy_update_channel() stays inert.
 */
static u8
mt792x_phy_get_nf(struct mt792x_phy *phy, int idx)
{
	return 0;
}
226
/* Read the per-band airtime counters and fold them into the mt76
 * channel state, and update the noise-floor EWMA.  With
 * mt792x_phy_get_nf() stubbed to 0 the noise EWMA never moves once
 * initialized; phy->noise holds the value in 1/16 dB units
 * (note the >> 4 scaling on read-out).
 */
static void
mt792x_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76);
	struct mt792x_phy *phy = mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	/* exponentially-weighted noise average, fixed point with 4
	 * fractional bits: seed on first sample, then blend
	 */
	nf = mt792x_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	/* total RX airtime includes OBSS; BSS-only RX is tracked separately */
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}
258
/* mt76 callback: refresh band-0 channel airtime statistics.  The device
 * must be awake to touch registers, so bail out if the runtime-PM wake
 * fails; on success, reschedule power save before returning.
 */
void mt792x_update_channel(struct mt76_phy *mphy)
{
	struct mt792x_dev *dev = container_of(mphy->dev, struct mt792x_dev, mt76);

	if (mt76_connac_pm_wake(mphy, &dev->pm))
		return;

	mt792x_phy_update_channel(mphy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_connac_power_save_sched(mphy, &dev->pm);
}
EXPORT_SYMBOL_GPL(mt792x_update_channel);
272
mt792x_reset(struct mt76_dev * mdev)273 void mt792x_reset(struct mt76_dev *mdev)
274 {
275 struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
276 struct mt76_connac_pm *pm = &dev->pm;
277
278 if (!dev->hw_init_done)
279 return;
280
281 if (dev->hw_full_reset)
282 return;
283
284 if (pm->suspended)
285 return;
286
287 queue_work(dev->mt76.wq, &dev->reset_work);
288 }
289 EXPORT_SYMBOL_GPL(mt792x_reset);
290
/* One-time per-band MAC initialization: TX duration limits, airtime
 * reporting, RX DMA length and RCPI (signal) reporting mode.
 */
void mt792x_mac_init_band(struct mt792x_dev *dev, u8 band)
{
	u32 mask, set;

	/* TX duration-limit insertion */
	mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
		       MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
	mt76_set(dev, MT_TMAC_CTCR0(band),
		 MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
		 MT_TMAC_CTCR0_INS_DDLMT_EN);

	/* enable RX airtime accounting in both MIB time blocks */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);

	/* enable MIB tx-rx time reporting */
	mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN);
	mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN);

	mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
	/* disable rx rate report by default due to hw issues */
	mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);

	/* filter out non-resp frames and get instantaneous signal reporting */
	mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
	set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
	      FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
	mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
EXPORT_SYMBOL_GPL(mt792x_mac_init_band);
319
/* Runtime-PM wake worker: switch firmware power control back to the
 * driver, restart RX/TX processing for the bus in use, re-arm the MAC
 * watchdog if the interface is running, then unblock mac80211 queues
 * and any waiters on pm.wait.  Queues are woken even if the wake
 * failed, so pending TX is not stranded.
 */
void mt792x_pm_wake_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	/* mt792x_mcu_drv_pmctrl() returns 0 on success */
	if (!mt792x_mcu_drv_pmctrl(dev)) {
		struct mt76_dev *mdev = &dev->mt76;
		int i;

		if (mt76_is_sdio(mdev)) {
			/* SDIO: single txrx worker handles both directions */
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_worker_schedule(&mdev->sdio.txrx_worker);
		} else {
			/* PCIe/USB: kick NAPI for every RX queue; BH must be
			 * disabled around napi_schedule() from process context
			 */
			local_bh_disable();
			mt76_for_each_q_rx(mdev, i)
				napi_schedule(&mdev->napi[i]);
			local_bh_enable();
			mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
			mt76_connac_tx_cleanup(mdev);
		}
		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     MT792x_WATCHDOG_TIME);
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}
EXPORT_SYMBOL_GPL(mt792x_pm_wake_work);
353
/* Runtime-PM power-save worker: hand power control to firmware when the
 * device has been idle long enough.  Any condition that forbids sleep
 * (scan in progress, firmware assert, mt76 mutex held, recent activity)
 * jumps to "out" to retry later; a successful handoff cancels the MAC
 * watchdog and does not reschedule itself.
 */
void mt792x_pm_power_save_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	unsigned long delta;
	struct mt76_phy *mphy;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						pm.ps_work.work);
	mphy = dev->phy.mt76;

	/* default retry delay: the full idle timeout */
	delta = dev->pm.idle_timeout;
	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) ||
	    dev->fw_assert)
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if mt76 mutex is held we should not put the device
		 * to sleep since we are currently accessing device
		 * register map. We need to wait for the next power_save
		 * trigger.
		 */
		goto out;

	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		/* still active: retry exactly when the idle window expires */
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	/* mt792x_mcu_fw_pmctrl() returns 0 on success */
	if (!mt792x_mcu_fw_pmctrl(dev)) {
		cancel_delayed_work_sync(&mphy->mac_work);
		return;
	}
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
EXPORT_SYMBOL_GPL(mt792x_pm_power_save_work);
391