// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "trace.h"

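/*
 * Pre-TBTT tasklet: runs shortly before each beacon interval, refreshes
 * the beacon templates in the hardware beacon slots and queues buffered
 * broadcast/multicast frames on the PSD queue so they follow the beacon.
 */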
static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
{
	struct mt76x02_dev *dev = from_tasklet(dev, t, mt76.pre_tbtt_tasklet);
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_queue *q = dev->mphy.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {
		.dev = dev,
	};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	__skb_queue_head_init(&data.q);

	mt76x02_resync_beacon_timer(dev);

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
	dev->beacon_data_count = 0;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, &data);

	while ((skb = __skb_dequeue(&data.q)) != NULL)
		mt76x02_mac_set_beacon(dev, skb);

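	/* unmask only the slots that now contain valid beacon data */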
	mt76_wr(dev, MT_BCN_BYPASS_MASK,
		0xff00 | ~(0xff00 >> dev->beacon_data_count));

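	/* if a channel switch countdown has completed, the TBTT interrupt
	 * finalizes the switch, so don't queue buffered frames here
	 */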
	mt76_csa_check(mdev);

	if (mdev->csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, q, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock(&q->lock);
}

static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

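/* allocate an RX descriptor ring and enable its RX-done interrupt */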
static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
		      int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

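/*
 * TX worker: drain pending TX status events to mac80211, then kick the
 * software TX scheduler.
 */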
static void mt76x02_tx_worker(struct mt76_worker *w)
{
	struct mt76x02_dev *dev;

	dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mphy);
}

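/*
 * TX NAPI poll: reap completed frames from the MCU and data queues;
 * always finishes in one pass, so the budget is unused.
 */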
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       mt76.tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

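	/* clean up once more: completions that raced with the interrupt
	 * re-enable above would otherwise wait for the next interrupt
	 */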
	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
	for (i = MT_TXQ_PSD; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}

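/*
 * Set up the TX status FIFO, all DMA rings (per-AC TX, PSD, MCU, RX)
 * and the TX NAPI context.
 */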
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	dev->mt76.tx_worker.fn = mt76x02_tx_worker;
	tasklet_setup(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76_init_tx_queue(&dev->mphy, i, mt76_ac_to_hwq(i),
					 MT76x02_TX_RING_SIZE,
					 MT_TX_RING_BASE, NULL, 0);
		if (ret)
			return ret;
	}

	ret = mt76_init_tx_queue(&dev->mphy, MT_TXQ_PSD, MT_TX_HW_QUEUE_MGMT,
				 MT76x02_PSD_RING_SIZE, MT_TX_RING_BASE,
				 NULL, 0);
	if (ret)
		return ret;

	ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM, MT_TX_HW_QUEUE_MCU,
				  MT_MCU_RING_SIZE, MT_TX_RING_BASE);
	if (ret)
		return ret;

	mt76x02_irq_enable(dev,
			   MT_INT_TX_DONE(IEEE80211_AC_VO) |
			   MT_INT_TX_DONE(IEEE80211_AC_VI) |
			   MT_INT_TX_DONE(IEEE80211_AC_BE) |
			   MT_INT_TX_DONE(IEEE80211_AC_BK) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MGMT) |
			   MT_INT_TX_DONE(MT_TX_HW_QUEUE_MCU));

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev, mt76_dma_rx_poll);
	if (ret)
		return ret;

	netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
			  mt76x02_poll_tx);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

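/* an RX NAPI poll has completed; let the ring raise interrupts again */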
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr, mask;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

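	/* mask sources handled in deferred context; they are re-enabled
	 * once the corresponding NAPI poll or tasklet has run
	 */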
	mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
	if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
		mask |= MT_INT_TX_DONE_ALL;

	mt76x02_irq_disable(dev, mask);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mphy.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_GPTIMER)
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

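/* wait for WPDMA to go idle, then enable TX/RX DMA with a fixed burst size */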
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

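/*
 * Stop TX/RX DMA while keeping the configuration bits (burst size,
 * endianness, header segment length) intact.
 */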
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_mac_reset_counters(dev);
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

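/*
 * Watchdog heuristic: a TX queue counts as hung once it stays non-empty
 * while its DMA index has not moved for MT_TX_HANG_TH consecutive checks.
 */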
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mphy.q_tx[i];

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (!q->queued || prev_dma_idx != dma_idx) {
			dev->tx_hang_check[i] = 0;
			continue;
		}

		if (++dev->tx_hang_check[i] >= MT_TX_HANG_TH)
			return true;
	}

	return false;
}

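/* carry the hardware TX PN over into the key so it survives the reset */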
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

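/*
 * Tear down driver state ahead of a full restart: sync key PNs from
 * hardware, remove all stations and clear the vif/beacon bookkeeping.
 */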
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < MT76x02_N_WCIDS; i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		rcu_assign_pointer(dev->mt76.wcid[i], NULL);

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->mt76.vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}

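/*
 * Full device recovery: quiesce deferred work, reset the MAC and DMA
 * engines (optionally restarting the MCU), flush all queues and bring
 * the device back up.
 */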
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_disable(&dev->mt76.napi[i]);
	}

	mutex_lock(&dev->mt76.mutex);

	dev->mcu_timeout = 0;
	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], true);
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76_tx_status_check(&dev->mt76, true);

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mphy.state);

	mt76_worker_enable(&dev->mt76.tx_worker);
	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	if (restart) {
		set_bit(MT76_RESTART, &dev->mphy.state);
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mphy);
	}
}

void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
			       enum ieee80211_reconfig_type reconfig_type)
{
	struct mt76x02_dev *dev = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	clear_bit(MT76_RESTART, &dev->mphy.state);
}
EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);

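/* trigger a full recovery when a queue hang or an MCU timeout is seen */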
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (test_bit(MT76_RESTART, &dev->mphy.state))
		return;

	if (!mt76x02_tx_hang(dev) && !dev->mcu_timeout)
		return;

	mt76x02_watchdog_reset(dev);

	dev->tx_hang_reset++;
	memset(dev->tx_hang_check, 0, sizeof(dev->tx_hang_check));
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}
565