xref: /linux/drivers/net/wireless/mediatek/mt76/tx.c (revision 26fbb4c8c7c3ee9a4c3b4de555a8587b5a19154e)
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

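/* Map a mac80211 txq to the corresponding hardware tx queue id; frames
 * without an associated station go out on the best-effort queue.
 */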
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

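/* For QoS data frames sent to a station with aggregation enabled on the TID,
 * remember the sequence number following this frame as the starting SSN for
 * a later BAR (see mt76_txq_schedule_list).
 */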
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

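/* mt76_tx_status_lock/unlock protect dev->status_list and collect completed
 * frames on a local list, so they can be reported to mac80211 only after the
 * lock has been dropped.
 */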
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	while ((skb = __skb_dequeue(list)) != NULL) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_tx_status(hw, skb);
	}
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

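/* A frame is released only once both the DMA completion and the tx status
 * event have been seen. If the status report failed or never arrived, the
 * frame is reported as ACKed anyway, since tx status can be unreliable.
 */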
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable; if it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

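/* Assign a packet id used to match this frame against a later tx status
 * report and add it to dev->status_list. Returns MT_PACKET_ID_NO_ACK for
 * frames that will not be ACKed (or when no wcid is given), and
 * MT_PACKET_ID_NO_SKB when no status report was requested.
 */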
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
	    wcid->packet_id == MT_PACKET_ID_NO_SKB)
		wcid->packet_id = MT_PACKET_ID_FIRST;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

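/* Find the frame on dev->status_list that matches wcid/pktid. While walking
 * the list, complete (as failed) any entry that has been pending longer than
 * MT_TX_STATUS_SKB_TIMEOUT; a negative pktid flushes all matching entries.
 */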
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

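/* Frames that carry a tx time estimate are accounted by mac80211 AQL; for
 * all other frames the driver keeps its own per-wcid in-flight counter.
 * Decrement it on completion, clamping the counter at zero.
 */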
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_wcid *wcid;
	int pending;

	if (info->tx_time_est)
		return;

	if (wcid_idx >= ARRAY_SIZE(dev->wcid))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->wcid[wcid_idx]);
	if (wcid) {
		pending = atomic_dec_return(&wcid->non_aql_packets);
		if (pending < 0)
			atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
	}

	rcu_read_unlock();
}

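/* Generic tx completion handler. Updates testmode and non-AQL accounting,
 * then either frees the skb directly (if it was never linked into
 * status_list, i.e. skb->prev is NULL) or records the DMA completion so the
 * frame can be reported once its tx status has also been processed.
 */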
void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

#ifdef CONFIG_NL80211_TESTMODE
	if (skb == dev->test.tx_skb) {
		dev->test.tx_done++;
		if (dev->test.tx_queued == dev->test.tx_done)
			wake_up(&dev->tx_wait);
	}
#endif

	mt76_tx_check_non_aql(dev, wcid_idx, skb);

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

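/* Queue one frame to hardware. For non-AQL frames destined to a station,
 * record the owning wcid in the queue entry, bump the per-wcid in-flight
 * counter and ask the caller to stop bursting once MT_MAX_NON_AQL_PKT frames
 * are pending.
 */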
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	if (idx < 0 || !sta || !non_aql)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;
	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

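/* Direct transmission path for a single frame handed down by mac80211.
 * Picks the hardware queue (management frames may be redirected to the PSD
 * queue on MT_DRV_HW_MGMT_TXQ devices), fills in rate information unless the
 * wcid has a fixed rate programmed, and queues the frame to hardware.
 */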
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(dev)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

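/* Send one frame of a powersave service period on the PSD queue, tagged as a
 * PS response. The last frame of the batch carries EOSP and requests a tx
 * status report; all earlier frames get the more-data bit set.
 */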
static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

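/* mac80211 callback used to release frames buffered for a station in
 * powersave: dequeue up to nframes frames from the requested TIDs and send
 * them as one service period, or signal EOSP immediately if nothing was
 * pending.
 */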
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

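/* Pull frames from one txq and queue them to hardware until the hardware
 * queue fills up, the non-AQL limit is reached or the txq runs dry. Stations
 * in powersave are skipped. Returns the number of frames queued, or a
 * negative error code if the device is in runtime PM or resetting.
 */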
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct mt76_wcid *wcid = mtxq->wcid;
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_STATE_PM, &phy->state) ||
		    test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

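/* Round-robin over the txqs that mac80211 hands out for this access
 * category, sending a burst from each. The queue lock is dropped temporarily
 * to run the optional tx_cleanup hook when the hardware queue is getting
 * full, and to send a BAR when a txq has one pending.
 */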
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	spin_lock_bh(&q->lock);
	while (1) {
		if (test_bit(MT76_STATE_PM, &phy->state) ||
		    test_bit(MT76_RESET, &phy->state)) {
			ret = -EBUSY;
			break;
		}

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			spin_unlock_bh(&q->lock);
			dev->queue_ops->tx_cleanup(dev, q, false);
			spin_lock_bh(&q->lock);
		}

		if (mt76_txq_stopped(q))
			break;

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = mtxq->wcid;
		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		ret += mt76_txq_send_burst(phy, q, mtxq);
		ieee80211_return_txq(phy->hw, txq, false);
	}
	spin_unlock_bh(&q->lock);

	return ret;
}

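/* Schedule all pending txqs of one data access category, looping until a
 * scheduling round makes no further progress.
 */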
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->test.tx_pending)
		mt76_testmode_tx_pending(dev);
#endif
}

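/* Flag a station's aggregation-enabled txqs so that a BAR is sent the next
 * time they are scheduled (typically used when the station enters
 * powersave).
 */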
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so update its length as well as that of the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

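/* Complete a single hardware queue entry: report the frame through the
 * driver's tx_complete_skb hook and advance the queue tail pointer.
 */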
void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
635