Lines Matching +full:ps +full:- +full:seq +full:- +full:loop

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
116 * What queue to throw the non-QoS TID traffic into
148 if (bf->bf_nseg == 0) in ath_tx_alq_post()
150 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; in ath_tx_alq_post()
151 for (i = 0, ds = (const char *) bf->bf_desc; in ath_tx_alq_post()
153 i++, ds += sc->sc_tx_desclen) { in ath_tx_alq_post()
154 if_ath_alq_post(&sc->sc_alq, in ath_tx_alq_post()
156 sc->sc_tx_desclen, in ath_tx_alq_post()
159 bf = bf->bf_next; in ath_tx_alq_post()
170 return ((sc->sc_ah->ah_magic == 0x20065416) || in ath_tx_is_11n()
171 (sc->sc_ah->ah_magic == 0x19741014)); in ath_tx_is_11n()
177 * Non-QoS frames get mapped to a TID so frames consistently
187 /* Non-QoS: map frame to a TID queue for software queueing */ in ath_tx_gettid()
191 /* QoS - fetch the TID from the header, ignore mbuf WME */ in ath_tx_gettid()
200 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_set_retry()
202 if (bf->bf_state.bfs_isretried == 0) { in ath_tx_set_retry()
203 wh->i_fc[1] |= IEEE80211_FC1_RETRY; in ath_tx_set_retry()
204 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, in ath_tx_set_retry()
207 bf->bf_state.bfs_isretried = 1; in ath_tx_set_retry()
208 bf->bf_state.bfs_retries ++; in ath_tx_set_retry()
220 * For non-QoS frames, return the mbuf WME priority.
222 * This has implications that higher priority non-QoS traffic
223 * may end up being scheduled before other non-QoS traffic,
224 * leading to out-of-sequence packets being emitted.
240 * QoS data frame (sequence number or otherwise) - in ath_tx_getac()
248 * Otherwise - return mbuf QoS pri. in ath_tx_getac()
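
A minimal sketch of the TID mapping described at lines 177-224, assuming the standard net80211 helpers (IEEE80211_QOS_HAS_SEQ(), ieee80211_gettid(), IEEE80211_NONQOS_TID); an illustration, not the driver's verbatim code:

	/* Sketch: pick the software TID for an outgoing frame. */
	wh = mtod(m0, const struct ieee80211_frame *);
	if (! IEEE80211_QOS_HAS_SEQ(wh))
		tid = IEEE80211_NONQOS_TID;	/* non-QoS: single software TID */
	else
		tid = ieee80211_gettid(wh);	/* QoS: TID from the QoS control field */
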
282 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { in ath_txfrag_setup()
283 /* XXX non-management? */ in ath_txfrag_setup()
309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, in ath_tx_dmasetup()
310 bf->bf_segs, &bf->bf_nseg, in ath_tx_dmasetup()
314 bf->bf_nseg = ATH_MAX_SCATTER + 1; in ath_tx_dmasetup()
316 sc->sc_stats.ast_tx_busdma++; in ath_tx_dmasetup()
325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ in ath_tx_dmasetup()
326 sc->sc_stats.ast_tx_linear++; in ath_tx_dmasetup()
330 sc->sc_stats.ast_tx_nombuf++; in ath_tx_dmasetup()
334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, in ath_tx_dmasetup()
335 bf->bf_segs, &bf->bf_nseg, in ath_tx_dmasetup()
338 sc->sc_stats.ast_tx_busdma++; in ath_tx_dmasetup()
342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, in ath_tx_dmasetup()
343 ("too many segments after defrag; nseg %u", bf->bf_nseg)); in ath_tx_dmasetup()
344 } else if (bf->bf_nseg == 0) { /* null packet, discard */ in ath_tx_dmasetup()
345 sc->sc_stats.ast_tx_nodata++; in ath_tx_dmasetup()
350 __func__, m0, m0->m_pkthdr.len); in ath_tx_dmasetup()
351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); in ath_tx_dmasetup()
352 bf->bf_m = m0; in ath_tx_dmasetup()
358 * Chain together segments+descriptors for a frame - 11n or otherwise.
367 struct ath_hal *ah = sc->sc_ah; in ath_tx_chaindesclist()
379 struct ath_descdma *dd = &sc->sc_txdma; in ath_tx_chaindesclist()
390 numTxMaps = sc->sc_tx_nmaps; in ath_tx_chaindesclist()
396 ds = (char *) bf->bf_desc; in ath_tx_chaindesclist()
400 for (i = 0; i < bf->bf_nseg; i++) { in ath_tx_chaindesclist()
401 bufAddrList[bp] = bf->bf_segs[i].ds_addr; in ath_tx_chaindesclist()
402 segLenList[bp] = bf->bf_segs[i].ds_len; in ath_tx_chaindesclist()
409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) in ath_tx_chaindesclist()
417 if (i == bf->bf_nseg - 1) in ath_tx_chaindesclist()
421 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); in ath_tx_chaindesclist()
432 , bf->bf_descid /* XXX desc id */ in ath_tx_chaindesclist()
433 , bf->bf_state.bfs_tx_queue in ath_tx_chaindesclist()
435 , i == bf->bf_nseg - 1 /* last segment */ in ath_tx_chaindesclist()
444 * sub-frames. Since the descriptors are in in ath_tx_chaindesclist()
445 * non-cacheable memory, this leads to some in ath_tx_chaindesclist()
449 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); in ath_tx_chaindesclist()
456 ath_hal_set11n_aggr_last(sc->sc_ah, in ath_tx_chaindesclist()
467 ath_hal_set11n_aggr_middle(sc->sc_ah, in ath_tx_chaindesclist()
469 bf->bf_state.bfs_ndelim); in ath_tx_chaindesclist()
472 bf->bf_lastds = (struct ath_desc *) ds; in ath_tx_chaindesclist()
477 ds += sc->sc_tx_desclen; in ath_tx_chaindesclist()
486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); in ath_tx_chaindesclist()
498 * conditionally for the pre-11n chips, and call ath_buf_set_rate
502 * and 4 if multi-rate retry is needed.
508 struct ath_rc_series *rc = bf->bf_state.bfs_rc; in ath_tx_set_ratectrl()
511 if (! bf->bf_state.bfs_ismrr) in ath_tx_set_ratectrl()
518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { in ath_tx_set_ratectrl()
525 * Always call - that way a retried descriptor will in ath_tx_set_ratectrl()
528 * XXX TODO: see if this is really needed - setting up in ath_tx_set_ratectrl()
535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc in ath_tx_set_ratectrl()
547 * bf->bf_next.
553 struct ath_desc *ds0 = bf_first->bf_desc; in ath_tx_setds_11n()
556 __func__, bf_first->bf_state.bfs_nframes, in ath_tx_setds_11n()
557 bf_first->bf_state.bfs_al); in ath_tx_setds_11n()
561 if (bf->bf_state.bfs_txrate0 == 0) in ath_tx_setds_11n()
564 if (bf->bf_state.bfs_rc[0].ratecode == 0) in ath_tx_setds_11n()
569 * Setup all descriptors of all subframes - this will in ath_tx_setds_11n()
575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, in ath_tx_setds_11n()
576 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_setds_11n()
579 * Setup the initial fields for the first descriptor - all in ath_tx_setds_11n()
580 * the non-11n specific stuff. in ath_tx_setds_11n()
582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc in ath_tx_setds_11n()
583 , bf->bf_state.bfs_pktlen /* packet length */ in ath_tx_setds_11n()
584 , bf->bf_state.bfs_hdrlen /* header length */ in ath_tx_setds_11n()
585 , bf->bf_state.bfs_atype /* Atheros packet type */ in ath_tx_setds_11n()
586 , bf->bf_state.bfs_txpower /* txpower */ in ath_tx_setds_11n()
587 , bf->bf_state.bfs_txrate0 in ath_tx_setds_11n()
588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ in ath_tx_setds_11n()
589 , bf->bf_state.bfs_keyix /* key cache index */ in ath_tx_setds_11n()
590 , bf->bf_state.bfs_txantenna /* antenna mode */ in ath_tx_setds_11n()
591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ in ath_tx_setds_11n()
592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ in ath_tx_setds_11n()
593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ in ath_tx_setds_11n()
604 ath_tx_set_ratectrl(sc, bf->bf_node, bf); in ath_tx_setds_11n()
608 * Setup the descriptors for a multi-descriptor frame. in ath_tx_setds_11n()
609 * This is both aggregate and non-aggregate aware. in ath_tx_setds_11n()
614 !! (bf->bf_next == NULL) /* is_last_subframe */ in ath_tx_setds_11n()
622 ath_hal_set11n_aggr_first(sc->sc_ah, in ath_tx_setds_11n()
624 bf->bf_state.bfs_al, in ath_tx_setds_11n()
625 bf->bf_state.bfs_ndelim); in ath_tx_setds_11n()
633 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, in ath_tx_setds_11n()
634 bf->bf_daddr); in ath_tx_setds_11n()
638 bf = bf->bf_next; in ath_tx_setds_11n()
646 bf_first->bf_lastds = bf_prev->bf_lastds; in ath_tx_setds_11n()
652 bf_first->bf_last = bf_prev; in ath_tx_setds_11n()
655 * For non-AR9300 NICs, which require the rate control in ath_tx_setds_11n()
656 * in the final descriptor - let's set that up now. in ath_tx_setds_11n()
660 * if firstSeg is also true. For non-aggregate frames in ath_tx_setds_11n()
667 * non-cachable memory for TX descriptors, but we'll just in ath_tx_setds_11n()
672 * is called on the final descriptor in an MPDU or A-MPDU - in ath_tx_setds_11n()
677 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); in ath_tx_setds_11n()
683 * Hand-off a frame to the multicast TX queue.
702 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, in ath_tx_handoff_mcast()
703 ("%s: busy status 0x%x", __func__, bf->bf_flags)); in ath_tx_handoff_mcast()
709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { in ath_tx_handoff_mcast()
712 __func__, bf, bf->bf_state.bfs_tx_queue, in ath_tx_handoff_mcast()
713 txq->axq_qnum); in ath_tx_handoff_mcast()
722 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); in ath_tx_handoff_mcast()
723 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; in ath_tx_handoff_mcast()
724 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, in ath_tx_handoff_mcast()
728 ath_hal_settxdesclink(sc->sc_ah, in ath_tx_handoff_mcast()
729 bf_last->bf_lastds, in ath_tx_handoff_mcast()
730 bf->bf_daddr); in ath_tx_handoff_mcast()
737 * Hand-off packet to a hardware queue.
743 struct ath_hal *ah = sc->sc_ah; in ath_tx_handoff_hw()
755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, in ath_tx_handoff_hw()
756 ("%s: busy status 0x%x", __func__, bf->bf_flags)); in ath_tx_handoff_hw()
757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, in ath_tx_handoff_hw()
765 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { in ath_tx_handoff_hw()
766 device_printf(sc->sc_dev, in ath_tx_handoff_hw()
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " in ath_tx_handoff_hw()
806 txq->axq_qnum, in ath_tx_handoff_hw()
808 txq->axq_depth); in ath_tx_handoff_hw()
816 if (txq->axq_link != NULL) { in ath_tx_handoff_hw()
817 *txq->axq_link = bf->bf_daddr; in ath_tx_handoff_hw()
820 txq->axq_qnum, txq->axq_link, in ath_tx_handoff_hw()
821 (caddr_t)bf->bf_daddr, bf->bf_desc, in ath_tx_handoff_hw()
822 txq->axq_depth); in ath_tx_handoff_hw()
824 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " in ath_tx_handoff_hw()
826 txq->axq_qnum, txq->axq_link, in ath_tx_handoff_hw()
827 (caddr_t)bf->bf_daddr, bf->bf_desc, in ath_tx_handoff_hw()
828 bf->bf_lastds); in ath_tx_handoff_hw()
837 * So we just don't do that - if we hit the end of the list, in ath_tx_handoff_hw()
839 * re-start DMA by updating the link pointer of _that_ in ath_tx_handoff_hw()
842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { in ath_tx_handoff_hw()
843 bf_first = TAILQ_FIRST(&txq->axq_q); in ath_tx_handoff_hw()
844 txq->axq_flags |= ATH_TXQ_PUTRUNNING; in ath_tx_handoff_hw()
845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); in ath_tx_handoff_hw()
848 __func__, txq->axq_qnum, in ath_tx_handoff_hw()
849 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, in ath_tx_handoff_hw()
850 txq->axq_depth); in ath_tx_handoff_hw()
854 txq->axq_qnum, in ath_tx_handoff_hw()
855 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, in ath_tx_handoff_hw()
856 bf_first->bf_lastds, in ath_tx_handoff_hw()
857 txq->axq_depth); in ath_tx_handoff_hw()
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { in ath_tx_handoff_hw()
867 __func__, bf, bf->bf_state.bfs_tx_queue, in ath_tx_handoff_hw()
868 txq->axq_qnum); in ath_tx_handoff_hw()
874 if (bf->bf_state.bfs_aggr) in ath_tx_handoff_hw()
875 txq->axq_aggr_depth++; in ath_tx_handoff_hw()
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); in ath_tx_handoff_hw()
892 * in that descriptor and then kick TxE here; it will re-read in ath_tx_handoff_hw()
897 ath_hal_txstart(ah, txq->axq_qnum); in ath_tx_handoff_hw()
900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); in ath_tx_handoff_hw()
916 bf = TAILQ_FIRST(&txq->axq_q); in ath_legacy_tx_dma_restart()
925 txq->axq_qnum, in ath_legacy_tx_dma_restart()
928 (uint32_t) bf->bf_daddr); in ath_legacy_tx_dma_restart()
931 if (sc->sc_debug & ATH_DEBUG_RESET) in ath_legacy_tx_dma_restart()
939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), in ath_legacy_tx_dma_restart()
942 txq->axq_qnum)); in ath_legacy_tx_dma_restart()
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); in ath_legacy_tx_dma_restart()
945 txq->axq_flags |= ATH_TXQ_PUTRUNNING; in ath_legacy_tx_dma_restart()
947 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, in ath_legacy_tx_dma_restart()
948 &txq->axq_link); in ath_legacy_tx_dma_restart()
949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); in ath_legacy_tx_dma_restart()
964 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) in ath_legacy_xmit_handoff()
968 if (txq->axq_qnum == ATH_TXQ_SWQ) in ath_legacy_xmit_handoff()
977 * If this fails, then a non-zero error is returned. The mbuf
1017 * added to it prior to entry so m0->m_pkthdr.len will in ath_tx_tag_crypto()
1021 cip = k->wk_cipher; in ath_tx_tag_crypto()
1022 (*hdrlen) += cip->ic_header; in ath_tx_tag_crypto()
1023 (*pktlen) += cip->ic_header + cip->ic_trailer; in ath_tx_tag_crypto()
1025 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) in ath_tx_tag_crypto()
1026 (*pktlen) += cip->ic_miclen; in ath_tx_tag_crypto()
1027 (*keyix) = k->wk_keyix; in ath_tx_tag_crypto()
1028 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { in ath_tx_tag_crypto()
1032 (*keyix) = ni->ni_ucastkey.wk_keyix; in ath_tx_tag_crypto()
1056 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_calc_protection()
1057 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_calc_protection()
1059 flags = bf->bf_state.bfs_txflags; in ath_tx_calc_protection()
1060 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_calc_protection()
1061 shortPreamble = bf->bf_state.bfs_shpream; in ath_tx_calc_protection()
1062 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_calc_protection()
1065 if (bf->bf_flags & ATH_BUF_TOA_PROBE) { in ath_tx_calc_protection()
1068 bf->bf_state.bfs_doprot = 0; in ath_tx_calc_protection()
1077 if ((ic->ic_flags & IEEE80211_F_USEPROT) && in ath_tx_calc_protection()
1078 rt->info[rix].phy == IEEE80211_T_OFDM && in ath_tx_calc_protection()
1080 bf->bf_state.bfs_doprot = 1; in ath_tx_calc_protection()
1082 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { in ath_tx_calc_protection()
1084 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { in ath_tx_calc_protection()
1094 sc->sc_stats.ast_tx_protect++; in ath_tx_calc_protection()
1105 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && in ath_tx_calc_protection()
1106 rt->info[rix].phy == IEEE80211_T_HT && in ath_tx_calc_protection()
1109 sc->sc_stats.ast_tx_htprotect++; in ath_tx_calc_protection()
1113 bf->bf_state.bfs_txflags = flags; in ath_tx_calc_protection()
1129 struct ath_hal *ah = sc->sc_ah; in ath_tx_calc_duration()
1130 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_calc_duration()
1131 int isfrag = bf->bf_m->m_flags & M_FRAG; in ath_tx_calc_duration()
1133 flags = bf->bf_state.bfs_txflags; in ath_tx_calc_duration()
1134 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_calc_duration()
1135 shortPreamble = bf->bf_state.bfs_shpream; in ath_tx_calc_duration()
1136 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_calc_duration()
1145 dur = rt->info[rix].spAckDuration; in ath_tx_calc_duration()
1147 dur = rt->info[rix].lpAckDuration; in ath_tx_calc_duration()
1148 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { in ath_tx_calc_duration()
1161 bf->bf_nextfraglen, in ath_tx_calc_duration()
1168 * fragment by disabling multi-rate retry which updates in ath_tx_calc_duration()
1169 * duration based on the multi-rate duration table. in ath_tx_calc_duration()
1171 bf->bf_state.bfs_ismrr = 0; in ath_tx_calc_duration()
1172 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; in ath_tx_calc_duration()
1177 *(u_int16_t *)wh->i_dur = htole16(dur); in ath_tx_calc_duration()
1194 ctsrate = rt->info[cix].rateCode; in ath_tx_get_rtscts_rate()
1198 ctsrate |= rt->info[cix].shortPreamble; in ath_tx_get_rtscts_rate()
1214 if (rt->info[cix].phy == IEEE80211_T_HT) { in ath_tx_calc_ctsduration()
1216 __func__, rt->info[cix].rateCode); in ath_tx_calc_ctsduration()
1217 return (-1); in ath_tx_calc_ctsduration()
1231 ctsduration += rt->info[cix].spAckDuration; in ath_tx_calc_ctsduration()
1235 ctsduration += rt->info[rix].spAckDuration; in ath_tx_calc_ctsduration()
1238 ctsduration += rt->info[cix].lpAckDuration; in ath_tx_calc_ctsduration()
1242 ctsduration += rt->info[rix].lpAckDuration; in ath_tx_calc_ctsduration()
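
A hedged reading of the accumulation visible at lines 1231-1242 (the non-matching lines in between are not shown here): when RTS protection is enabled, the CTS reply at the protection rate cix is counted first, then the data frame's own airtime at rix, then an ACK at rix unless HAL_TXDESC_NOACK is set; spAckDuration versus lpAckDuration selects short- or long-preamble timing. Roughly:

	ctsduration = (RTS ? SIFS + CTS@cix : 0) + SIFS + txtime(pktlen, rix)
	            + (NOACK ? 0 : SIFS + ACK@rix)
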
1253 * and cts duration must be re-calculated.
1258 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1259 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1266 uint8_t rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_set_rtscts()
1268 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_set_rtscts()
1273 if ((bf->bf_state.bfs_txflags & in ath_tx_set_rtscts()
1276 bf->bf_state.bfs_ctsrate = 0; in ath_tx_set_rtscts()
1277 bf->bf_state.bfs_ctsduration = 0; in ath_tx_set_rtscts()
1285 if (bf->bf_state.bfs_doprot) in ath_tx_set_rtscts()
1286 rix = sc->sc_protrix; in ath_tx_set_rtscts()
1288 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_set_rtscts()
1291 * If the raw path has hard-coded ctsrate0 to something, in ath_tx_set_rtscts()
1294 if (bf->bf_state.bfs_ctsrate0 != 0) in ath_tx_set_rtscts()
1295 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); in ath_tx_set_rtscts()
1298 cix = rt->info[rix].controlRate; in ath_tx_set_rtscts()
1301 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, in ath_tx_set_rtscts()
1302 bf->bf_state.bfs_shpream); in ath_tx_set_rtscts()
1306 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, in ath_tx_set_rtscts()
1307 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, in ath_tx_set_rtscts()
1308 rt, bf->bf_state.bfs_txflags); in ath_tx_set_rtscts()
1311 bf->bf_state.bfs_ctsrate = ctsrate; in ath_tx_set_rtscts()
1312 bf->bf_state.bfs_ctsduration = ctsduration; in ath_tx_set_rtscts()
1315 * Must disable multi-rate retry when using RTS/CTS. in ath_tx_set_rtscts()
1317 if (!sc->sc_mrrprot) { in ath_tx_set_rtscts()
1318 bf->bf_state.bfs_ismrr = 0; in ath_tx_set_rtscts()
1319 bf->bf_state.bfs_try0 = in ath_tx_set_rtscts()
1320 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ in ath_tx_set_rtscts()
1325 * Setup the descriptor chain for a normal or fast-frame
1336 struct ath_desc *ds = bf->bf_desc; in ath_tx_setds()
1337 struct ath_hal *ah = sc->sc_ah; in ath_tx_setds()
1339 if (bf->bf_state.bfs_txrate0 == 0) in ath_tx_setds()
1344 , bf->bf_state.bfs_pktlen /* packet length */ in ath_tx_setds()
1345 , bf->bf_state.bfs_hdrlen /* header length */ in ath_tx_setds()
1346 , bf->bf_state.bfs_atype /* Atheros packet type */ in ath_tx_setds()
1347 , bf->bf_state.bfs_txpower /* txpower */ in ath_tx_setds()
1348 , bf->bf_state.bfs_txrate0 in ath_tx_setds()
1349 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ in ath_tx_setds()
1350 , bf->bf_state.bfs_keyix /* key cache index */ in ath_tx_setds()
1351 , bf->bf_state.bfs_txantenna /* antenna mode */ in ath_tx_setds()
1352 , bf->bf_state.bfs_txflags /* flags */ in ath_tx_setds()
1353 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ in ath_tx_setds()
1354 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ in ath_tx_setds()
1360 bf->bf_lastds = ds; in ath_tx_setds()
1361 bf->bf_last = bf; in ath_tx_setds()
1364 ath_tx_set_ratectrl(sc, bf->bf_node, bf); in ath_tx_setds()
1372 * Non-data frames and raw frames don't require it.
1376 * pre-11n chipsets.
1390 if (! bf->bf_state.bfs_doratelookup) in ath_tx_do_ratelookup()
1394 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_do_ratelookup()
1396 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); in ath_tx_do_ratelookup()
1397 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, in ath_tx_do_ratelookup()
1401 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_do_ratelookup()
1402 bf->bf_state.bfs_rc[0].ratecode = rate; in ath_tx_do_ratelookup()
1403 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_do_ratelookup()
1405 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) in ath_tx_do_ratelookup()
1406 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, in ath_tx_do_ratelookup()
1407 is_aggr, bf->bf_state.bfs_rc); in ath_tx_do_ratelookup()
1408 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); in ath_tx_do_ratelookup()
1410 sc->sc_txrix = rix; /* for LED blinking */ in ath_tx_do_ratelookup()
1411 sc->sc_lastdatarix = rix; /* for fast frames */ in ath_tx_do_ratelookup()
1412 bf->bf_state.bfs_try0 = try0; in ath_tx_do_ratelookup()
1413 bf->bf_state.bfs_txrate0 = rate; in ath_tx_do_ratelookup()
1414 bf->bf_state.bfs_rc_maxpktlen = maxpktlen; in ath_tx_do_ratelookup()
1424 struct ath_node *an = ATH_NODE(bf->bf_node); in ath_tx_update_clrdmask()
1428 if (an->clrdmask == 1) { in ath_tx_update_clrdmask()
1429 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_update_clrdmask()
1430 an->clrdmask = 0; in ath_tx_update_clrdmask()
1450 struct ieee80211_node *ni = &an->an_node; in ath_tx_should_swq_frame()
1455 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_should_swq_frame()
1456 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_should_swq_frame()
1460 /* If it's not in powersave - direct-dispatch BAR */ in ath_tx_should_swq_frame()
1461 if ((ATH_NODE(ni)->an_is_powersave == 0) in ath_tx_should_swq_frame()
1467 } else if ((ATH_NODE(ni)->an_is_powersave == 1) in ath_tx_should_swq_frame()
1475 } else if ((ATH_NODE(ni)->an_is_powersave == 1) in ath_tx_should_swq_frame()
1485 __func__, ni->ni_macaddr, ":", type, subtype); in ath_tx_should_swq_frame()
1503 * XXX we don't update the leak count here - if we're doing
1511 struct ath_node *an = ATH_NODE(bf->bf_node); in ath_tx_xmit_normal()
1512 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; in ath_tx_xmit_normal()
1520 * non-aggregate session frames. in ath_tx_xmit_normal()
1523 * frames that must go out - eg management/raw frames. in ath_tx_xmit_normal()
1525 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_xmit_normal()
1528 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); in ath_tx_xmit_normal()
1535 /* Track per-TID hardware queue depth correctly */ in ath_tx_xmit_normal()
1536 tid->hwq_depth++; in ath_tx_xmit_normal()
1539 bf->bf_comp = ath_tx_normal_comp; in ath_tx_xmit_normal()
1565 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_normal_setup()
1566 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_normal_setup()
1578 /* XXX TODO: this pri is only used for non-QoS check, right? */ in ath_tx_normal_setup()
1585 * re-ordered frames to have out of order CCMP PN's, resulting in ath_tx_normal_setup()
1591 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; in ath_tx_normal_setup()
1592 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_normal_setup()
1593 isfrag = m0->m_flags & M_FRAG; in ath_tx_normal_setup()
1599 pktlen = m0->m_pkthdr.len - (hdrlen & 3); in ath_tx_normal_setup()
1602 if ((m0->m_flags & M_AMPDU_MPDU) == 0) in ath_tx_normal_setup()
1603 ieee80211_output_seqno_assign(ni, -1, m0); in ath_tx_normal_setup()
1625 bf->bf_node = ni; /* NB: held reference */ in ath_tx_normal_setup()
1626 m0 = bf->bf_m; /* NB: may have changed */ in ath_tx_normal_setup()
1630 ds = bf->bf_desc; in ath_tx_normal_setup()
1631 rt = sc->sc_currates; in ath_tx_normal_setup()
1632 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); in ath_tx_normal_setup()
1639 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && in ath_tx_normal_setup()
1640 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { in ath_tx_normal_setup()
1642 sc->sc_stats.ast_tx_shortpre++; in ath_tx_normal_setup()
1650 ismrr = 0; /* default no multi-rate retry */ in ath_tx_normal_setup()
1658 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { in ath_tx_normal_setup()
1660 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_normal_setup()
1669 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1670 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1672 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1678 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1679 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1681 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1693 rix = an->an_mcastrix; in ath_tx_normal_setup()
1694 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1696 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1698 } else if (m0->m_flags & M_EAPOL) { in ath_tx_normal_setup()
1700 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1701 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1703 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1708 * the hard-coded TX information decided here. in ath_tx_normal_setup()
1711 bf->bf_state.bfs_doratelookup = 1; in ath_tx_normal_setup()
1721 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", in ath_tx_normal_setup()
1722 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); in ath_tx_normal_setup()
1733 * + non-QoS frames (eg management?) that the net80211 stack has in ath_tx_normal_setup()
1734 * assigned a higher AC to, but since it's a non-QoS TID, it's in ath_tx_normal_setup()
1740 * surrounding ADDBA request/response - hence why that is special in ath_tx_normal_setup()
1751 if (txq != sc->sc_ac2q[pri]) { in ath_tx_normal_setup()
1756 txq->axq_qnum, in ath_tx_normal_setup()
1758 sc->sc_ac2q[pri], in ath_tx_normal_setup()
1759 sc->sc_ac2q[pri]->axq_qnum); in ath_tx_normal_setup()
1768 } else if (pktlen > vap->iv_rtsthreshold && in ath_tx_normal_setup()
1769 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { in ath_tx_normal_setup()
1771 sc->sc_stats.ast_tx_rts++; in ath_tx_normal_setup()
1774 sc->sc_stats.ast_tx_noack++; in ath_tx_normal_setup()
1776 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { in ath_tx_normal_setup()
1779 sc->sc_stats.ast_tdma_ack++; in ath_tx_normal_setup()
1791 device_printf(sc->sc_dev, in ath_tx_normal_setup()
1801 * So, if you send a RTS-protected NULL data frame, in ath_tx_normal_setup()
1811 bf->bf_flags |= ATH_BUF_TOA_PROBE; in ath_tx_normal_setup()
1839 txq->axq_intrcnt = 0; in ath_tx_normal_setup()
1840 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { in ath_tx_normal_setup()
1842 txq->axq_intrcnt = 0; in ath_tx_normal_setup()
1852 m0->m_nextpkt = NULL; in ath_tx_normal_setup()
1855 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, in ath_tx_normal_setup()
1856 sc->sc_hwmap[rix].ieeerate, -1); in ath_tx_normal_setup()
1859 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; in ath_tx_normal_setup()
1861 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; in ath_tx_normal_setup()
1863 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; in ath_tx_normal_setup()
1864 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; in ath_tx_normal_setup()
1865 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); in ath_tx_normal_setup()
1866 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; in ath_tx_normal_setup()
1872 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_normal_setup()
1878 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_normal_setup()
1879 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_normal_setup()
1880 bf->bf_state.bfs_rc[0].ratecode = txrate; in ath_tx_normal_setup()
1883 bf->bf_state.bfs_pktlen = pktlen; in ath_tx_normal_setup()
1884 bf->bf_state.bfs_hdrlen = hdrlen; in ath_tx_normal_setup()
1885 bf->bf_state.bfs_atype = atype; in ath_tx_normal_setup()
1886 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); in ath_tx_normal_setup()
1887 bf->bf_state.bfs_txrate0 = txrate; in ath_tx_normal_setup()
1888 bf->bf_state.bfs_try0 = try0; in ath_tx_normal_setup()
1889 bf->bf_state.bfs_keyix = keyix; in ath_tx_normal_setup()
1890 bf->bf_state.bfs_txantenna = sc->sc_txantenna; in ath_tx_normal_setup()
1891 bf->bf_state.bfs_txflags = flags; in ath_tx_normal_setup()
1892 bf->bf_state.bfs_shpream = shortPreamble; in ath_tx_normal_setup()
1895 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ in ath_tx_normal_setup()
1896 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ in ath_tx_normal_setup()
1897 bf->bf_state.bfs_ctsduration = 0; in ath_tx_normal_setup()
1898 bf->bf_state.bfs_ismrr = ismrr; in ath_tx_normal_setup()
1918 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_start()
1937 * depending upon the state of PS. If powersave is enabled in ath_tx_start()
1942 * the per-TID pool. That means that even QoS group addressed in ath_tx_start()
1946 * all be out of whack. So - chances are, the right thing in ath_tx_start()
1951 * to see what the TID should be. If it's a non-QoS frame, the in ath_tx_start()
1961 txq = sc->sc_ac2q[pri]; in ath_tx_start()
1963 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_start()
1964 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_start()
1965 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_start()
1972 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_tx_start()
1973 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth in ath_tx_start()
1974 > sc->sc_txq_mcastq_maxdepth) { in ath_tx_start()
1975 sc->sc_stats.ast_tx_mcastq_overflow++; in ath_tx_start()
1994 * that we are direct-dispatching. in ath_tx_start()
1999 ATH_NODE(ni)->an_is_powersave && in ath_tx_start()
2000 ATH_NODE(ni)->an_swq_depth > in ath_tx_start()
2001 sc->sc_txq_node_psq_maxdepth) { in ath_tx_start()
2002 sc->sc_stats.ast_tx_node_psq_overflow++; in ath_tx_start()
2007 /* A-MPDU TX */ in ath_tx_start()
2016 bf->bf_state.bfs_tid = tid; in ath_tx_start()
2017 bf->bf_state.bfs_tx_queue = txq->axq_qnum; in ath_tx_start()
2018 bf->bf_state.bfs_pri = pri; in ath_tx_start()
2022 * When servicing one or more stations in power-save mode in ath_tx_start()
2029 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { in ath_tx_start()
2030 txq = &avp->av_mcastq; in ath_tx_start()
2036 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; in ath_tx_start()
2042 bf->bf_state.bfs_dobaw = 0; in ath_tx_start()
2044 /* A-MPDU TX? Manually set sequence number */ in ath_tx_start()
2049 * Don't assign A-MPDU sequence numbers to group address in ath_tx_start()
2052 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) { in ath_tx_start()
2056 * and group-addressed frames don't get a sequence number in ath_tx_start()
2062 * Don't add QoS NULL frames and group-addressed frames in ath_tx_start()
2066 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) && in ath_tx_start()
2068 bf->bf_state.bfs_dobaw = 1; in ath_tx_start()
2076 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; in ath_tx_start()
2090 m0 = bf->bf_m; in ath_tx_start()
2094 * If it's a multicast frame, do a direct-dispatch to the in ath_tx_start()
2110 * Until things are better debugged - if this node is asleep in ath_tx_start()
2111 * and we're sending it a non-BAR frame, direct dispatch it. in ath_tx_start()
2113 * sent - eg, during reassociation/reauthentication after in ath_tx_start()
2118 if (txq == &avp->av_mcastq) { in ath_tx_start()
2121 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2127 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2133 * direct-dispatch to the hardware. in ath_tx_start()
2135 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2152 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_raw_start()
2153 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_raw_start()
2163 int o_tid = -1; in ath_tx_raw_start()
2172 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_raw_start()
2179 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; in ath_tx_raw_start()
2181 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_raw_start()
2182 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_raw_start()
2190 pri = params->ibp_pri & 3; in ath_tx_raw_start()
2202 "%s: overriding tid %d pri %d -> %d\n", in ath_tx_raw_start()
2217 if ((m0->m_flags & M_AMPDU_MPDU) == 0) in ath_tx_raw_start()
2218 ieee80211_output_seqno_assign(ni, -1, m0); in ath_tx_raw_start()
2222 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, in ath_tx_raw_start()
2232 bf->bf_state.bfs_dobaw = 0; in ath_tx_raw_start()
2237 m0 = bf->bf_m; /* NB: may have changed */ in ath_tx_raw_start()
2240 bf->bf_node = ni; /* NB: held reference */ in ath_tx_raw_start()
2245 if (params->ibp_flags & IEEE80211_BPF_RTS) in ath_tx_raw_start()
2247 else if (params->ibp_flags & IEEE80211_BPF_CTS) { in ath_tx_raw_start()
2249 bf->bf_state.bfs_doprot = 1; in ath_tx_raw_start()
2253 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) in ath_tx_raw_start()
2256 rt = sc->sc_currates; in ath_tx_raw_start()
2257 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); in ath_tx_raw_start()
2260 rix = ath_tx_findrix(sc, params->ibp_rate0); in ath_tx_raw_start()
2261 try0 = params->ibp_try0; in ath_tx_raw_start()
2266 if (m0->m_flags & M_EAPOL) { in ath_tx_raw_start()
2268 rix = an->an_mgmtrix; in ath_tx_raw_start()
2277 device_printf(sc->sc_dev, in ath_tx_raw_start()
2281 bf->bf_flags |= ATH_BUF_TOA_PROBE; in ath_tx_raw_start()
2284 txrate = rt->info[rix].rateCode; in ath_tx_raw_start()
2285 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) in ath_tx_raw_start()
2286 txrate |= rt->info[rix].shortPreamble; in ath_tx_raw_start()
2287 sc->sc_txrix = rix; in ath_tx_raw_start()
2288 ismrr = (params->ibp_try1 != 0); in ath_tx_raw_start()
2289 txantenna = params->ibp_pri >> 2; in ath_tx_raw_start()
2291 txantenna = sc->sc_txantenna; in ath_tx_raw_start()
2298 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; in ath_tx_raw_start()
2307 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, in ath_tx_raw_start()
2308 sc->sc_hwmap[rix].ieeerate, -1); in ath_tx_raw_start()
2311 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; in ath_tx_raw_start()
2312 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) in ath_tx_raw_start()
2313 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; in ath_tx_raw_start()
2314 if (m0->m_flags & M_FRAG) in ath_tx_raw_start()
2315 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; in ath_tx_raw_start()
2316 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; in ath_tx_raw_start()
2317 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, in ath_tx_raw_start()
2319 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; in ath_tx_raw_start()
2327 ds = bf->bf_desc; in ath_tx_raw_start()
2331 bf->bf_state.bfs_pktlen = pktlen; in ath_tx_raw_start()
2332 bf->bf_state.bfs_hdrlen = hdrlen; in ath_tx_raw_start()
2333 bf->bf_state.bfs_atype = atype; in ath_tx_raw_start()
2334 bf->bf_state.bfs_txpower = MIN(params->ibp_power, in ath_tx_raw_start()
2336 bf->bf_state.bfs_txrate0 = txrate; in ath_tx_raw_start()
2337 bf->bf_state.bfs_try0 = try0; in ath_tx_raw_start()
2338 bf->bf_state.bfs_keyix = keyix; in ath_tx_raw_start()
2339 bf->bf_state.bfs_txantenna = txantenna; in ath_tx_raw_start()
2340 bf->bf_state.bfs_txflags = flags; in ath_tx_raw_start()
2341 bf->bf_state.bfs_shpream = in ath_tx_raw_start()
2342 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); in ath_tx_raw_start()
2345 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); in ath_tx_raw_start()
2346 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; in ath_tx_raw_start()
2347 bf->bf_state.bfs_pri = pri; in ath_tx_raw_start()
2350 bf->bf_state.bfs_ctsrate = 0; in ath_tx_raw_start()
2351 bf->bf_state.bfs_ctsduration = 0; in ath_tx_raw_start()
2352 bf->bf_state.bfs_ismrr = ismrr; in ath_tx_raw_start()
2355 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_raw_start()
2357 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_raw_start()
2358 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_raw_start()
2359 bf->bf_state.bfs_rc[0].ratecode = txrate; in ath_tx_raw_start()
2364 rix = ath_tx_findrix(sc, params->ibp_rate1); in ath_tx_raw_start()
2365 bf->bf_state.bfs_rc[1].rix = rix; in ath_tx_raw_start()
2366 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; in ath_tx_raw_start()
2368 rix = ath_tx_findrix(sc, params->ibp_rate2); in ath_tx_raw_start()
2369 bf->bf_state.bfs_rc[2].rix = rix; in ath_tx_raw_start()
2370 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; in ath_tx_raw_start()
2372 rix = ath_tx_findrix(sc, params->ibp_rate3); in ath_tx_raw_start()
2373 bf->bf_state.bfs_rc[3].rix = rix; in ath_tx_raw_start()
2374 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; in ath_tx_raw_start()
2397 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2404 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2408 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); in ath_tx_raw_start()
2410 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2411 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2414 /* Direct-dispatch to the hardware */ in ath_tx_raw_start()
2415 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2422 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2436 struct ieee80211com *ic = ni->ni_ic; in ath_raw_xmit()
2437 struct ath_softc *sc = ic->ic_softc; in ath_raw_xmit()
2443 if (sc->sc_inreset_cnt > 0) { in ath_raw_xmit()
2450 sc->sc_txstart_cnt++; in ath_raw_xmit()
2460 if (!sc->sc_running || sc->sc_invalid) { in ath_raw_xmit()
2462 __func__, sc->sc_running, sc->sc_invalid); in ath_raw_xmit()
2473 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_raw_xmit()
2474 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth in ath_raw_xmit()
2475 > sc->sc_txq_mcastq_maxdepth) { in ath_raw_xmit()
2476 sc->sc_stats.ast_tx_mcastq_overflow++; in ath_raw_xmit()
2491 sc->sc_stats.ast_tx_nobuf++; in ath_raw_xmit()
2518 sc->sc_wd_timer = 5; in ath_raw_xmit()
2519 sc->sc_stats.ast_tx_raw++; in ath_raw_xmit()
2522 * Update the TIM - if there's anything queued to the in ath_raw_xmit()
2531 sc->sc_txstart_cnt--; in ath_raw_xmit()
2555 sc->sc_txstart_cnt--; in ath_raw_xmit()
2566 sc->sc_stats.ast_tx_raw_fail++; in ath_raw_xmit()
2576 * it goes out after any pending non-aggregate frames to the
2581 * number -earlier- than the ADDBA can be transmitted (but
2583 * be!) they'll arrive after the ADDBA - and the receiving end
2586 * The frames can't be appended to the TID software queue - it'll
2626 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) in ath_tx_action_frame_override_queue()
2628 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) in ath_tx_action_frame_override_queue()
2632 baparamset = le16toh(ia->rq_baparamset); in ath_tx_action_frame_override_queue()
2638 /* Per-node software queue operations */
2659 if (bf->bf_state.bfs_isretried) in ath_tx_addto_baw()
2662 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_addto_baw()
2664 if (! bf->bf_state.bfs_dobaw) { in ath_tx_addto_baw()
2667 __func__, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2668 tap->txa_start, tap->txa_wnd); in ath_tx_addto_baw()
2671 if (bf->bf_state.bfs_addedbaw) in ath_tx_addto_baw()
2673 "%s: re-added? tid=%d, seqno %d; window %d:%d; " in ath_tx_addto_baw()
2675 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2676 tap->txa_start, tap->txa_wnd, tid->baw_head, in ath_tx_addto_baw()
2677 tid->baw_tail); in ath_tx_addto_baw()
2683 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, in ath_tx_addto_baw()
2684 SEQNO(bf->bf_state.bfs_seqno))) { in ath_tx_addto_baw()
2688 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2689 tap->txa_start, tap->txa_wnd, tid->baw_head, in ath_tx_addto_baw()
2690 tid->baw_tail); in ath_tx_addto_baw()
2694 * ni->ni_txseqs[] is the currently allocated seqno. in ath_tx_addto_baw()
2697 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_addto_baw()
2698 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_addto_baw()
2702 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2703 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, in ath_tx_addto_baw()
2704 tid->baw_tail); in ath_tx_addto_baw()
2707 assert(tid->tx_buf[cindex] == NULL); in ath_tx_addto_baw()
2709 if (tid->tx_buf[cindex] != NULL) { in ath_tx_addto_baw()
2713 __func__, index, cindex, tid->baw_head, tid->baw_tail); in ath_tx_addto_baw()
2717 tid->tx_buf[cindex], in ath_tx_addto_baw()
2718 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), in ath_tx_addto_baw()
2720 SEQNO(bf->bf_state.bfs_seqno) in ath_tx_addto_baw()
2723 tid->tx_buf[cindex] = bf; in ath_tx_addto_baw()
2725 if (index >= ((tid->baw_tail - tid->baw_head) & in ath_tx_addto_baw()
2726 (ATH_TID_MAX_BUFS - 1))) { in ath_tx_addto_baw()
2727 tid->baw_tail = cindex; in ath_tx_addto_baw()
2728 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); in ath_tx_addto_baw()
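
A worked example of the BAW index arithmetic at lines 2697-2698, assuming ATH_BA_INDEX() is the usual modulo-4096 sequence-space difference and ATH_TID_MAX_BUFS is a power of two (64 is used here purely for illustration): with tap->txa_start = 100 and a frame seqno of 103, index = (103 - 100) & 4095 = 3; with tid->baw_head = 61, cindex = (61 + 3) & 63 = 0, so the buffer lands in slot 0 of tid->tx_buf[], wrapping around the ring.
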
2735 * When software retransmitting a (sub-)frame, it is entirely possible that
2747 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); in ath_tx_switch_baw_buf()
2751 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_switch_baw_buf()
2752 index = ATH_BA_INDEX(tap->txa_start, seqno); in ath_tx_switch_baw_buf()
2753 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_switch_baw_buf()
2760 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { in ath_tx_switch_baw_buf()
2767 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); in ath_tx_switch_baw_buf()
2770 if (tid->tx_buf[cindex] != old_bf) { in ath_tx_switch_baw_buf()
2778 tid->tx_buf[cindex] = new_bf; in ath_tx_switch_baw_buf()
2782 * seq_start - left edge of BAW
2783 * seq_next - current/next sequence number to allocate
2794 int seqno = SEQNO(bf->bf_state.bfs_seqno); in ath_tx_update_baw()
2798 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_update_baw()
2799 index = ATH_BA_INDEX(tap->txa_start, seqno); in ath_tx_update_baw()
2800 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_update_baw()
2805 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, in ath_tx_update_baw()
2806 cindex, tid->baw_head, tid->baw_tail); in ath_tx_update_baw()
2809 * If this occurs then we have a big problem - something else in ath_tx_update_baw()
2810 * has slid tap->txa_start along without updating the BAW in ath_tx_update_baw()
2818 if (tid->tx_buf[cindex] != bf) { in ath_tx_update_baw()
2820 "%s: comp bf=%p, seq=%d; slot bf=%p, seqno=%d\n", in ath_tx_update_baw()
2821 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_update_baw()
2822 tid->tx_buf[cindex], in ath_tx_update_baw()
2823 (tid->tx_buf[cindex] != NULL) ? in ath_tx_update_baw()
2824 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); in ath_tx_update_baw()
2827 tid->tx_buf[cindex] = NULL; in ath_tx_update_baw()
2829 while (tid->baw_head != tid->baw_tail && in ath_tx_update_baw()
2830 !tid->tx_buf[tid->baw_head]) { in ath_tx_update_baw()
2831 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); in ath_tx_update_baw()
2832 INCR(tid->baw_head, ATH_TID_MAX_BUFS); in ath_tx_update_baw()
2836 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); in ath_tx_update_baw()
2847 if (tid->an->an_leak_count > 0) { in ath_tx_leak_count_update()
2848 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_leak_count_update()
2853 if ((tid->an->an_stack_psq > 0) in ath_tx_leak_count_update()
2854 || (tid->an->an_swq_depth > 0)) in ath_tx_leak_count_update()
2855 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; in ath_tx_leak_count_update()
2857 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; in ath_tx_leak_count_update()
2862 tid->an->an_node.ni_macaddr, in ath_tx_leak_count_update()
2864 tid->an->an_leak_count, in ath_tx_leak_count_update()
2865 tid->an->an_stack_psq, in ath_tx_leak_count_update()
2866 tid->an->an_swq_depth, in ath_tx_leak_count_update()
2867 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); in ath_tx_leak_count_update()
2870 * Re-sync the underlying buffer. in ath_tx_leak_count_update()
2872 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, in ath_tx_leak_count_update()
2875 tid->an->an_leak_count --; in ath_tx_leak_count_update()
2885 if (tid->an->an_leak_count > 0) { in ath_tx_tid_can_tx_or_sched()
2888 if (tid->paused) in ath_tx_tid_can_tx_or_sched()
2904 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_sched()
2910 * for PS-POLL, ensure that we allow scheduling to in ath_tx_tid_sched()
2916 if (tid->sched) in ath_tx_tid_sched()
2919 tid->sched = 1; in ath_tx_tid_sched()
2926 if (tid->an->an_leak_count) { in ath_tx_tid_sched()
2927 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2929 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2934 * We can't do the above - it'll confuse the TXQ software in ath_tx_tid_sched()
2945 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2957 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_unsched()
2961 if (tid->sched == 0) in ath_tx_tid_unsched()
2964 tid->sched = 0; in ath_tx_tid_unsched()
2965 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_unsched()
2971 * This should only be called for A-MPDU TX frames.
2989 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, qos has seq=%d\n", in ath_tx_tid_seqno_assign()
2996 return -1; in ath_tx_tid_seqno_assign()
3011 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_tid_seqno_assign()
3014 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; in ath_tx_tid_seqno_assign()
3015 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
3016 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_tx_tid_seqno_assign()
3021 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; in ath_tx_tid_seqno_assign()
3022 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
3025 seqno = ni->ni_txseqs[tid]; in ath_tx_tid_seqno_assign()
3026 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
3028 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); in ath_tx_tid_seqno_assign()
3033 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", in ath_tx_tid_seqno_assign()
3047 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; in ath_tx_xmit_aggr()
3052 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_xmit_aggr()
3057 /* XXX don't sched - we're paused! */ in ath_tx_xmit_aggr()
3062 if (bf->bf_state.bfs_dobaw && in ath_tx_xmit_aggr()
3063 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, in ath_tx_xmit_aggr()
3064 SEQNO(bf->bf_state.bfs_seqno)))) { in ath_tx_xmit_aggr()
3080 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { in ath_tx_xmit_aggr()
3083 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); in ath_tx_xmit_aggr()
3084 bf->bf_state.bfs_aggr = 0; in ath_tx_xmit_aggr()
3085 bf->bf_state.bfs_nframes = 1; in ath_tx_xmit_aggr()
3092 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, in ath_tx_xmit_aggr()
3101 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; in ath_tx_xmit_aggr()
3103 /* Track per-TID hardware queue depth correctly */ in ath_tx_xmit_aggr()
3104 tid->hwq_depth++; in ath_tx_xmit_aggr()
3107 if (bf->bf_state.bfs_dobaw) { in ath_tx_xmit_aggr()
3109 bf->bf_state.bfs_addedbaw = 1; in ath_tx_xmit_aggr()
3112 /* Set completion handler, multi-frame aggregate or not */ in ath_tx_xmit_aggr()
3113 bf->bf_comp = ath_tx_aggr_comp; in ath_tx_xmit_aggr()
3128 * If the queue isn't busy, direct-dispatch.
3140 struct mbuf *m0 = bf->bf_m; in ath_tx_swq()
3144 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ in ath_tx_swq()
3148 atid = &an->an_tid[tid]; in ath_tx_swq()
3154 /* XXX potentially duplicate info, re-check */ in ath_tx_swq()
3155 bf->bf_state.bfs_tid = tid; in ath_tx_swq()
3156 bf->bf_state.bfs_tx_queue = txq->axq_qnum; in ath_tx_swq()
3157 bf->bf_state.bfs_pri = pri; in ath_tx_swq()
3165 * If the node is in power-save and we're leaking a frame, in ath_tx_swq()
3186 * AMPDU running, queue single-frame if the hardware queue in ath_tx_swq()
3195 * TODO: maybe we should treat this as two policies - minimise in ath_tx_swq()
3210 * Note: if we're say, configured to do ADDBA but not A-MPDU in ath_tx_swq()
3211 * then maybe we want to still queue two non-aggregate frames in ath_tx_swq()
3212 * to the hardware. Again with the per-TID policy in ath_tx_swq()
3218 if (txq->axq_depth + txq->fifo.axq_depth == 0) { in ath_tx_swq()
3223 * Ensure it's definitely treated as a non-AMPDU in ath_tx_swq()
3224 * frame - this information may have been left in ath_tx_swq()
3227 bf->bf_state.bfs_aggr = 0; in ath_tx_swq()
3228 bf->bf_state.bfs_nframes = 1; in ath_tx_swq()
3243 * If we're not doing A-MPDU, be prepared to direct dispatch in ath_tx_swq()
3246 * traffic and non-aggregate traffic: we want to ensure in ath_tx_swq()
3247 * that non-aggregate stations get a few frames queued to the in ath_tx_swq()
3251 * to the hardware from a non-AMPDU client, check both here in ath_tx_swq()
3253 * non-AMPDU stations get a fair chance to transmit. in ath_tx_swq()
3256 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && in ath_tx_swq()
3257 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { in ath_tx_swq()
3299 if (an->an_tid[i].isfiltered == 1) in ath_tx_set_clrdmask()
3302 an->clrdmask = 1; in ath_tx_set_clrdmask()
3306 * Configure the per-TID node state.
3320 atid = &an->an_tid[i]; in ath_tx_tid_init()
3325 TAILQ_INIT(&atid->tid_q); in ath_tx_tid_init()
3326 TAILQ_INIT(&atid->filtq.tid_q); in ath_tx_tid_init()
3327 atid->tid = i; in ath_tx_tid_init()
3328 atid->an = an; in ath_tx_tid_init()
3330 atid->tx_buf[j] = NULL; in ath_tx_tid_init()
3331 atid->baw_head = atid->baw_tail = 0; in ath_tx_tid_init()
3332 atid->paused = 0; in ath_tx_tid_init()
3333 atid->sched = 0; in ath_tx_tid_init()
3334 atid->hwq_depth = 0; in ath_tx_tid_init()
3335 atid->cleanup_inprogress = 0; in ath_tx_tid_init()
3337 atid->ac = ATH_NONQOS_TID_AC; in ath_tx_tid_init()
3339 atid->ac = TID_TO_WME_AC(i); in ath_tx_tid_init()
3341 an->clrdmask = 1; /* Always start by setting this bit */ in ath_tx_tid_init()
3356 tid->paused++; in ath_tx_tid_pause()
3359 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_pause()
3360 tid->tid, in ath_tx_tid_pause()
3361 tid->paused); in ath_tx_tid_pause()
3377 if (tid->paused == 0) { in ath_tx_tid_resume()
3378 device_printf(sc->sc_dev, in ath_tx_tid_resume()
3381 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_resume()
3382 tid->tid); in ath_tx_tid_resume()
3384 tid->paused--; in ath_tx_tid_resume()
3390 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_resume()
3391 tid->tid, in ath_tx_tid_resume()
3392 tid->paused); in ath_tx_tid_resume()
3394 if (tid->paused) in ath_tx_tid_resume()
3401 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_resume()
3403 if (tid->axq_depth == 0) in ath_tx_tid_resume()
3407 if (tid->isfiltered == 1) { in ath_tx_tid_resume()
3432 if (!tid->isfiltered) in ath_tx_tid_filt_addbuf()
3440 sc->sc_stats.ast_tx_swfiltered++; in ath_tx_tid_filt_addbuf()
3457 if (! tid->isfiltered) { in ath_tx_tid_filt_comp_buf()
3459 __func__, tid->tid); in ath_tx_tid_filt_comp_buf()
3460 tid->isfiltered = 1; in ath_tx_tid_filt_comp_buf()
3472 * the TID if applicable. Otherwise we will wait for a node PS transition
3483 if (tid->hwq_depth != 0) in ath_tx_tid_filt_comp_complete()
3487 __func__, tid->tid); in ath_tx_tid_filt_comp_complete()
3488 if (tid->isfiltered == 1) { in ath_tx_tid_filt_comp_complete()
3489 tid->isfiltered = 0; in ath_tx_tid_filt_comp_complete()
3494 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_filt_comp_complete()
3530 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { in ath_tx_tid_filt_comp_single()
3531 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_tid_filt_comp_single()
3536 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_single()
3545 if (bf->bf_flags & ATH_BUF_BUSY) { in ath_tx_tid_filt_comp_single()
3546 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); in ath_tx_tid_filt_comp_single()
3548 "%s: busy buffer clone: %p -> %p\n", in ath_tx_tid_filt_comp_single()
3579 bf_next = bf->bf_next; in ath_tx_tid_filt_comp_aggr()
3580 bf->bf_next = NULL; /* Remove it from the aggr list */ in ath_tx_tid_filt_comp_aggr()
3585 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { in ath_tx_tid_filt_comp_aggr()
3586 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_tid_filt_comp_aggr()
3590 tid->tid, in ath_tx_tid_filt_comp_aggr()
3592 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3597 if (bf->bf_flags & ATH_BUF_BUSY) { in ath_tx_tid_filt_comp_aggr()
3598 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); in ath_tx_tid_filt_comp_aggr()
3600 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", in ath_tx_tid_filt_comp_aggr()
3601 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3613 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3637 tid->tid, in ath_tx_tid_bar_suspend()
3638 tid->bar_wait, in ath_tx_tid_bar_suspend()
3639 tid->bar_tx); in ath_tx_tid_bar_suspend()
3642 if (tid->bar_tx) { in ath_tx_tid_bar_suspend()
3648 if (tid->bar_wait) in ath_tx_tid_bar_suspend()
3652 tid->bar_wait = 1; in ath_tx_tid_bar_suspend()
3659 * We've finished with BAR handling - either we succeeded or
3671 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_unsuspend()
3673 tid->tid); in ath_tx_tid_bar_unsuspend()
3675 if (tid->bar_tx == 0 || tid->bar_wait == 0) { in ath_tx_tid_bar_unsuspend()
3678 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_unsuspend()
3679 tid->tid, tid->bar_tx, tid->bar_wait); in ath_tx_tid_bar_unsuspend()
3682 tid->bar_tx = tid->bar_wait = 0; in ath_tx_tid_bar_unsuspend()
3697 if (tid->bar_wait == 0 || tid->hwq_depth > 0) in ath_tx_tid_bar_tx_ready()
3703 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx_ready()
3705 tid->tid); in ath_tx_tid_bar_tx_ready()
3732 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3734 tid->tid); in ath_tx_tid_bar_tx()
3736 tap = ath_tx_get_tx_tid(tid->an, tid->tid); in ath_tx_tid_bar_tx()
3741 if (tid->bar_wait == 0 || tid->bar_tx == 1) { in ath_tx_tid_bar_tx()
3744 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_tx()
3745 tid->tid, tid->bar_tx, tid->bar_wait); in ath_tx_tid_bar_tx()
3750 if (tid->hwq_depth > 0) { in ath_tx_tid_bar_tx()
3754 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3756 tid->tid, in ath_tx_tid_bar_tx()
3757 tid->hwq_depth); in ath_tx_tid_bar_tx()
3762 tid->bar_tx = 1; in ath_tx_tid_bar_tx()
3768 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_bar_tx()
3779 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3781 tid->tid, in ath_tx_tid_bar_tx()
3782 tap->txa_start); in ath_tx_tid_bar_tx()
3788 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { in ath_tx_tid_bar_tx()
3798 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_tx()
3799 tid->tid); in ath_tx_tid_bar_tx()
3814 if (ath_tx_ampdu_running(sc, an, tid->tid) && in ath_tx_tid_drain_pkt()
3815 bf->bf_state.bfs_dobaw) { in ath_tx_tid_drain_pkt()
3821 if (bf->bf_state.bfs_retries > 0) { in ath_tx_tid_drain_pkt()
3823 bf->bf_state.bfs_dobaw = 0; in ath_tx_tid_drain_pkt()
3827 * This has become a non-fatal error now in ath_tx_tid_drain_pkt()
3829 if (! bf->bf_state.bfs_addedbaw) in ath_tx_tid_drain_pkt()
3832 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_drain_pkt()
3837 bf->bf_next = NULL; in ath_tx_tid_drain_pkt()
3847 struct ieee80211_node *ni = &an->an_node; in ath_tx_tid_drain_print()
3851 txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_drain_print()
3852 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_tid_drain_print()
3859 ni->ni_macaddr, in ath_tx_tid_drain_print()
3862 bf->bf_state.bfs_addedbaw, in ath_tx_tid_drain_print()
3863 bf->bf_state.bfs_dobaw, in ath_tx_tid_drain_print()
3864 SEQNO(bf->bf_state.bfs_seqno), in ath_tx_tid_drain_print()
3865 bf->bf_state.bfs_retries); in ath_tx_tid_drain_print()
3870 ni->ni_macaddr, in ath_tx_tid_drain_print()
3873 txq->axq_qnum, in ath_tx_tid_drain_print()
3874 txq->axq_depth, in ath_tx_tid_drain_print()
3875 txq->axq_aggr_depth); in ath_tx_tid_drain_print()
3881 ni->ni_macaddr, in ath_tx_tid_drain_print()
3884 tid->axq_depth, in ath_tx_tid_drain_print()
3885 tid->hwq_depth, in ath_tx_tid_drain_print()
3886 tid->bar_wait, in ath_tx_tid_drain_print()
3887 tid->isfiltered); in ath_tx_tid_drain_print()
3895 ni->ni_macaddr, in ath_tx_tid_drain_print()
3897 tid->tid, in ath_tx_tid_drain_print()
3898 tid->sched, tid->paused, in ath_tx_tid_drain_print()
3899 tid->incomp, tid->baw_head, in ath_tx_tid_drain_print()
3900 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, in ath_tx_tid_drain_print()
3901 ni->ni_txseqs[tid->tid]); in ath_tx_tid_drain_print()
3905 ieee80211_dump_pkt(ni->ni_ic, in ath_tx_tid_drain_print()
3906 mtod(bf->bf_m, const uint8_t *), in ath_tx_tid_drain_print()
3907 bf->bf_m->m_len, 0, -1); in ath_tx_tid_drain_print()
3931 struct ieee80211_node *ni = &an->an_node; in ath_tx_tid_drain()
3934 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_tid_drain()
3978 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_drain()
3989 * when the packet is first transmitted - and thus the "retries" in ath_tx_tid_drain()
3993 /* But don't do it for non-QoS TIDs */ in ath_tx_tid_drain()
3999 ni->ni_macaddr, in ath_tx_tid_drain()
4002 tid->tid, in ath_tx_tid_drain()
4003 tap->txa_start); in ath_tx_tid_drain()
4005 ni->ni_txseqs[tid->tid] = tap->txa_start; in ath_tx_tid_drain()
4006 tid->baw_tail = tid->baw_head; in ath_tx_tid_drain()
4020 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; in ath_tx_tid_reset()
4021 tid->paused = tid->sched = tid->addba_tx_pending = 0; in ath_tx_tid_reset()
4022 tid->incomp = tid->cleanup_inprogress = 0; in ath_tx_tid_reset()
4030 * XXX I'm not going through resume here - I don't want the in ath_tx_tid_reset()
4034 if (tid->bar_wait) { in ath_tx_tid_reset()
4035 if (tid->paused > 0) { in ath_tx_tid_reset()
4036 tid->paused --; in ath_tx_tid_reset()
4049 if (tid->isfiltered) { in ath_tx_tid_reset()
4050 if (tid->paused > 0) { in ath_tx_tid_reset()
4051 tid->paused --; in ath_tx_tid_reset()
4060 tid->bar_wait = 0; in ath_tx_tid_reset()
4061 tid->bar_tx = 0; in ath_tx_tid_reset()
4062 tid->isfiltered = 0; in ath_tx_tid_reset()
4063 tid->sched = 0; in ath_tx_tid_reset()
4064 tid->addba_tx_pending = 0; in ath_tx_tid_reset()
4068 * frames for that node as non-aggregate; or mark the ath_node in ath_tx_tid_reset()
4071 * do a complete hard reset of state here - no pause, no in ath_tx_tid_reset()
4094 &an->an_node); in ath_tx_node_flush()
4101 an->an_node.ni_macaddr, in ath_tx_node_flush()
4103 an->an_is_powersave, in ath_tx_node_flush()
4104 an->an_stack_psq, in ath_tx_node_flush()
4105 an->an_tim_set, in ath_tx_node_flush()
4106 an->an_swq_depth, in ath_tx_node_flush()
4107 an->clrdmask, in ath_tx_node_flush()
4108 an->an_leak_count); in ath_tx_node_flush()
4111 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_node_flush()
4119 /* Reset the per-TID pause, BAR, etc state */ in ath_tx_node_flush()
4126 an->an_leak_count = 0; in ath_tx_node_flush()
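/*
 * Illustrative sketch (not driver code) of the "drain under the lock,
 * complete outside the lock" pattern that functions such as
 * ath_tx_tid_drain() use via the bf_cq list: frames are moved from the
 * software queues onto a local completion list while the TX lock is held,
 * and only after the lock is dropped is the (potentially heavy) completion
 * work run.  The names below are invented, a userland mutex stands in for
 * the driver's TX lock, and <sys/queue.h> is the BSD version.
 */
#include <sys/queue.h>
#include <pthread.h>

struct frame {
	TAILQ_ENTRY(frame) link;
};
TAILQ_HEAD(frameq, frame);

static void
flush_queue(pthread_mutex_t *lock, struct frameq *swq,
    void (*complete)(struct frame *))
{
	struct frameq cq;
	struct frame *f;

	TAILQ_INIT(&cq);

	pthread_mutex_lock(lock);
	while ((f = TAILQ_FIRST(swq)) != NULL) {
		TAILQ_REMOVE(swq, f, link);
		TAILQ_INSERT_TAIL(&cq, f, link);	/* defer completion */
	}
	pthread_mutex_unlock(lock);

	/* Now run completions without holding the lock. */
	while ((f = TAILQ_FIRST(&cq)) != NULL) {
		TAILQ_REMOVE(&cq, f, link);
		complete(f);
	}
}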
4153 while (! TAILQ_EMPTY(&txq->axq_tidq)) { in ath_tx_txq_drain()
4154 tid = TAILQ_FIRST(&txq->axq_tidq); in ath_tx_txq_drain()
4155 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); in ath_tx_txq_drain()
4168 * Handle completion of non-aggregate session frames.
4171 * non-aggregate frames!
4173 * Software retransmission of non-aggregate frames needs to obey
4186 struct ieee80211_node *ni = bf->bf_node; in ath_tx_normal_comp()
4188 int tid = bf->bf_state.bfs_tid; in ath_tx_normal_comp()
4189 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_normal_comp()
4190 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; in ath_tx_normal_comp()
4196 __func__, bf, fail, atid->hwq_depth - 1); in ath_tx_normal_comp()
4198 atid->hwq_depth--; in ath_tx_normal_comp()
4205 if ((ts->ts_status & HAL_TXERR_FILT) || in ath_tx_normal_comp()
4206 (ts->ts_status != 0 && atid->isfiltered)) { in ath_tx_normal_comp()
4210 atid->isfiltered, in ath_tx_normal_comp()
4211 ts->ts_status); in ath_tx_normal_comp()
4215 if (atid->isfiltered) in ath_tx_normal_comp()
4217 if (atid->hwq_depth < 0) in ath_tx_normal_comp()
4219 __func__, atid->hwq_depth); in ath_tx_normal_comp()
4223 if (atid->cleanup_inprogress) { in ath_tx_normal_comp()
4224 atid->incomp--; in ath_tx_normal_comp()
4225 if (atid->incomp == 0) { in ath_tx_normal_comp()
4229 atid->cleanup_inprogress = 0; in ath_tx_normal_comp()
4239 * for this end-node that has CLRDMASK set, so it's quite possible in ath_tx_normal_comp()
4240 * that a filtered frame will be followed by a non-filtered in ath_tx_normal_comp()
4245 if (atid->isfiltered) in ath_tx_normal_comp()
4253 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) in ath_tx_normal_comp()
4254 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, in ath_tx_normal_comp()
4256 bf->bf_state.bfs_pktlen, in ath_tx_normal_comp()
4257 bf->bf_state.bfs_pktlen, in ath_tx_normal_comp()
4258 1, (ts->ts_status == 0) ? 0 : 1); in ath_tx_normal_comp()
4265 * an A-MPDU.
4267 * There's no need to update the BAW here - the session is being
4273 struct ieee80211_node *ni = bf->bf_node; in ath_tx_comp_cleanup_unaggr()
4275 int tid = bf->bf_state.bfs_tid; in ath_tx_comp_cleanup_unaggr()
4276 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_comp_cleanup_unaggr()
4279 __func__, tid, atid->incomp); in ath_tx_comp_cleanup_unaggr()
4282 atid->incomp--; in ath_tx_comp_cleanup_unaggr()
4285 if (bf->bf_state.bfs_dobaw) { in ath_tx_comp_cleanup_unaggr()
4287 if (!bf->bf_state.bfs_addedbaw) in ath_tx_comp_cleanup_unaggr()
4290 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_comp_cleanup_unaggr()
4293 if (atid->incomp == 0) { in ath_tx_comp_cleanup_unaggr()
4297 atid->cleanup_inprogress = 0; in ath_tx_comp_cleanup_unaggr()
4314 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_tid_cleanup_frame()
4325 * Loop over all the frames in the aggregate. in ath_tx_tid_cleanup_frame()
4329 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ in ath_tx_tid_cleanup_frame()
4336 * BAW - we shouldn't have it be in an aggregate in ath_tx_tid_cleanup_frame()
4339 if (bf->bf_state.bfs_addedbaw) { in ath_tx_tid_cleanup_frame()
4341 bf->bf_state.bfs_dobaw = 0; in ath_tx_tid_cleanup_frame()
4347 bf->bf_comp = ath_tx_normal_comp; in ath_tx_tid_cleanup_frame()
4348 bf->bf_next = NULL; in ath_tx_tid_cleanup_frame()
4379 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_tid_cleanup()
4386 atid->cleanup_inprogress); in ath_tx_tid_cleanup()
4403 * + Fix the completion function to be non-aggregate in ath_tx_tid_cleanup()
4428 if (atid->hwq_depth > 0) { in ath_tx_tid_cleanup()
4430 * XXX how about we kill atid->incomp, and instead in ath_tx_tid_cleanup()
4431 * replace it with a macro that checks that atid->hwq_depth in ath_tx_tid_cleanup()
4434 atid->incomp = atid->hwq_depth; in ath_tx_tid_cleanup()
4435 atid->cleanup_inprogress = 1; in ath_tx_tid_cleanup()
4438 if (atid->cleanup_inprogress) in ath_tx_tid_cleanup()
4441 __func__, tid, atid->incomp); in ath_tx_tid_cleanup()
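/*
 * Sketch of what the XXX suggestion above might look like: instead of
 * maintaining a separate "incomp" countdown, cleanup completion could be
 * keyed off hwq_depth reaching zero.  This is only an illustration of the
 * idea; the field names mirror struct ath_tid, but the macro itself is
 * hypothetical and not part of the driver.
 */
struct tid_cleanup_state {
	int hwq_depth;		/* frames outstanding on the hardware queue */
	int cleanup_inprogress;	/* 1 while a BA teardown cleanup is pending */
};

#define	TID_CLEANUP_DONE(ts)						\
	((ts)->cleanup_inprogress && (ts)->hwq_depth == 0)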
4475 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); in ath_tx_retry_clone()
4493 if (bf->bf_state.bfs_dobaw) in ath_tx_retry_clone()
4508 * non-aggregate frames in an aggregate session are
4509 * transmitted in-order; they just have to be in-BAW)
4515 struct ieee80211_node *ni = bf->bf_node; in ath_tx_aggr_retry_unaggr()
4517 int tid = bf->bf_state.bfs_tid; in ath_tx_aggr_retry_unaggr()
4518 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_aggr_retry_unaggr()
4533 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && in ath_tx_aggr_retry_unaggr()
4534 (bf->bf_flags & ATH_BUF_BUSY)) { in ath_tx_aggr_retry_unaggr()
4541 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; in ath_tx_aggr_retry_unaggr()
4544 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { in ath_tx_aggr_retry_unaggr()
4547 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_aggr_retry_unaggr()
4548 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_aggr_retry_unaggr()
4551 if (bf->bf_state.bfs_dobaw) { in ath_tx_aggr_retry_unaggr()
4553 if (! bf->bf_state.bfs_addedbaw) in ath_tx_aggr_retry_unaggr()
4556 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_aggr_retry_unaggr()
4558 bf->bf_state.bfs_dobaw = 0; in ath_tx_aggr_retry_unaggr()
4580 sc->sc_stats.ast_tx_swretries++; in ath_tx_aggr_retry_unaggr()
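/*
 * Illustrative sketch (not driver code) of the software-retry decision made
 * above: once the retry count reaches SWMAX_RETRIES the frame is dropped
 * (and its BAW slot released) rather than retried again, and a frame whose
 * buffer is still marked busy must be cloned before it can be re-queued.
 * The enum and function are invented for illustration.
 */
enum sw_retry_action {
	SW_RETRY,		/* re-queue the same buffer */
	SW_RETRY_CLONE,		/* clone first, then re-queue */
	SW_RETRY_DROP		/* give up: free the frame, update the BAW */
};

static enum sw_retry_action
sw_retry_decide(int retries, int buf_busy, int max_retries)
{
	if (retries >= max_retries)
		return (SW_RETRY_DROP);
	if (buf_busy)
		return (SW_RETRY_CLONE);
	return (SW_RETRY);
}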
4606 struct ieee80211_node *ni = bf->bf_node; in ath_tx_retry_subframe()
4608 int tid = bf->bf_state.bfs_tid; in ath_tx_retry_subframe()
4609 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_retry_subframe()
4614 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); in ath_tx_retry_subframe()
4615 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); in ath_tx_retry_subframe()
4617 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ in ath_tx_retry_subframe()
4627 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && in ath_tx_retry_subframe()
4628 (bf->bf_flags & ATH_BUF_BUSY)) { in ath_tx_retry_subframe()
4635 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; in ath_tx_retry_subframe()
4638 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { in ath_tx_retry_subframe()
4639 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_retry_subframe()
4642 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_retry_subframe()
4644 if (!bf->bf_state.bfs_addedbaw) in ath_tx_retry_subframe()
4647 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_retry_subframe()
4648 bf->bf_state.bfs_dobaw = 0; in ath_tx_retry_subframe()
4653 sc->sc_stats.ast_tx_swretries++; in ath_tx_retry_subframe()
4654 bf->bf_next = NULL; /* Just to make sure */ in ath_tx_retry_subframe()
4657 bf->bf_state.bfs_aggr = 0; in ath_tx_retry_subframe()
4658 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ in ath_tx_retry_subframe()
4659 bf->bf_state.bfs_nframes = 1; in ath_tx_retry_subframe()
4672 struct ieee80211_node *ni = bf_first->bf_node; in ath_tx_comp_aggr_error()
4684 * Update rate control - all frames have failed. in ath_tx_comp_aggr_error()
4686 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, in ath_tx_comp_aggr_error()
4687 &bf_first->bf_status.ds_txstat, in ath_tx_comp_aggr_error()
4688 bf_first->bf_state.bfs_al, in ath_tx_comp_aggr_error()
4689 bf_first->bf_state.bfs_rc_maxpktlen, in ath_tx_comp_aggr_error()
4690 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); in ath_tx_comp_aggr_error()
4693 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_comp_aggr_error()
4694 sc->sc_stats.ast_tx_aggr_failall++; in ath_tx_comp_aggr_error()
4699 bf_next = bf->bf_next; in ath_tx_comp_aggr_error()
4700 bf->bf_next = NULL; /* Remove it from the aggr list */ in ath_tx_comp_aggr_error()
4701 sc->sc_stats.ast_tx_aggr_fail++; in ath_tx_comp_aggr_error()
4704 bf->bf_next = NULL; in ath_tx_comp_aggr_error()
4717 * Schedule the TID to be re-tried. in ath_tx_comp_aggr_error()
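/*
 * Illustrative sketch (not driver code) of the list-walk idiom used above:
 * when an aggregate chain is torn down, bf_next is sampled before the
 * current frame is detached and handed off, because the handoff (queueing
 * it for completion or retry) may reuse or free the frame.  Names are
 * invented for illustration.
 */
struct subframe {
	struct subframe *next;
};

static void
walk_and_detach(struct subframe *head, void (*handoff)(struct subframe *))
{
	struct subframe *sf, *sf_next;

	for (sf = head; sf != NULL; sf = sf_next) {
		sf_next = sf->next;	/* sample before detaching */
		sf->next = NULL;	/* remove from the aggregate list */
		handoff(sf);		/* may free or re-queue sf */
	}
}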
4749 * Handle clean-up of packets from an aggregate list.
4751 * There's no need to update the BAW here - the session is being
4758 struct ieee80211_node *ni = bf_first->bf_node; in ath_tx_comp_cleanup_aggr()
4760 int tid = bf_first->bf_state.bfs_tid; in ath_tx_comp_cleanup_aggr()
4761 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_comp_cleanup_aggr()
4766 atid->incomp--; in ath_tx_comp_cleanup_aggr()
4772 if (bf->bf_state.bfs_dobaw) { in ath_tx_comp_cleanup_aggr()
4774 if (!bf->bf_state.bfs_addedbaw) in ath_tx_comp_cleanup_aggr()
4777 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_comp_cleanup_aggr()
4779 bf = bf->bf_next; in ath_tx_comp_cleanup_aggr()
4782 if (atid->incomp == 0) { in ath_tx_comp_cleanup_aggr()
4786 atid->cleanup_inprogress = 0; in ath_tx_comp_cleanup_aggr()
4791 /* XXX why would we send a BAR when transitioning to non-aggregation? */ in ath_tx_comp_cleanup_aggr()
4804 bf_next = bf->bf_next; in ath_tx_comp_cleanup_aggr()
4805 bf->bf_next = NULL; in ath_tx_comp_cleanup_aggr()
4821 //struct ath_desc *ds = bf->bf_lastds;
4822 struct ieee80211_node *ni = bf_first->bf_node;
4824 int tid = bf_first->bf_state.bfs_tid;
4825 struct ath_tid *atid = &an->an_tid[tid];
4844 __func__, atid->hwq_depth);
4847 * Take a copy; this may be needed -after- bf_first
4850 ts = bf_first->bf_status.ds_txstat;
4851 agglen = bf_first->bf_state.bfs_al;
4852 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4860 atid->hwq_depth--;
4861 if (atid->hwq_depth < 0)
4863 __func__, atid->hwq_depth);
4872 if (atid->isfiltered)
4878 if (atid->cleanup_inprogress) {
4879 if (atid->isfiltered)
4896 (ts.ts_status != 0 && atid->isfiltered)) {
4904 if (bf->bf_state.bfs_addedbaw)
4906 if (bf->bf_state.bfs_dobaw) {
4908 if (!bf->bf_state.bfs_addedbaw)
4912 SEQNO(bf->bf_state.bfs_seqno));
4914 bf->bf_state.bfs_dobaw = 0;
4934 pktlen = bf_first->bf_state.bfs_pktlen;
4955 * extract starting sequence and block-ack bitmap
4957 /* XXX endian-ness of seq_st, ba? */
4961 isaggr = bf_first->bf_state.bfs_aggr;
4971 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4976 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4984 * out tid 1 - the aggregate frames are all marked as TID 1,
4999 device_printf(sc->sc_dev,
5003 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
5010 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5019 nf = bf_first->bf_state.bfs_nframes;
5041 SEQNO(bf->bf_state.bfs_seqno));
5042 bf_next = bf->bf_next;
5043 bf->bf_next = NULL; /* Remove it from the aggr list */
5047 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5051 sc->sc_stats.ast_tx_aggr_ok++;
5053 bf->bf_state.bfs_dobaw = 0;
5054 if (!bf->bf_state.bfs_addedbaw)
5057 __func__, SEQNO(bf->bf_state.bfs_seqno));
5058 bf->bf_next = NULL;
5061 sc->sc_stats.ast_tx_aggr_fail++;
5064 bf->bf_next = NULL;
5076 * have a consistent view of what -was- in the BAW.
5080 txseq = tap->txa_start;
5108 "%s: txa_start now %d\n", __func__, tap->txa_start);
5124 * If the queue is filtered, re-schedule as required.
5127 * for this end-node that has CLRDMASK set, so it's quite possible
5128 * that a filtered frame will be followed by a non-filtered
5133 if (atid->isfiltered)
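/*
 * Illustrative sketch (not driver code) of how a subframe's fate is read
 * out of the block-ack status processed above: the hardware reports a BA
 * starting sequence (seq_st) and a 64-bit bitmap (ba[]); a subframe with
 * sequence number seqno was ACKed iff the bit at offset
 * (seqno - seq_st) mod 4096 is set and that offset fits inside the 64-entry
 * window.  The helper names are invented; the driver uses its own macros
 * for this.
 */
#include <stdint.h>

#define	SEQ_MASK	0x0fff		/* 802.11 sequence space is 12 bits */
#define	BA_BITMAP_BITS	64

static int
ba_index(uint16_t seq_st, uint16_t seqno)
{
	return ((seqno - seq_st) & SEQ_MASK);
}

static int
ba_isset(const uint32_t ba[2], uint16_t seq_st, uint16_t seqno)
{
	int idx = ba_index(seq_st, seqno);

	if (idx >= BA_BITMAP_BITS)
		return (0);		/* outside the reported window */
	return ((ba[idx / 32] >> (idx % 32)) & 1);
}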
5163 struct ieee80211_node *ni = bf->bf_node;
5165 int tid = bf->bf_state.bfs_tid;
5166 struct ath_tid *atid = &an->an_tid[tid];
5174 ts = bf->bf_status.ds_txstat;
5182 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5183 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5184 &bf->bf_status.ds_txstat,
5185 bf->bf_state.bfs_pktlen,
5186 bf->bf_state.bfs_pktlen,
5190 * This is called early so atid->hwq_depth can be tracked.
5201 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5202 SEQNO(bf->bf_state.bfs_seqno));
5204 atid->hwq_depth--;
5205 if (atid->hwq_depth < 0)
5207 __func__, atid->hwq_depth);
5214 if (atid->isfiltered)
5223 if (atid->cleanup_inprogress) {
5224 if (atid->isfiltered)
5243 * However - a busy buffer can't be added to the filtered
5248 (ts.ts_status != 0 && atid->isfiltered)) {
5262 if (bf->bf_state.bfs_addedbaw)
5264 if (bf->bf_state.bfs_dobaw) {
5266 if (!bf->bf_state.bfs_addedbaw)
5269 __func__, SEQNO(bf->bf_state.bfs_seqno));
5271 bf->bf_state.bfs_dobaw = 0;
5302 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5314 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5315 if (bf->bf_state.bfs_dobaw) {
5317 bf->bf_state.bfs_dobaw = 0;
5318 if (!bf->bf_state.bfs_addedbaw)
5321 __func__, SEQNO(bf->bf_state.bfs_seqno));
5325 * If the queue is filtered, re-schedule as required.
5328 * for this end-node that has CLRDMASK set, so it's quite possible
5329 * that a filtered frame will be followed by a non-filtered
5334 if (atid->isfiltered)
5352 if (bf->bf_state.bfs_aggr)
5376 tap = ath_tx_get_tx_tid(an, tid->tid);
5383 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5390 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5391 SEQNO(bf->bf_state.bfs_seqno)))) {
5396 if (! bf->bf_state.bfs_dobaw) {
5400 nbytes += bf->bf_state.bfs_pktlen;
5409 if (an->an_leak_count) {
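/*
 * Illustrative sketch (not driver code) of the BAW_WITHIN()-style test used
 * above: a sequence number is "inside" the block-ack window if its distance
 * from the window's left edge (txa_start), computed modulo the 4096-entry
 * sequence space, is less than the negotiated window size (txa_wnd).  This
 * helper is a standalone restatement, not the driver's macro.
 */
#include <stdint.h>

static int
baw_within(uint16_t left_edge, uint16_t wnd, uint16_t seqno)
{
	return (((seqno - left_edge) & 0x0fff) < wnd);
}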
5427 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5433 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5441 tap = ath_tx_get_tx_tid(an, tid->tid);
5443 if (tid->tid == IEEE80211_NONQOS_TID)
5470 if (! bf->bf_state.bfs_dobaw) {
5472 "%s: non-baw packet\n",
5476 if (bf->bf_state.bfs_nframes > 1)
5480 bf->bf_state.bfs_aggr,
5481 bf->bf_state.bfs_nframes);
5484 * This shouldn't happen - such frames shouldn't
5489 bf->bf_state.bfs_aggr = 0;
5490 bf->bf_state.bfs_nframes = 1;
5495 ath_tx_do_ratelookup(sc, bf, tid->tid,
5496 bf->bf_state.bfs_pktlen, false);
5502 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5504 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5513 * Loop over the swq to find out how long
5518 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5537 * No frames to be picked up - out of BAW
5549 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5552 * If it's the only frame, send it as non-aggregate
5556 if (bf->bf_state.bfs_nframes == 1) {
5558 "%s: single-frame aggregate\n", __func__);
5563 bf->bf_state.bfs_aggr = 0;
5564 bf->bf_state.bfs_ndelim = 0;
5566 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5568 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5570 sc->sc_aggr_stats.aggr_single_pkt++;
5573 "%s: multi-frame aggregate: %d frames, "
5575 __func__, bf->bf_state.bfs_nframes,
5576 bf->bf_state.bfs_al);
5577 bf->bf_state.bfs_aggr = 1;
5578 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5579 sc->sc_aggr_stats.aggr_aggr_pkt++;
5605 /* Set completion handler, multi-frame aggregate or not */
5606 bf->bf_comp = ath_tx_aggr_comp;
5608 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5624 tid->hwq_depth++;
5634 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
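/*
 * Illustrative sketch (not driver code) of the dispatch above: after the
 * aggregate former has run, a burst of exactly one frame is transmitted as
 * a plain MPDU (the 11n aggregate bits are cleared), while anything longer
 * is marked as an A-MPDU.  The field and function names are invented.
 */
struct agg_result {
	int nframes;	/* frames gathered by the aggregate former */
	int is_aggr;	/* send as an A-MPDU? */
	int ndelim;	/* pad delimiters for the first subframe */
};

static void
finalize_burst(struct agg_result *ar)
{
	if (ar->nframes == 1) {
		/* Single frame: transmit as a normal (non-aggregate) MPDU. */
		ar->is_aggr = 0;
		ar->ndelim = 0;
	} else {
		/* Two or more frames: mark as an aggregate. */
		ar->is_aggr = 1;
	}
}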
5658 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5661 __func__, an, tid->tid);
5665 /* Check whether an AMPDU session is pending or running; if so, log it */
5666 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5668 __func__, tid->tid);
5669 if (ath_tx_ampdu_running(sc, an, tid->tid))
5671 __func__, tid->tid);
5692 if (tid->tid != bf->bf_state.bfs_tid) {
5694 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5695 tid->tid);
5698 bf->bf_comp = ath_tx_normal_comp;
5701 * Override this for now, until the non-aggregate
5704 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5710 ath_tx_do_ratelookup(sc, bf, tid->tid,
5711 bf->bf_state.bfs_pktlen, false);
5727 tid->hwq_depth++;
5753 * For non-EDMA chips, aggr frames that have been built are
5755 * There's no FIFO, so txq->axq_depth is what's been scheduled
5765 * The FIFO depth is what's in the hardware; the txq->axq_depth
5769 * into the EDMA FIFO. For multi-frame lists, this is the number
5774 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5775 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5776 sc->sc_aggr_stats.aggr_sched_nopkt++;
5781 * For non-EDMA chips, axq_depth is the "what's scheduled to
5786 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5787 sc->sc_aggr_stats.aggr_sched_nopkt++;
5791 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5793 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5799 __func__, tid->tid, tid->paused);
5802 * This node may be in power-save and we're leaking
5808 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5809 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5811 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5813 /* Not empty? Re-schedule */
5814 if (tid->axq_depth != 0)
5823 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5826 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
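/*
 * Illustrative sketch (not driver code) of the scheduler loop above: TIDs
 * with queued traffic sit on a per-hardware-queue list; each pass pulls
 * frames from the TID at the head, re-appends the TID if it still has
 * traffic, and stops once the hardware queue depth limit is reached or a
 * full pass has been made.  Types and names below are invented stand-ins,
 * and <sys/queue.h> is the BSD version (TAILQ_FOREACH_SAFE, TAILQ_LAST).
 */
#include <sys/queue.h>

struct sched_tid {
	TAILQ_ENTRY(sched_tid) qelem;
	int depth;			/* frames still queued in software */
};
TAILQ_HEAD(sched_tidq, sched_tid);

static void
sched_pass(struct sched_tidq *tidq, int *hwq_depth, int hwq_limit,
    int (*push_frames)(struct sched_tid *))
{
	struct sched_tid *tid, *next, *last;

	if (TAILQ_EMPTY(tidq))
		return;
	last = TAILQ_LAST(tidq, sched_tidq);

	TAILQ_FOREACH_SAFE(tid, tidq, qelem, next) {
		TAILQ_REMOVE(tidq, tid, qelem);

		*hwq_depth += push_frames(tid);	/* queue what we can */

		if (tid->depth != 0)		/* not empty? re-schedule */
			TAILQ_INSERT_TAIL(tidq, tid, qelem);

		if (*hwq_depth >= hwq_limit)	/* hardware queue is full */
			break;
		if (tid == last)		/* one full pass done */
			break;
	}
}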
5836 * but are pending a leaking frame in response to a ps-poll?
5855 struct ieee80211_node *ni = &an->an_node;
5861 tap = &ni->ni_tx_ampdu[tid];
5866 * Is AMPDU-TX running?
5880 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5884 * Is AMPDU-TX negotiation pending?
5898 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
5902 * Is AMPDU-TX pending for the given TID?
5917 struct ath_softc *sc = ni->ni_ic->ic_softc;
5918 int tid = tap->txa_tid;
5920 struct ath_tid *atid = &an->an_tid[tid];
5927 * However, net80211 will keep self-assigning sequence numbers
5935 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5951 if (atid->addba_tx_pending == 0) {
5953 atid->addba_tx_pending = 1;
5960 ni->ni_macaddr,
5965 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5967 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5979 * Note! net80211 keeps self-assigning sequence numbers until
5980 * ampdu is negotiated. This means the initially-negotiated BAW left
5981 * edge won't match the ni->ni_txseq.
5984 * ni->ni_txseq.
5987 * addba request should be tagged as aggregate and queued as non-aggregate
5995 struct ath_softc *sc = ni->ni_ic->ic_softc;
5996 int tid = tap->txa_tid;
5998 struct ath_tid *atid = &an->an_tid[tid];
6003 ni->ni_macaddr,
6009 __func__, tap->txa_start, ni->ni_txseqs[tid]);
6017 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6020 atid->addba_tx_pending = 0;
6026 tap->txa_start = ni->ni_txseqs[tid];
6041 struct ath_softc *sc = ni->ni_ic->ic_softc;
6042 int tid = tap->txa_tid;
6044 struct ath_tid *atid = &an->an_tid[tid];
6050 ni->ni_macaddr,
6059 if (atid->bar_wait) {
6065 atid->bar_tx = 1;
6071 sc->sc_addba_stop(ni, tap);
6087 * progress - it means something else is also doing
6090 if (atid->cleanup_inprogress) {
6097 if (! atid->cleanup_inprogress)
6130 tid = &an->an_tid[i];
6131 if (tid->hwq_depth == 0)
6136 an->an_node.ni_macaddr,
6143 if (! tid->cleanup_inprogress) {
6149 if (! tid->cleanup_inprogress)
6167 * ic->ic_addba_stop().
6169 * XXX This uses a hard-coded max BAR count value; the whole
6176 struct ath_softc *sc = ni->ni_ic->ic_softc;
6177 int tid = tap->txa_tid;
6179 struct ath_tid *atid = &an->an_tid[tid];
6180 int attempts = tap->txa_attempts;
6184 "%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%d\n",
6186 ni->ni_macaddr,
6188 tap->txa_tid,
6189 atid->tid,
6192 tap->txa_start,
6193 tap->txa_seqpending);
6207 old_txa_start = tap->txa_start;
6208 sc->sc_bar_response(ni, tap, status);
6209 if (tap->txa_start != old_txa_start) {
6210 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6213 tap->txa_start,
6216 tap->txa_start = old_txa_start;
6222 * XXX to a non-aggregate session. So we must unpause the
6230 if (atid->bar_tx == 0 || atid->bar_wait == 0)
6234 atid->bar_tx, atid->bar_wait);
6249 struct ath_softc *sc = ni->ni_ic->ic_softc;
6250 int tid = tap->txa_tid;
6252 struct ath_tid *atid = &an->an_tid[tid];
6257 ni->ni_macaddr,
6262 atid->addba_tx_pending = 0;
6266 sc->sc_addba_response_timeout(ni, tap);
6283 return (an->an_is_powersave);
6300 * doing node/TID operations. There are other complications -
6301 * the sched/unsched operations involve walking the per-txq
6316 if (an->an_is_powersave) {
6319 __func__, an->an_node.ni_macaddr, ":");
6325 atid = &an->an_tid[tid];
6326 txq = sc->sc_ac2q[atid->ac];
6332 an->an_is_powersave = 1;
6353 if (an->an_is_powersave == 0) {
6362 an->an_is_powersave = 0;
6366 an->an_leak_count = 0;
6369 atid = &an->an_tid[tid];
6370 txq = sc->sc_ac2q[atid->ac];
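/*
 * Illustrative sketch (not driver code) of the power-save transition above:
 * on entry to power save every TID belonging to the node is paused (so the
 * software queues stop feeding the hardware), and on wake-up each TID is
 * resumed again.  The names and the 16-TID count below are invented
 * stand-ins; in the driver the per-TID pause/resume is done under the
 * relevant TX queue lock.
 */
#define	NUM_TIDS	16

struct ps_tid {
	int paused;		/* pause reference count */
};

struct ps_node {
	int is_powersave;
	struct ps_tid tid[NUM_TIDS];
};

static void
node_set_powersave(struct ps_node *node, int enable)
{
	int i;

	if (node->is_powersave == enable)
		return;			/* already in the requested state */

	node->is_powersave = enable;
	for (i = 0; i < NUM_TIDS; i++) {
		if (enable)
			node->tid[i].paused++;		/* pause */
		else if (node->tid[i].paused > 0)
			node->tid[i].paused--;		/* resume */
	}
}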
6400 sc->sc_tx_desclen = sizeof(struct ath_desc);
6401 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6402 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6404 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6405 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6406 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6408 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6409 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6411 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
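/*
 * Illustrative sketch (not driver code) of the method-table pattern used in
 * the setup routine above: the legacy and EDMA transmit paths each plug
 * their own implementations into a small table of function pointers chosen
 * once at attach time, and the rest of the driver calls through the table.
 * The structure and names below are a simplified, invented stand-in for the
 * real sc_tx method block.
 */
struct tx_methods {
	int	(*xmit_setup)(void *sc);
	int	(*xmit_teardown)(void *sc);
	void	(*xmit_drain)(void *sc);
};

static int  legacy_setup(void *sc)    { (void)sc; return (0); }
static int  legacy_teardown(void *sc) { (void)sc; return (0); }
static void legacy_drain(void *sc)    { (void)sc; }

/* Chip attach code picks the implementation once, at attach time. */
static void
tx_methods_init_legacy(struct tx_methods *tx)
{
	tx->xmit_setup = legacy_setup;
	tx->xmit_teardown = legacy_teardown;
	tx->xmit_drain = legacy_drain;
}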