Lines Matching +full:cts +full:- +full:override
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
5 * Copyright (c) 2010-2012 Adrian Chadd, Xenion Pty Ltd
116 * What queue to throw the non-QoS TID traffic into
148 if (bf->bf_nseg == 0) in ath_tx_alq_post()
150 n = ((bf->bf_nseg - 1) / sc->sc_tx_nmaps) + 1; in ath_tx_alq_post()
151 for (i = 0, ds = (const char *) bf->bf_desc; in ath_tx_alq_post()
153 i++, ds += sc->sc_tx_desclen) { in ath_tx_alq_post()
154 if_ath_alq_post(&sc->sc_alq, in ath_tx_alq_post()
156 sc->sc_tx_desclen, in ath_tx_alq_post()
159 bf = bf->bf_next; in ath_tx_alq_post()
170 return ((sc->sc_ah->ah_magic == 0x20065416) || in ath_tx_is_11n()
171 (sc->sc_ah->ah_magic == 0x19741014)); in ath_tx_is_11n()
177 * Non-QoS frames get mapped to a TID so frames consistently
187 /* Non-QoS: map frame to a TID queue for software queueing */ in ath_tx_gettid()
191 /* QoS - fetch the TID from the header, ignore mbuf WME */ in ath_tx_gettid()
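The TID mapping the two comments above describe is small enough to sketch in full. The sketch below is an editorial illustration rather than a verbatim excerpt; it assumes net80211's IEEE80211_QOS_HAS_SEQ(), ieee80211_gettid() and IEEE80211_NONQOS_TID, and the helper name is hypothetical.

    static int
    example_gettid(const struct mbuf *m0)   /* hypothetical name, not in the driver */
    {
            const struct ieee80211_frame *wh = mtod(m0, const struct ieee80211_frame *);

            if (! IEEE80211_QOS_HAS_SEQ(wh))
                    return (IEEE80211_NONQOS_TID);  /* non-QoS: one shared software TID */
            return (ieee80211_gettid(wh));          /* QoS: TID taken from the 802.11 header */
    }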
200 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_set_retry()
202 if (bf->bf_state.bfs_isretried == 0) { in ath_tx_set_retry()
203 wh->i_fc[1] |= IEEE80211_FC1_RETRY; in ath_tx_set_retry()
204 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, in ath_tx_set_retry()
207 bf->bf_state.bfs_isretried = 1; in ath_tx_set_retry()
208 bf->bf_state.bfs_retries ++; in ath_tx_set_retry()
220 * For non-QoS frames, return the mbuf WME priority.
222 * This has implications that higher priority non-QoS traffic
223 * may end up being scheduled before other non-QoS traffic,
224 * leading to out-of-sequence packets being emitted.
240 * QoS data frame (sequence number or otherwise) - in ath_tx_getac()
248 * Otherwise - return mbuf QoS pri. in ath_tx_getac()
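The companion mapping in ath_tx_getac() can be sketched the same way; M_WME_GETAC() and TID_TO_WME_AC() are assumed net80211 macros and the helper name is again hypothetical.

    static int
    example_getac(const struct mbuf *m0)    /* hypothetical name, not in the driver */
    {
            const struct ieee80211_frame *wh = mtod(m0, const struct ieee80211_frame *);

            if (IEEE80211_QOS_HAS_SEQ(wh))  /* QoS: map the frame's own TID to its AC */
                    return (TID_TO_WME_AC(ieee80211_gettid(wh)));
            return (M_WME_GETAC(m0));       /* non-QoS: fall back to the mbuf WME AC */
    }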
282 for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { in ath_txfrag_setup()
283 /* XXX non-management? */ in ath_txfrag_setup()
309 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, in ath_tx_dmasetup()
310 bf->bf_segs, &bf->bf_nseg, in ath_tx_dmasetup()
314 bf->bf_nseg = ATH_MAX_SCATTER + 1; in ath_tx_dmasetup()
316 sc->sc_stats.ast_tx_busdma++; in ath_tx_dmasetup()
325 if (bf->bf_nseg > ATH_MAX_SCATTER) { /* too many desc's, linearize */ in ath_tx_dmasetup()
326 sc->sc_stats.ast_tx_linear++; in ath_tx_dmasetup()
330 sc->sc_stats.ast_tx_nombuf++; in ath_tx_dmasetup()
334 error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, in ath_tx_dmasetup()
335 bf->bf_segs, &bf->bf_nseg, in ath_tx_dmasetup()
338 sc->sc_stats.ast_tx_busdma++; in ath_tx_dmasetup()
342 KASSERT(bf->bf_nseg <= ATH_MAX_SCATTER, in ath_tx_dmasetup()
343 ("too many segments after defrag; nseg %u", bf->bf_nseg)); in ath_tx_dmasetup()
344 } else if (bf->bf_nseg == 0) { /* null packet, discard */ in ath_tx_dmasetup()
345 sc->sc_stats.ast_tx_nodata++; in ath_tx_dmasetup()
350 __func__, m0, m0->m_pkthdr.len); in ath_tx_dmasetup()
351 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); in ath_tx_dmasetup()
352 bf->bf_m = m0; in ath_tx_dmasetup()
358 * Chain together segments+descriptors for a frame - 11n or otherwise.
367 struct ath_hal *ah = sc->sc_ah; in ath_tx_chaindesclist()
379 struct ath_descdma *dd = &sc->sc_txdma; in ath_tx_chaindesclist()
390 numTxMaps = sc->sc_tx_nmaps; in ath_tx_chaindesclist()
396 ds = (char *) bf->bf_desc; in ath_tx_chaindesclist()
400 for (i = 0; i < bf->bf_nseg; i++) { in ath_tx_chaindesclist()
401 bufAddrList[bp] = bf->bf_segs[i].ds_addr; in ath_tx_chaindesclist()
402 segLenList[bp] = bf->bf_segs[i].ds_len; in ath_tx_chaindesclist()
409 if ((i != bf->bf_nseg - 1) && (bp < numTxMaps)) in ath_tx_chaindesclist()
417 if (i == bf->bf_nseg - 1) in ath_tx_chaindesclist()
421 bf->bf_daddr + dd->dd_descsize * (dsp + 1)); in ath_tx_chaindesclist()
432 , bf->bf_descid /* XXX desc id */ in ath_tx_chaindesclist()
433 , bf->bf_state.bfs_tx_queue in ath_tx_chaindesclist()
435 , i == bf->bf_nseg - 1 /* last segment */ in ath_tx_chaindesclist()
444 * sub-frames. Since the descriptors are in in ath_tx_chaindesclist()
445 * non-cacheable memory, this leads to some in ath_tx_chaindesclist()
449 ath_hal_clr11n_aggr(sc->sc_ah, (struct ath_desc *) ds); in ath_tx_chaindesclist()
456 ath_hal_set11n_aggr_last(sc->sc_ah, in ath_tx_chaindesclist()
467 ath_hal_set11n_aggr_middle(sc->sc_ah, in ath_tx_chaindesclist()
469 bf->bf_state.bfs_ndelim); in ath_tx_chaindesclist()
472 bf->bf_lastds = (struct ath_desc *) ds; in ath_tx_chaindesclist()
477 ds += sc->sc_tx_desclen; in ath_tx_chaindesclist()
486 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); in ath_tx_chaindesclist()
498 * conditionally for the pre-11n chips, and call ath_buf_set_rate
502 * and 4 if multi-rate retry is needed.
508 struct ath_rc_series *rc = bf->bf_state.bfs_rc; in ath_tx_set_ratectrl()
511 if (! bf->bf_state.bfs_ismrr) in ath_tx_set_ratectrl()
518 else if (bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) { in ath_tx_set_ratectrl()
525 * Always call - that way a retried descriptor will in ath_tx_set_ratectrl()
528 * XXX TODO: see if this is really needed - setting up in ath_tx_set_ratectrl()
535 ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc in ath_tx_set_ratectrl()
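The call started on the line above carries the three remaining rate series. A hedged sketch of how it is typically completed, assuming the HAL's eight-argument ath_hal_setupxtxdesc() form and the rc[] array declared earlier in this function:

    /* Series 1..3 rate codes and try counts for multi-rate retry */
    ath_hal_setupxtxdesc(sc->sc_ah, bf->bf_desc
        , rc[1].ratecode, rc[1].tries
        , rc[2].ratecode, rc[2].tries
        , rc[3].ratecode, rc[3].tries
    );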
547 * bf->bf_next.
553 struct ath_desc *ds0 = bf_first->bf_desc; in ath_tx_setds_11n()
556 __func__, bf_first->bf_state.bfs_nframes, in ath_tx_setds_11n()
557 bf_first->bf_state.bfs_al); in ath_tx_setds_11n()
561 if (bf->bf_state.bfs_txrate0 == 0) in ath_tx_setds_11n()
564 if (bf->bf_state.bfs_rc[0].ratecode == 0) in ath_tx_setds_11n()
569 * Setup all descriptors of all subframes - this will in ath_tx_setds_11n()
575 __func__, bf, bf->bf_nseg, bf->bf_state.bfs_pktlen, in ath_tx_setds_11n()
576 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_setds_11n()
579 * Setup the initial fields for the first descriptor - all in ath_tx_setds_11n()
580 * the non-11n specific stuff. in ath_tx_setds_11n()
582 ath_hal_setuptxdesc(sc->sc_ah, bf->bf_desc in ath_tx_setds_11n()
583 , bf->bf_state.bfs_pktlen /* packet length */ in ath_tx_setds_11n()
584 , bf->bf_state.bfs_hdrlen /* header length */ in ath_tx_setds_11n()
585 , bf->bf_state.bfs_atype /* Atheros packet type */ in ath_tx_setds_11n()
586 , bf->bf_state.bfs_txpower /* txpower */ in ath_tx_setds_11n()
587 , bf->bf_state.bfs_txrate0 in ath_tx_setds_11n()
588 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ in ath_tx_setds_11n()
589 , bf->bf_state.bfs_keyix /* key cache index */ in ath_tx_setds_11n()
590 , bf->bf_state.bfs_txantenna /* antenna mode */ in ath_tx_setds_11n()
591 , bf->bf_state.bfs_txflags | HAL_TXDESC_INTREQ /* flags */ in ath_tx_setds_11n()
592 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ in ath_tx_setds_11n()
593 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ in ath_tx_setds_11n()
604 ath_tx_set_ratectrl(sc, bf->bf_node, bf); in ath_tx_setds_11n()
608 * Setup the descriptors for a multi-descriptor frame. in ath_tx_setds_11n()
609 * This is both aggregate and non-aggregate aware. in ath_tx_setds_11n()
614 !! (bf->bf_next == NULL) /* is_last_subframe */ in ath_tx_setds_11n()
622 ath_hal_set11n_aggr_first(sc->sc_ah, in ath_tx_setds_11n()
624 bf->bf_state.bfs_al, in ath_tx_setds_11n()
625 bf->bf_state.bfs_ndelim); in ath_tx_setds_11n()
633 ath_hal_settxdesclink(sc->sc_ah, bf_prev->bf_lastds, in ath_tx_setds_11n()
634 bf->bf_daddr); in ath_tx_setds_11n()
638 bf = bf->bf_next; in ath_tx_setds_11n()
646 bf_first->bf_lastds = bf_prev->bf_lastds; in ath_tx_setds_11n()
652 bf_first->bf_last = bf_prev; in ath_tx_setds_11n()
655 * For non-AR9300 NICs, which require the rate control in ath_tx_setds_11n()
656 * in the final descriptor - let's set that up now. in ath_tx_setds_11n()
660 * if firstSeg is also true. For non-aggregate frames in ath_tx_setds_11n()
667 * non-cacheable memory for TX descriptors, but we'll just in ath_tx_setds_11n()
672 * is called on the final descriptor in an MPDU or A-MPDU - in ath_tx_setds_11n()
677 ath_hal_setuplasttxdesc(sc->sc_ah, bf_prev->bf_lastds, ds0); in ath_tx_setds_11n()
683 * Hand-off a frame to the multicast TX queue.
702 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, in ath_tx_handoff_mcast()
703 ("%s: busy status 0x%x", __func__, bf->bf_flags)); in ath_tx_handoff_mcast()
709 if (bf->bf_state.bfs_tx_queue != sc->sc_cabq->axq_qnum) { in ath_tx_handoff_mcast()
712 __func__, bf, bf->bf_state.bfs_tx_queue, in ath_tx_handoff_mcast()
713 txq->axq_qnum); in ath_tx_handoff_mcast()
722 wh = mtod(bf_last->bf_m, struct ieee80211_frame *); in ath_tx_handoff_mcast()
723 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; in ath_tx_handoff_mcast()
724 bus_dmamap_sync(sc->sc_dmat, bf_last->bf_dmamap, in ath_tx_handoff_mcast()
728 ath_hal_settxdesclink(sc->sc_ah, in ath_tx_handoff_mcast()
729 bf_last->bf_lastds, in ath_tx_handoff_mcast()
730 bf->bf_daddr); in ath_tx_handoff_mcast()
737 * Hand-off packet to a hardware queue.
743 struct ath_hal *ah = sc->sc_ah; in ath_tx_handoff_hw()
755 KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0, in ath_tx_handoff_hw()
756 ("%s: busy status 0x%x", __func__, bf->bf_flags)); in ath_tx_handoff_hw()
757 KASSERT(txq->axq_qnum != ATH_TXQ_SWQ, in ath_tx_handoff_hw()
765 if (sc->sc_txproc_cnt == 0 && sc->sc_txstart_cnt == 0) { in ath_tx_handoff_hw()
766 device_printf(sc->sc_dev, in ath_tx_handoff_hw()
804 "ath_tx_handoff: non-tdma: txq=%u, add bf=%p " in ath_tx_handoff_hw()
806 txq->axq_qnum, in ath_tx_handoff_hw()
808 txq->axq_depth); in ath_tx_handoff_hw()
816 if (txq->axq_link != NULL) { in ath_tx_handoff_hw()
817 *txq->axq_link = bf->bf_daddr; in ath_tx_handoff_hw()
820 txq->axq_qnum, txq->axq_link, in ath_tx_handoff_hw()
821 (caddr_t)bf->bf_daddr, bf->bf_desc, in ath_tx_handoff_hw()
822 txq->axq_depth); in ath_tx_handoff_hw()
824 "ath_tx_handoff: non-tdma: link[%u](%p)=%p (%p) " in ath_tx_handoff_hw()
826 txq->axq_qnum, txq->axq_link, in ath_tx_handoff_hw()
827 (caddr_t)bf->bf_daddr, bf->bf_desc, in ath_tx_handoff_hw()
828 bf->bf_lastds); in ath_tx_handoff_hw()
837 * So we just don't do that - if we hit the end of the list, in ath_tx_handoff_hw()
839 * re-start DMA by updating the link pointer of _that_ in ath_tx_handoff_hw()
842 if (! (txq->axq_flags & ATH_TXQ_PUTRUNNING)) { in ath_tx_handoff_hw()
843 bf_first = TAILQ_FIRST(&txq->axq_q); in ath_tx_handoff_hw()
844 txq->axq_flags |= ATH_TXQ_PUTRUNNING; in ath_tx_handoff_hw()
845 ath_hal_puttxbuf(ah, txq->axq_qnum, bf_first->bf_daddr); in ath_tx_handoff_hw()
848 __func__, txq->axq_qnum, in ath_tx_handoff_hw()
849 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, in ath_tx_handoff_hw()
850 txq->axq_depth); in ath_tx_handoff_hw()
854 txq->axq_qnum, in ath_tx_handoff_hw()
855 (caddr_t)bf_first->bf_daddr, bf_first->bf_desc, in ath_tx_handoff_hw()
856 bf_first->bf_lastds, in ath_tx_handoff_hw()
857 txq->axq_depth); in ath_tx_handoff_hw()
864 if (bf->bf_state.bfs_tx_queue != txq->axq_qnum) { in ath_tx_handoff_hw()
867 __func__, bf, bf->bf_state.bfs_tx_queue, in ath_tx_handoff_hw()
868 txq->axq_qnum); in ath_tx_handoff_hw()
874 if (bf->bf_state.bfs_aggr) in ath_tx_handoff_hw()
875 txq->axq_aggr_depth++; in ath_tx_handoff_hw()
880 ath_hal_gettxdesclinkptr(ah, bf->bf_lastds, &txq->axq_link); in ath_tx_handoff_hw()
892 * in that descriptor and then kick TxE here; it will re-read in ath_tx_handoff_hw()
897 ath_hal_txstart(ah, txq->axq_qnum); in ath_tx_handoff_hw()
900 "ath_tx_handoff: txq=%u, txstart", txq->axq_qnum); in ath_tx_handoff_hw()
916 bf = TAILQ_FIRST(&txq->axq_q); in ath_legacy_tx_dma_restart()
925 txq->axq_qnum, in ath_legacy_tx_dma_restart()
928 (uint32_t) bf->bf_daddr); in ath_legacy_tx_dma_restart()
931 if (sc->sc_debug & ATH_DEBUG_RESET) in ath_legacy_tx_dma_restart()
939 KASSERT((!(txq->axq_flags & ATH_TXQ_PUTRUNNING)), in ath_legacy_tx_dma_restart()
942 txq->axq_qnum)); in ath_legacy_tx_dma_restart()
944 ath_hal_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr); in ath_legacy_tx_dma_restart()
945 txq->axq_flags |= ATH_TXQ_PUTRUNNING; in ath_legacy_tx_dma_restart()
947 ath_hal_gettxdesclinkptr(sc->sc_ah, bf_last->bf_lastds, in ath_legacy_tx_dma_restart()
948 &txq->axq_link); in ath_legacy_tx_dma_restart()
949 ath_hal_txstart(sc->sc_ah, txq->axq_qnum); in ath_legacy_tx_dma_restart()
964 if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_TXDESC)) in ath_legacy_xmit_handoff()
968 if (txq->axq_qnum == ATH_TXQ_SWQ) in ath_legacy_xmit_handoff()
1011 * added to it prior to entry so m0->m_pkthdr.len will in ath_tx_tag_crypto()
1015 cip = k->wk_cipher; in ath_tx_tag_crypto()
1016 (*hdrlen) += cip->ic_header; in ath_tx_tag_crypto()
1017 (*pktlen) += cip->ic_header + cip->ic_trailer; in ath_tx_tag_crypto()
1019 if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) in ath_tx_tag_crypto()
1020 (*pktlen) += cip->ic_miclen; in ath_tx_tag_crypto()
1021 (*keyix) = k->wk_keyix; in ath_tx_tag_crypto()
1022 } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { in ath_tx_tag_crypto()
1026 (*keyix) = ni->ni_ucastkey.wk_keyix; in ath_tx_tag_crypto()
1050 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_calc_protection()
1051 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_calc_protection()
1053 flags = bf->bf_state.bfs_txflags; in ath_tx_calc_protection()
1054 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_calc_protection()
1055 shortPreamble = bf->bf_state.bfs_shpream; in ath_tx_calc_protection()
1056 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_calc_protection()
1059 if (bf->bf_flags & ATH_BUF_TOA_PROBE) { in ath_tx_calc_protection()
1062 bf->bf_state.bfs_doprot = 0; in ath_tx_calc_protection()
1068 * to use RTS/CTS or just CTS. Note that this is only in ath_tx_calc_protection()
1071 if ((ic->ic_flags & IEEE80211_F_USEPROT) && in ath_tx_calc_protection()
1072 rt->info[rix].phy == IEEE80211_T_OFDM && in ath_tx_calc_protection()
1074 bf->bf_state.bfs_doprot = 1; in ath_tx_calc_protection()
1076 if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) { in ath_tx_calc_protection()
1078 } else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { in ath_tx_calc_protection()
1083 * highest CCK rate for RTS/CTS. But stations in ath_tx_calc_protection()
1088 sc->sc_stats.ast_tx_protect++; in ath_tx_calc_protection()
1099 if ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) && in ath_tx_calc_protection()
1100 rt->info[rix].phy == IEEE80211_T_HT && in ath_tx_calc_protection()
1103 sc->sc_stats.ast_tx_htprotect++; in ath_tx_calc_protection()
1107 bf->bf_state.bfs_txflags = flags; in ath_tx_calc_protection()
1123 struct ath_hal *ah = sc->sc_ah; in ath_tx_calc_duration()
1124 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_calc_duration()
1125 int isfrag = bf->bf_m->m_flags & M_FRAG; in ath_tx_calc_duration()
1127 flags = bf->bf_state.bfs_txflags; in ath_tx_calc_duration()
1128 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_calc_duration()
1129 shortPreamble = bf->bf_state.bfs_shpream; in ath_tx_calc_duration()
1130 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_calc_duration()
1139 dur = rt->info[rix].spAckDuration; in ath_tx_calc_duration()
1141 dur = rt->info[rix].lpAckDuration; in ath_tx_calc_duration()
1142 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { in ath_tx_calc_duration()
1155 bf->bf_nextfraglen, in ath_tx_calc_duration()
1162 * fragment by disabling multi-rate retry which updates in ath_tx_calc_duration()
1163 * duration based on the multi-rate duration table. in ath_tx_calc_duration()
1165 bf->bf_state.bfs_ismrr = 0; in ath_tx_calc_duration()
1166 bf->bf_state.bfs_try0 = ATH_TXMGTTRY; in ath_tx_calc_duration()
1171 *(u_int16_t *)wh->i_dur = htole16(dur); in ath_tx_calc_duration()
1182 * CTS transmit rate is derived from the transmit rate in ath_tx_get_rtscts_rate()
1186 /* NB: cix is set above where RTS/CTS is enabled */ in ath_tx_get_rtscts_rate()
1188 ctsrate = rt->info[cix].rateCode; in ath_tx_get_rtscts_rate()
1192 ctsrate |= rt->info[cix].shortPreamble; in ath_tx_get_rtscts_rate()
1198 * Calculate the RTS/CTS duration for legacy frames.
1208 if (rt->info[cix].phy == IEEE80211_T_HT) { in ath_tx_calc_ctsduration()
1210 __func__, rt->info[cix].rateCode); in ath_tx_calc_ctsduration()
1211 return (-1); in ath_tx_calc_ctsduration()
1220 * NB: CTS is assumed the same size as an ACK so we can in ath_tx_calc_ctsduration()
1224 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ in ath_tx_calc_ctsduration()
1225 ctsduration += rt->info[cix].spAckDuration; in ath_tx_calc_ctsduration()
1229 ctsduration += rt->info[rix].spAckDuration; in ath_tx_calc_ctsduration()
1231 if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ in ath_tx_calc_ctsduration()
1232 ctsduration += rt->info[cix].lpAckDuration; in ath_tx_calc_ctsduration()
1236 ctsduration += rt->info[rix].lpAckDuration; in ath_tx_calc_ctsduration()
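The excerpts above show only the per-leg additions. A hedged sketch of how the legacy CTS duration is accumulated, assuming the HAL airtime helper ath_hal_computetxtime() (its exact argument list is an assumption here) and this function's shortPreamble, pktlen and flags parameters:

    int ctsduration = 0;

    if (shortPreamble) {
            if (flags & HAL_TXDESC_RTSENA)          /* SIFS + CTS */
                    ctsduration += rt->info[cix].spAckDuration;
            ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix,
                AH_TRUE, AH_TRUE);                  /* SIFS + data frame airtime */
            if ((flags & HAL_TXDESC_NOACK) == 0)    /* SIFS + ACK */
                    ctsduration += rt->info[rix].spAckDuration;
    } else {
            if (flags & HAL_TXDESC_RTSENA)          /* SIFS + CTS */
                    ctsduration += rt->info[cix].lpAckDuration;
            ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix,
                AH_FALSE, AH_TRUE);                 /* SIFS + data frame airtime */
            if ((flags & HAL_TXDESC_NOACK) == 0)    /* SIFS + ACK */
                    ctsduration += rt->info[rix].lpAckDuration;
    }

Note the CTS leg uses the control-rate index (cix) while the ACK leg uses the data-rate index (rix), matching the lines quoted above.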
1243 * Update the given ath_buf with updated rts/cts setup and duration
1246 * To support rate lookups for each software retry, the rts/cts rate
1247 * and cts duration must be re-calculated.
1249 * This function assumes the RTS/CTS flags have been set as needed;
1252 * XXX TODO: MRR need only be disabled for the pre-11n NICs.
1253 * XXX The 11n NICs support per-rate RTS/CTS configuration.
1260 uint8_t rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_set_rtscts()
1262 const HAL_RATE_TABLE *rt = sc->sc_currates; in ath_tx_set_rtscts()
1265 * No RTS/CTS enabled? Don't bother. in ath_tx_set_rtscts()
1267 if ((bf->bf_state.bfs_txflags & in ath_tx_set_rtscts()
1270 bf->bf_state.bfs_ctsrate = 0; in ath_tx_set_rtscts()
1271 bf->bf_state.bfs_ctsduration = 0; in ath_tx_set_rtscts()
1279 if (bf->bf_state.bfs_doprot) in ath_tx_set_rtscts()
1280 rix = sc->sc_protrix; in ath_tx_set_rtscts()
1282 rix = bf->bf_state.bfs_rc[0].rix; in ath_tx_set_rtscts()
1285 * If the raw path has hard-coded ctsrate0 to something, in ath_tx_set_rtscts()
1288 if (bf->bf_state.bfs_ctsrate0 != 0) in ath_tx_set_rtscts()
1289 cix = ath_tx_findrix(sc, bf->bf_state.bfs_ctsrate0); in ath_tx_set_rtscts()
1292 cix = rt->info[rix].controlRate; in ath_tx_set_rtscts()
1295 ctsrate = ath_tx_get_rtscts_rate(sc->sc_ah, rt, cix, in ath_tx_set_rtscts()
1296 bf->bf_state.bfs_shpream); in ath_tx_set_rtscts()
1300 ctsduration = ath_tx_calc_ctsduration(sc->sc_ah, rix, cix, in ath_tx_set_rtscts()
1301 bf->bf_state.bfs_shpream, bf->bf_state.bfs_pktlen, in ath_tx_set_rtscts()
1302 rt, bf->bf_state.bfs_txflags); in ath_tx_set_rtscts()
1305 bf->bf_state.bfs_ctsrate = ctsrate; in ath_tx_set_rtscts()
1306 bf->bf_state.bfs_ctsduration = ctsduration; in ath_tx_set_rtscts()
1309 * Must disable multi-rate retry when using RTS/CTS. in ath_tx_set_rtscts()
1311 if (!sc->sc_mrrprot) { in ath_tx_set_rtscts()
1312 bf->bf_state.bfs_ismrr = 0; in ath_tx_set_rtscts()
1313 bf->bf_state.bfs_try0 = in ath_tx_set_rtscts()
1314 bf->bf_state.bfs_rc[0].tries = ATH_TXMGTTRY; /* XXX ew */ in ath_tx_set_rtscts()
1319 * Setup the descriptor chain for a normal or fast-frame
1330 struct ath_desc *ds = bf->bf_desc; in ath_tx_setds()
1331 struct ath_hal *ah = sc->sc_ah; in ath_tx_setds()
1333 if (bf->bf_state.bfs_txrate0 == 0) in ath_tx_setds()
1338 , bf->bf_state.bfs_pktlen /* packet length */ in ath_tx_setds()
1339 , bf->bf_state.bfs_hdrlen /* header length */ in ath_tx_setds()
1340 , bf->bf_state.bfs_atype /* Atheros packet type */ in ath_tx_setds()
1341 , bf->bf_state.bfs_txpower /* txpower */ in ath_tx_setds()
1342 , bf->bf_state.bfs_txrate0 in ath_tx_setds()
1343 , bf->bf_state.bfs_try0 /* series 0 rate/tries */ in ath_tx_setds()
1344 , bf->bf_state.bfs_keyix /* key cache index */ in ath_tx_setds()
1345 , bf->bf_state.bfs_txantenna /* antenna mode */ in ath_tx_setds()
1346 , bf->bf_state.bfs_txflags /* flags */ in ath_tx_setds()
1347 , bf->bf_state.bfs_ctsrate /* rts/cts rate */ in ath_tx_setds()
1348 , bf->bf_state.bfs_ctsduration /* rts/cts duration */ in ath_tx_setds()
1354 bf->bf_lastds = ds; in ath_tx_setds()
1355 bf->bf_last = bf; in ath_tx_setds()
1358 ath_tx_set_ratectrl(sc, bf->bf_node, bf); in ath_tx_setds()
1366 * Non-data frames and raw frames don't require it.
1369 * then disabled later on if something requires it (eg RTS/CTS on
1370 * pre-11n chipsets).
1372 * This needs to be done before the RTS/CTS fields are calculated
1384 if (! bf->bf_state.bfs_doratelookup) in ath_tx_do_ratelookup()
1388 bzero(bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_do_ratelookup()
1390 ATH_NODE_LOCK(ATH_NODE(bf->bf_node)); in ath_tx_do_ratelookup()
1391 ath_rate_findrate(sc, ATH_NODE(bf->bf_node), bf->bf_state.bfs_shpream, in ath_tx_do_ratelookup()
1395 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_do_ratelookup()
1396 bf->bf_state.bfs_rc[0].ratecode = rate; in ath_tx_do_ratelookup()
1397 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_do_ratelookup()
1399 if (bf->bf_state.bfs_ismrr && try0 != ATH_TXMAXTRY) in ath_tx_do_ratelookup()
1400 ath_rate_getxtxrates(sc, ATH_NODE(bf->bf_node), rix, in ath_tx_do_ratelookup()
1401 is_aggr, bf->bf_state.bfs_rc); in ath_tx_do_ratelookup()
1402 ATH_NODE_UNLOCK(ATH_NODE(bf->bf_node)); in ath_tx_do_ratelookup()
1404 sc->sc_txrix = rix; /* for LED blinking */ in ath_tx_do_ratelookup()
1405 sc->sc_lastdatarix = rix; /* for fast frames */ in ath_tx_do_ratelookup()
1406 bf->bf_state.bfs_try0 = try0; in ath_tx_do_ratelookup()
1407 bf->bf_state.bfs_txrate0 = rate; in ath_tx_do_ratelookup()
1408 bf->bf_state.bfs_rc_maxpktlen = maxpktlen; in ath_tx_do_ratelookup()
1418 struct ath_node *an = ATH_NODE(bf->bf_node); in ath_tx_update_clrdmask()
1422 if (an->clrdmask == 1) { in ath_tx_update_clrdmask()
1423 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_update_clrdmask()
1424 an->clrdmask = 0; in ath_tx_update_clrdmask()
1444 struct ieee80211_node *ni = &an->an_node; in ath_tx_should_swq_frame()
1449 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_should_swq_frame()
1450 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_should_swq_frame()
1454 /* If it's not in powersave - direct-dispatch BAR */ in ath_tx_should_swq_frame()
1455 if ((ATH_NODE(ni)->an_is_powersave == 0) in ath_tx_should_swq_frame()
1461 } else if ((ATH_NODE(ni)->an_is_powersave == 1) in ath_tx_should_swq_frame()
1469 } else if ((ATH_NODE(ni)->an_is_powersave == 1) in ath_tx_should_swq_frame()
1479 __func__, ni->ni_macaddr, ":", type, subtype); in ath_tx_should_swq_frame()
1497 * XXX we don't update the leak count here - if we're doing
1505 struct ath_node *an = ATH_NODE(bf->bf_node); in ath_tx_xmit_normal()
1506 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; in ath_tx_xmit_normal()
1514 * non-aggregate session frames. in ath_tx_xmit_normal()
1517 * frames that must go out - eg management/raw frames. in ath_tx_xmit_normal()
1519 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_xmit_normal()
1522 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, false); in ath_tx_xmit_normal()
1529 /* Track per-TID hardware queue depth correctly */ in ath_tx_xmit_normal()
1530 tid->hwq_depth++; in ath_tx_xmit_normal()
1533 bf->bf_comp = ath_tx_normal_comp; in ath_tx_xmit_normal()
1555 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_normal_setup()
1556 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_normal_setup()
1568 /* XXX TODO: this pri is only used for non-QoS check, right? */ in ath_tx_normal_setup()
1575 * re-ordered frames to have out of order CCMP PN's, resulting in ath_tx_normal_setup()
1581 iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; in ath_tx_normal_setup()
1582 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_normal_setup()
1583 isfrag = m0->m_flags & M_FRAG; in ath_tx_normal_setup()
1589 pktlen = m0->m_pkthdr.len - (hdrlen & 3); in ath_tx_normal_setup()
1611 bf->bf_node = ni; /* NB: held reference */ in ath_tx_normal_setup()
1612 m0 = bf->bf_m; /* NB: may have changed */ in ath_tx_normal_setup()
1616 ds = bf->bf_desc; in ath_tx_normal_setup()
1617 rt = sc->sc_currates; in ath_tx_normal_setup()
1618 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); in ath_tx_normal_setup()
1625 if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && in ath_tx_normal_setup()
1626 (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { in ath_tx_normal_setup()
1628 sc->sc_stats.ast_tx_shortpre++; in ath_tx_normal_setup()
1636 ismrr = 0; /* default no multi-rate retry*/ in ath_tx_normal_setup()
1644 switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { in ath_tx_normal_setup()
1646 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_normal_setup()
1655 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1656 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1658 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1664 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1665 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1667 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1679 rix = an->an_mcastrix; in ath_tx_normal_setup()
1680 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1682 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1684 } else if (m0->m_flags & M_EAPOL) { in ath_tx_normal_setup()
1686 rix = an->an_mgmtrix; in ath_tx_normal_setup()
1687 txrate = rt->info[rix].rateCode; in ath_tx_normal_setup()
1689 txrate |= rt->info[rix].shortPreamble; in ath_tx_normal_setup()
1694 * the hard-coded TX information decided here. in ath_tx_normal_setup()
1697 bf->bf_state.bfs_doratelookup = 1; in ath_tx_normal_setup()
1707 device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", in ath_tx_normal_setup()
1708 wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); in ath_tx_normal_setup()
1719 * + non-QoS frames (eg management?) that the net80211 stack has in ath_tx_normal_setup()
1720 * assigned a higher AC to, but since it's a non-QoS TID, it's in ath_tx_normal_setup()
1726 * surrounding ADDBA request/response - hence why that is special in ath_tx_normal_setup()
1737 if (txq != sc->sc_ac2q[pri]) { in ath_tx_normal_setup()
1742 txq->axq_qnum, in ath_tx_normal_setup()
1744 sc->sc_ac2q[pri], in ath_tx_normal_setup()
1745 sc->sc_ac2q[pri]->axq_qnum); in ath_tx_normal_setup()
1754 } else if (pktlen > vap->iv_rtsthreshold && in ath_tx_normal_setup()
1755 (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { in ath_tx_normal_setup()
1757 sc->sc_stats.ast_tx_rts++; in ath_tx_normal_setup()
1760 sc->sc_stats.ast_tx_noack++; in ath_tx_normal_setup()
1762 if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) { in ath_tx_normal_setup()
1765 sc->sc_stats.ast_tdma_ack++; in ath_tx_normal_setup()
1777 device_printf(sc->sc_dev, in ath_tx_normal_setup()
1784 * exchange. So this means things like RTS/CTS in ath_tx_normal_setup()
1787 * So, if you send a RTS-protected NULL data frame, in ath_tx_normal_setup()
1797 bf->bf_flags |= ATH_BUF_TOA_PROBE; in ath_tx_normal_setup()
1825 txq->axq_intrcnt = 0; in ath_tx_normal_setup()
1826 } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { in ath_tx_normal_setup()
1828 txq->axq_intrcnt = 0; in ath_tx_normal_setup()
1838 m0->m_nextpkt = NULL; in ath_tx_normal_setup()
1841 ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, in ath_tx_normal_setup()
1842 sc->sc_hwmap[rix].ieeerate, -1); in ath_tx_normal_setup()
1845 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; in ath_tx_normal_setup()
1847 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; in ath_tx_normal_setup()
1849 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; in ath_tx_normal_setup()
1850 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; in ath_tx_normal_setup()
1851 sc->sc_tx_th.wt_txpower = ieee80211_get_node_txpower(ni); in ath_tx_normal_setup()
1852 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; in ath_tx_normal_setup()
1858 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_normal_setup()
1864 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_normal_setup()
1865 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_normal_setup()
1866 bf->bf_state.bfs_rc[0].ratecode = txrate; in ath_tx_normal_setup()
1869 bf->bf_state.bfs_pktlen = pktlen; in ath_tx_normal_setup()
1870 bf->bf_state.bfs_hdrlen = hdrlen; in ath_tx_normal_setup()
1871 bf->bf_state.bfs_atype = atype; in ath_tx_normal_setup()
1872 bf->bf_state.bfs_txpower = ieee80211_get_node_txpower(ni); in ath_tx_normal_setup()
1873 bf->bf_state.bfs_txrate0 = txrate; in ath_tx_normal_setup()
1874 bf->bf_state.bfs_try0 = try0; in ath_tx_normal_setup()
1875 bf->bf_state.bfs_keyix = keyix; in ath_tx_normal_setup()
1876 bf->bf_state.bfs_txantenna = sc->sc_txantenna; in ath_tx_normal_setup()
1877 bf->bf_state.bfs_txflags = flags; in ath_tx_normal_setup()
1878 bf->bf_state.bfs_shpream = shortPreamble; in ath_tx_normal_setup()
1881 bf->bf_state.bfs_ctsrate0 = 0; /* ie, no hard-coded ctsrate */ in ath_tx_normal_setup()
1882 bf->bf_state.bfs_ctsrate = 0; /* calculated later */ in ath_tx_normal_setup()
1883 bf->bf_state.bfs_ctsduration = 0; in ath_tx_normal_setup()
1884 bf->bf_state.bfs_ismrr = ismrr; in ath_tx_normal_setup()
1904 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_start()
1928 * the per-TID pool. That means that even QoS group addressed in ath_tx_start()
1932 * all be out of whack. So - chances are, the right thing in ath_tx_start()
1937 * to see what the TID should be. If it's a non-QoS frame, the in ath_tx_start()
1947 txq = sc->sc_ac2q[pri]; in ath_tx_start()
1949 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_start()
1950 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_start()
1951 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_start()
1958 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_tx_start()
1959 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth in ath_tx_start()
1960 > sc->sc_txq_mcastq_maxdepth) { in ath_tx_start()
1961 sc->sc_stats.ast_tx_mcastq_overflow++; in ath_tx_start()
1980 * that we are direct-dispatching. in ath_tx_start()
1985 ATH_NODE(ni)->an_is_powersave && in ath_tx_start()
1986 ATH_NODE(ni)->an_swq_depth > in ath_tx_start()
1987 sc->sc_txq_node_psq_maxdepth) { in ath_tx_start()
1988 sc->sc_stats.ast_tx_node_psq_overflow++; in ath_tx_start()
1993 /* A-MPDU TX */ in ath_tx_start()
2002 bf->bf_state.bfs_tid = tid; in ath_tx_start()
2003 bf->bf_state.bfs_tx_queue = txq->axq_qnum; in ath_tx_start()
2004 bf->bf_state.bfs_pri = pri; in ath_tx_start()
2008 * When servicing one or more stations in power-save mode in ath_tx_start()
2015 if (sc->sc_cabq_enable && ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) { in ath_tx_start()
2016 txq = &avp->av_mcastq; in ath_tx_start()
2022 bf->bf_state.bfs_tx_queue = sc->sc_cabq->axq_qnum; in ath_tx_start()
2028 bf->bf_state.bfs_dobaw = 0; in ath_tx_start()
2030 /* A-MPDU TX? Manually set sequence number */ in ath_tx_start()
2035 * Don't assign A-MPDU sequence numbers to group address in ath_tx_start()
2038 if (is_ampdu_tx && (! IEEE80211_IS_MULTICAST(wh->i_addr1))) { in ath_tx_start()
2042 * and group-addressed frames don't get a sequence number in ath_tx_start()
2048 * Don't add QoS NULL frames and group-addressed frames in ath_tx_start()
2052 (! IEEE80211_IS_MULTICAST(wh->i_addr1)) && in ath_tx_start()
2054 bf->bf_state.bfs_dobaw = 1; in ath_tx_start()
2062 bf->bf_state.bfs_seqno = M_SEQNO_GET(m0) << IEEE80211_SEQ_SEQ_SHIFT; in ath_tx_start()
2077 m0 = bf->bf_m; in ath_tx_start()
2081 * If it's a multicast frame, do a direct-dispatch to the in ath_tx_start()
2097 * Until things are better debugged - if this node is asleep in ath_tx_start()
2098 * and we're sending it a non-BAR frame, direct dispatch it. in ath_tx_start()
2100 * sent - eg, during reassociation/reauthentication after in ath_tx_start()
2105 if (txq == &avp->av_mcastq) { in ath_tx_start()
2108 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2114 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2120 * direct-dispatch to the hardware. in ath_tx_start()
2122 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_start()
2140 struct ieee80211com *ic = &sc->sc_ic; in ath_tx_raw_start()
2141 struct ieee80211vap *vap = ni->ni_vap; in ath_tx_raw_start()
2151 int o_tid = -1; in ath_tx_raw_start()
2160 ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); in ath_tx_raw_start()
2167 pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; in ath_tx_raw_start()
2169 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; in ath_tx_raw_start()
2170 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_raw_start()
2178 pri = params->ibp_pri & 3; in ath_tx_raw_start()
2179 /* Override pri if the frame isn't a QoS one */ in ath_tx_raw_start()
2183 /* XXX If it's an ADDBA, override the correct queue */ in ath_tx_raw_start()
2190 "%s: overriding tid %d pri %d -> %d\n", in ath_tx_raw_start()
2206 m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0, in ath_tx_raw_start()
2216 bf->bf_state.bfs_dobaw = 0; in ath_tx_raw_start()
2221 m0 = bf->bf_m; /* NB: may have changed */ in ath_tx_raw_start()
2224 bf->bf_node = ni; /* NB: held reference */ in ath_tx_raw_start()
2229 if (params->ibp_flags & IEEE80211_BPF_RTS) in ath_tx_raw_start()
2231 else if (params->ibp_flags & IEEE80211_BPF_CTS) { in ath_tx_raw_start()
2233 bf->bf_state.bfs_doprot = 1; in ath_tx_raw_start()
2237 if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) in ath_tx_raw_start()
2240 rt = sc->sc_currates; in ath_tx_raw_start()
2241 KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); in ath_tx_raw_start()
2244 rix = ath_tx_findrix(sc, params->ibp_rate0); in ath_tx_raw_start()
2245 try0 = params->ibp_try0; in ath_tx_raw_start()
2248 * Override EAPOL rate as appropriate. in ath_tx_raw_start()
2250 if (m0->m_flags & M_EAPOL) { in ath_tx_raw_start()
2252 rix = an->an_mgmtrix; in ath_tx_raw_start()
2261 device_printf(sc->sc_dev, in ath_tx_raw_start()
2265 bf->bf_flags |= ATH_BUF_TOA_PROBE; in ath_tx_raw_start()
2268 txrate = rt->info[rix].rateCode; in ath_tx_raw_start()
2269 if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) in ath_tx_raw_start()
2270 txrate |= rt->info[rix].shortPreamble; in ath_tx_raw_start()
2271 sc->sc_txrix = rix; in ath_tx_raw_start()
2272 ismrr = (params->ibp_try1 != 0); in ath_tx_raw_start()
2273 txantenna = params->ibp_pri >> 2; in ath_tx_raw_start()
2275 txantenna = sc->sc_txantenna; in ath_tx_raw_start()
2282 bf->bf_state.bfs_ctsrate0 = params->ibp_ctsrate; in ath_tx_raw_start()
2291 ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, in ath_tx_raw_start()
2292 sc->sc_hwmap[rix].ieeerate, -1); in ath_tx_raw_start()
2295 sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags; in ath_tx_raw_start()
2296 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) in ath_tx_raw_start()
2297 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; in ath_tx_raw_start()
2298 if (m0->m_flags & M_FRAG) in ath_tx_raw_start()
2299 sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; in ath_tx_raw_start()
2300 sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate; in ath_tx_raw_start()
2301 sc->sc_tx_th.wt_txpower = MIN(params->ibp_power, in ath_tx_raw_start()
2303 sc->sc_tx_th.wt_antenna = sc->sc_txantenna; in ath_tx_raw_start()
2311 ds = bf->bf_desc; in ath_tx_raw_start()
2315 bf->bf_state.bfs_pktlen = pktlen; in ath_tx_raw_start()
2316 bf->bf_state.bfs_hdrlen = hdrlen; in ath_tx_raw_start()
2317 bf->bf_state.bfs_atype = atype; in ath_tx_raw_start()
2318 bf->bf_state.bfs_txpower = MIN(params->ibp_power, in ath_tx_raw_start()
2320 bf->bf_state.bfs_txrate0 = txrate; in ath_tx_raw_start()
2321 bf->bf_state.bfs_try0 = try0; in ath_tx_raw_start()
2322 bf->bf_state.bfs_keyix = keyix; in ath_tx_raw_start()
2323 bf->bf_state.bfs_txantenna = txantenna; in ath_tx_raw_start()
2324 bf->bf_state.bfs_txflags = flags; in ath_tx_raw_start()
2325 bf->bf_state.bfs_shpream = in ath_tx_raw_start()
2326 !! (params->ibp_flags & IEEE80211_BPF_SHORTPRE); in ath_tx_raw_start()
2329 bf->bf_state.bfs_tid = WME_AC_TO_TID(pri); in ath_tx_raw_start()
2330 bf->bf_state.bfs_tx_queue = sc->sc_ac2q[pri]->axq_qnum; in ath_tx_raw_start()
2331 bf->bf_state.bfs_pri = pri; in ath_tx_raw_start()
2334 bf->bf_state.bfs_ctsrate = 0; in ath_tx_raw_start()
2335 bf->bf_state.bfs_ctsduration = 0; in ath_tx_raw_start()
2336 bf->bf_state.bfs_ismrr = ismrr; in ath_tx_raw_start()
2339 bzero(&bf->bf_state.bfs_rc, sizeof(bf->bf_state.bfs_rc)); in ath_tx_raw_start()
2341 bf->bf_state.bfs_rc[0].rix = rix; in ath_tx_raw_start()
2342 bf->bf_state.bfs_rc[0].tries = try0; in ath_tx_raw_start()
2343 bf->bf_state.bfs_rc[0].ratecode = txrate; in ath_tx_raw_start()
2348 rix = ath_tx_findrix(sc, params->ibp_rate1); in ath_tx_raw_start()
2349 bf->bf_state.bfs_rc[1].rix = rix; in ath_tx_raw_start()
2350 bf->bf_state.bfs_rc[1].tries = params->ibp_try1; in ath_tx_raw_start()
2352 rix = ath_tx_findrix(sc, params->ibp_rate2); in ath_tx_raw_start()
2353 bf->bf_state.bfs_rc[2].rix = rix; in ath_tx_raw_start()
2354 bf->bf_state.bfs_rc[2].tries = params->ibp_try2; in ath_tx_raw_start()
2356 rix = ath_tx_findrix(sc, params->ibp_rate3); in ath_tx_raw_start()
2357 bf->bf_state.bfs_rc[3].rix = rix; in ath_tx_raw_start()
2358 bf->bf_state.bfs_rc[3].tries = params->ibp_try3; in ath_tx_raw_start()
2381 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2388 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2392 ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf); in ath_tx_raw_start()
2394 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2395 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2398 /* Direct-dispatch to the hardware */ in ath_tx_raw_start()
2399 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK; in ath_tx_raw_start()
2406 ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf); in ath_tx_raw_start()
2420 struct ieee80211com *ic = ni->ni_ic; in ath_raw_xmit()
2421 struct ath_softc *sc = ic->ic_softc; in ath_raw_xmit()
2427 if (sc->sc_inreset_cnt > 0) { in ath_raw_xmit()
2434 sc->sc_txstart_cnt++; in ath_raw_xmit()
2444 if (!sc->sc_running || sc->sc_invalid) { in ath_raw_xmit()
2446 __func__, sc->sc_running, sc->sc_invalid); in ath_raw_xmit()
2457 if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_raw_xmit()
2458 if (sc->sc_cabq->axq_depth + sc->sc_cabq->fifo.axq_depth in ath_raw_xmit()
2459 > sc->sc_txq_mcastq_maxdepth) { in ath_raw_xmit()
2460 sc->sc_stats.ast_tx_mcastq_overflow++; in ath_raw_xmit()
2475 sc->sc_stats.ast_tx_nobuf++; in ath_raw_xmit()
2502 sc->sc_wd_timer = 5; in ath_raw_xmit()
2503 sc->sc_stats.ast_tx_raw++; in ath_raw_xmit()
2506 * Update the TIM - if there's anything queued to the in ath_raw_xmit()
2515 sc->sc_txstart_cnt--; in ath_raw_xmit()
2539 sc->sc_txstart_cnt--; in ath_raw_xmit()
2550 sc->sc_stats.ast_tx_raw_fail++; in ath_raw_xmit()
2560 * it goes out after any pending non-aggregate frames to the
2565 * number -earlier- than the ADDBA can be transmitted (but
2567 * be!) they'll arrive after the ADDBA - and the receiving end
2570 * The frames can't be appended to the TID software queue - it'll
2610 if (ia->rq_header.ia_category != IEEE80211_ACTION_CAT_BA) in ath_tx_action_frame_override_queue()
2612 if (ia->rq_header.ia_action != IEEE80211_ACTION_BA_ADDBA_REQUEST) in ath_tx_action_frame_override_queue()
2616 baparamset = le16toh(ia->rq_baparamset); in ath_tx_action_frame_override_queue()
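The step the excerpt stops short of is extracting the negotiated TID from the BA parameter set so the caller can override the destination queue. A hedged sketch, assuming the TID comes back through an out-parameter and using the net80211 mask/shift macro as an assumption:

    /* The ADDBA parameter set carries the TID being negotiated */
    *tid = (int) _IEEE80211_MASKSHIFT(baparamset, IEEE80211_BAPS_TID);
    return (1);     /* caller steers the ADDBA onto that TID's queue */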
2622 /* Per-node software queue operations */
2643 if (bf->bf_state.bfs_isretried) in ath_tx_addto_baw()
2646 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_addto_baw()
2648 if (! bf->bf_state.bfs_dobaw) { in ath_tx_addto_baw()
2651 __func__, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2652 tap->txa_start, tap->txa_wnd); in ath_tx_addto_baw()
2655 if (bf->bf_state.bfs_addedbaw) in ath_tx_addto_baw()
2657 "%s: re-added? tid=%d, seqno %d; window %d:%d; " in ath_tx_addto_baw()
2659 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2660 tap->txa_start, tap->txa_wnd, tid->baw_head, in ath_tx_addto_baw()
2661 tid->baw_tail); in ath_tx_addto_baw()
2667 if (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, in ath_tx_addto_baw()
2668 SEQNO(bf->bf_state.bfs_seqno))) { in ath_tx_addto_baw()
2672 __func__, bf, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2673 tap->txa_start, tap->txa_wnd, tid->baw_head, in ath_tx_addto_baw()
2674 tid->baw_tail); in ath_tx_addto_baw()
2678 * ni->ni_txseqs[] is the currently allocated seqno. in ath_tx_addto_baw()
2681 index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_addto_baw()
2682 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_addto_baw()
2686 __func__, tid->tid, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_addto_baw()
2687 tap->txa_start, tap->txa_wnd, index, cindex, tid->baw_head, in ath_tx_addto_baw()
2688 tid->baw_tail); in ath_tx_addto_baw()
2691 assert(tid->tx_buf[cindex] == NULL); in ath_tx_addto_baw()
2693 if (tid->tx_buf[cindex] != NULL) { in ath_tx_addto_baw()
2697 __func__, index, cindex, tid->baw_head, tid->baw_tail); in ath_tx_addto_baw()
2701 tid->tx_buf[cindex], in ath_tx_addto_baw()
2702 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno), in ath_tx_addto_baw()
2704 SEQNO(bf->bf_state.bfs_seqno) in ath_tx_addto_baw()
2707 tid->tx_buf[cindex] = bf; in ath_tx_addto_baw()
2709 if (index >= ((tid->baw_tail - tid->baw_head) & in ath_tx_addto_baw()
2710 (ATH_TID_MAX_BUFS - 1))) { in ath_tx_addto_baw()
2711 tid->baw_tail = cindex; in ath_tx_addto_baw()
2712 INCR(tid->baw_tail, ATH_TID_MAX_BUFS); in ath_tx_addto_baw()
2719 * When software retransmitting a (sub-)frame, it is entirely possible that
2731 int seqno = SEQNO(old_bf->bf_state.bfs_seqno); in ath_tx_switch_baw_buf()
2735 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_switch_baw_buf()
2736 index = ATH_BA_INDEX(tap->txa_start, seqno); in ath_tx_switch_baw_buf()
2737 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_switch_baw_buf()
2744 if (old_bf->bf_state.bfs_seqno != new_bf->bf_state.bfs_seqno) { in ath_tx_switch_baw_buf()
2751 old_bf->bf_state.bfs_seqno, new_bf->bf_state.bfs_seqno); in ath_tx_switch_baw_buf()
2754 if (tid->tx_buf[cindex] != old_bf) { in ath_tx_switch_baw_buf()
2762 tid->tx_buf[cindex] = new_bf; in ath_tx_switch_baw_buf()
2766 * seq_start - left edge of BAW
2767 * seq_next - current/next sequence number to allocate
2778 int seqno = SEQNO(bf->bf_state.bfs_seqno); in ath_tx_update_baw()
2782 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_update_baw()
2783 index = ATH_BA_INDEX(tap->txa_start, seqno); in ath_tx_update_baw()
2784 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); in ath_tx_update_baw()
2789 __func__, tid->tid, tap->txa_start, tap->txa_wnd, seqno, index, in ath_tx_update_baw()
2790 cindex, tid->baw_head, tid->baw_tail); in ath_tx_update_baw()
2793 * If this occurs then we have a big problem - something else in ath_tx_update_baw()
2794 * has slid tap->txa_start along without updating the BAW in ath_tx_update_baw()
2802 if (tid->tx_buf[cindex] != bf) { in ath_tx_update_baw()
2805 __func__, bf, SEQNO(bf->bf_state.bfs_seqno), in ath_tx_update_baw()
2806 tid->tx_buf[cindex], in ath_tx_update_baw()
2807 (tid->tx_buf[cindex] != NULL) ? in ath_tx_update_baw()
2808 SEQNO(tid->tx_buf[cindex]->bf_state.bfs_seqno) : -1); in ath_tx_update_baw()
2811 tid->tx_buf[cindex] = NULL; in ath_tx_update_baw()
2813 while (tid->baw_head != tid->baw_tail && in ath_tx_update_baw()
2814 !tid->tx_buf[tid->baw_head]) { in ath_tx_update_baw()
2815 INCR(tap->txa_start, IEEE80211_SEQ_RANGE); in ath_tx_update_baw()
2816 INCR(tid->baw_head, ATH_TID_MAX_BUFS); in ath_tx_update_baw()
2820 __func__, tid->tid, tap->txa_start, tap->txa_wnd, tid->baw_head); in ath_tx_update_baw()
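A worked illustration of the index arithmetic used by the BAW excerpts above, assuming ATH_BA_INDEX() is the usual 12-bit sequence-space subtraction (an assumption, not quoted from the header):

    /*
     * Hypothetical numbers: txa_start = 4090, completing seqno = 3.
     * index  = (3 - 4090) & (IEEE80211_SEQ_RANGE - 1) = 9    (seqnos wrap at 4096)
     * cindex = (baw_head + 9) & (ATH_TID_MAX_BUFS - 1)       (slot in the circular tx_buf[])
     */
    index = ATH_BA_INDEX(tap->txa_start, SEQNO(bf->bf_state.bfs_seqno));
    cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);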
2831 if (tid->an->an_leak_count > 0) { in ath_tx_leak_count_update()
2832 wh = mtod(bf->bf_m, struct ieee80211_frame *); in ath_tx_leak_count_update()
2837 if ((tid->an->an_stack_psq > 0) in ath_tx_leak_count_update()
2838 || (tid->an->an_swq_depth > 0)) in ath_tx_leak_count_update()
2839 wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; in ath_tx_leak_count_update()
2841 wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA; in ath_tx_leak_count_update()
2846 tid->an->an_node.ni_macaddr, in ath_tx_leak_count_update()
2848 tid->an->an_leak_count, in ath_tx_leak_count_update()
2849 tid->an->an_stack_psq, in ath_tx_leak_count_update()
2850 tid->an->an_swq_depth, in ath_tx_leak_count_update()
2851 !! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA)); in ath_tx_leak_count_update()
2854 * Re-sync the underlying buffer. in ath_tx_leak_count_update()
2856 bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, in ath_tx_leak_count_update()
2859 tid->an->an_leak_count --; in ath_tx_leak_count_update()
2869 if (tid->an->an_leak_count > 0) { in ath_tx_tid_can_tx_or_sched()
2872 if (tid->paused) in ath_tx_tid_can_tx_or_sched()
2888 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_sched()
2894 * for PS-POLL, ensure that we allow scheduling to in ath_tx_tid_sched()
2900 if (tid->sched) in ath_tx_tid_sched()
2903 tid->sched = 1; in ath_tx_tid_sched()
2910 if (tid->an->an_leak_count) { in ath_tx_tid_sched()
2911 TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2913 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2918 * We can't do the above - it'll confuse the TXQ software in ath_tx_tid_sched()
2929 TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_sched()
2941 struct ath_txq *txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_unsched()
2945 if (tid->sched == 0) in ath_tx_tid_unsched()
2948 tid->sched = 0; in ath_tx_tid_unsched()
2949 TAILQ_REMOVE(&txq->axq_tidq, tid, axq_qelem); in ath_tx_tid_unsched()
2955 * This should only be called for A-MPDU TX frames.
2980 return -1; in ath_tx_tid_seqno_assign()
2993 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; in ath_tx_tid_seqno_assign()
2996 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; in ath_tx_tid_seqno_assign()
2997 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
2998 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { in ath_tx_tid_seqno_assign()
3003 seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID]; in ath_tx_tid_seqno_assign()
3004 INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
3007 seqno = ni->ni_txseqs[tid]; in ath_tx_tid_seqno_assign()
3008 INCR(ni->ni_txseqs[tid], IEEE80211_SEQ_RANGE); in ath_tx_tid_seqno_assign()
3010 *(uint16_t *)&wh->i_seq[0] = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); in ath_tx_tid_seqno_assign()
3015 "%s: -> subtype=0x%x, tid=%d, seqno=%d\n", in ath_tx_tid_seqno_assign()
3029 struct ath_tid *tid = &an->an_tid[bf->bf_state.bfs_tid]; in ath_tx_xmit_aggr()
3034 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_xmit_aggr()
3039 /* XXX don't sched - we're paused! */ in ath_tx_xmit_aggr()
3044 if (bf->bf_state.bfs_dobaw && in ath_tx_xmit_aggr()
3045 (! BAW_WITHIN(tap->txa_start, tap->txa_wnd, in ath_tx_xmit_aggr()
3046 SEQNO(bf->bf_state.bfs_seqno)))) { in ath_tx_xmit_aggr()
3062 if (bf->bf_state.bfs_aggr != 0 || bf->bf_state.bfs_nframes > 1) { in ath_tx_xmit_aggr()
3065 bf->bf_state.bfs_aggr, bf->bf_state.bfs_nframes); in ath_tx_xmit_aggr()
3066 bf->bf_state.bfs_aggr = 0; in ath_tx_xmit_aggr()
3067 bf->bf_state.bfs_nframes = 1; in ath_tx_xmit_aggr()
3074 ath_tx_do_ratelookup(sc, bf, tid->tid, bf->bf_state.bfs_pktlen, in ath_tx_xmit_aggr()
3083 sc->sc_aggr_stats.aggr_low_hwq_single_pkt++; in ath_tx_xmit_aggr()
3085 /* Track per-TID hardware queue depth correctly */ in ath_tx_xmit_aggr()
3086 tid->hwq_depth++; in ath_tx_xmit_aggr()
3089 if (bf->bf_state.bfs_dobaw) { in ath_tx_xmit_aggr()
3091 bf->bf_state.bfs_addedbaw = 1; in ath_tx_xmit_aggr()
3094 /* Set completion handler, multi-frame aggregate or not */ in ath_tx_xmit_aggr()
3095 bf->bf_comp = ath_tx_aggr_comp; in ath_tx_xmit_aggr()
3110 * If the queue isn't busy, direct-dispatch.
3122 struct mbuf *m0 = bf->bf_m; in ath_tx_swq()
3126 /* Fetch the TID - non-QoS frames get assigned to TID 16 */ in ath_tx_swq()
3130 atid = &an->an_tid[tid]; in ath_tx_swq()
3136 /* XXX potentially duplicate info, re-check */ in ath_tx_swq()
3137 bf->bf_state.bfs_tid = tid; in ath_tx_swq()
3138 bf->bf_state.bfs_tx_queue = txq->axq_qnum; in ath_tx_swq()
3139 bf->bf_state.bfs_pri = pri; in ath_tx_swq()
3147 * If the node is in power-save and we're leaking a frame, in ath_tx_swq()
3168 * AMPDU running, queue single-frame if the hardware queue in ath_tx_swq()
3177 * TODO: maybe we should treat this as two policies - minimise in ath_tx_swq()
3192 * Note: if we're say, configured to do ADDBA but not A-MPDU in ath_tx_swq()
3193 * then maybe we want to still queue two non-aggregate frames in ath_tx_swq()
3194 * to the hardware. Again with the per-TID policy in ath_tx_swq()
3200 if (txq->axq_depth + txq->fifo.axq_depth == 0) { in ath_tx_swq()
3205 * Ensure it's definitely treated as a non-AMPDU in ath_tx_swq()
3206 * frame - this information may have been left in ath_tx_swq()
3209 bf->bf_state.bfs_aggr = 0; in ath_tx_swq()
3210 bf->bf_state.bfs_nframes = 1; in ath_tx_swq()
3225 * If we're not doing A-MPDU, be prepared to direct dispatch in ath_tx_swq()
3228 * traffic and non-aggregate traffic: we want to ensure in ath_tx_swq()
3229 * that non-aggregate stations get a few frames queued to the in ath_tx_swq()
3233 * to the hardware from a non-AMPDU client, check both here in ath_tx_swq()
3235 * non-AMPDU stations get a fair chance to transmit. in ath_tx_swq()
3238 } else if ((txq->axq_depth + txq->fifo.axq_depth < sc->sc_hwq_limit_nonaggr) && in ath_tx_swq()
3239 (txq->axq_aggr_depth < sc->sc_hwq_limit_aggr)) { in ath_tx_swq()
3281 if (an->an_tid[i].isfiltered == 1) in ath_tx_set_clrdmask()
3284 an->clrdmask = 1; in ath_tx_set_clrdmask()
3288 * Configure the per-TID node state.
3302 atid = &an->an_tid[i]; in ath_tx_tid_init()
3307 TAILQ_INIT(&atid->tid_q); in ath_tx_tid_init()
3308 TAILQ_INIT(&atid->filtq.tid_q); in ath_tx_tid_init()
3309 atid->tid = i; in ath_tx_tid_init()
3310 atid->an = an; in ath_tx_tid_init()
3312 atid->tx_buf[j] = NULL; in ath_tx_tid_init()
3313 atid->baw_head = atid->baw_tail = 0; in ath_tx_tid_init()
3314 atid->paused = 0; in ath_tx_tid_init()
3315 atid->sched = 0; in ath_tx_tid_init()
3316 atid->hwq_depth = 0; in ath_tx_tid_init()
3317 atid->cleanup_inprogress = 0; in ath_tx_tid_init()
3319 atid->ac = ATH_NONQOS_TID_AC; in ath_tx_tid_init()
3321 atid->ac = TID_TO_WME_AC(i); in ath_tx_tid_init()
3323 an->clrdmask = 1; /* Always start by setting this bit */ in ath_tx_tid_init()
3338 tid->paused++; in ath_tx_tid_pause()
3341 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_pause()
3342 tid->tid, in ath_tx_tid_pause()
3343 tid->paused); in ath_tx_tid_pause()
3359 if (tid->paused == 0) { in ath_tx_tid_resume()
3360 device_printf(sc->sc_dev, in ath_tx_tid_resume()
3363 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_resume()
3364 tid->tid); in ath_tx_tid_resume()
3366 tid->paused--; in ath_tx_tid_resume()
3372 tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_resume()
3373 tid->tid, in ath_tx_tid_resume()
3374 tid->paused); in ath_tx_tid_resume()
3376 if (tid->paused) in ath_tx_tid_resume()
3380 * Override the clrdmask configuration for the next frame in ath_tx_tid_resume()
3383 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_resume()
3385 if (tid->axq_depth == 0) in ath_tx_tid_resume()
3389 if (tid->isfiltered == 1) { in ath_tx_tid_resume()
3414 if (!tid->isfiltered) in ath_tx_tid_filt_addbuf()
3422 sc->sc_stats.ast_tx_swfiltered++; in ath_tx_tid_filt_addbuf()
3439 if (! tid->isfiltered) { in ath_tx_tid_filt_comp_buf()
3441 __func__, tid->tid); in ath_tx_tid_filt_comp_buf()
3442 tid->isfiltered = 1; in ath_tx_tid_filt_comp_buf()
3465 if (tid->hwq_depth != 0) in ath_tx_tid_filt_comp_complete()
3469 __func__, tid->tid); in ath_tx_tid_filt_comp_complete()
3470 if (tid->isfiltered == 1) { in ath_tx_tid_filt_comp_complete()
3471 tid->isfiltered = 0; in ath_tx_tid_filt_comp_complete()
3476 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_filt_comp_complete()
3512 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { in ath_tx_tid_filt_comp_single()
3513 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_tid_filt_comp_single()
3518 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_single()
3527 if (bf->bf_flags & ATH_BUF_BUSY) { in ath_tx_tid_filt_comp_single()
3528 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); in ath_tx_tid_filt_comp_single()
3530 "%s: busy buffer clone: %p -> %p\n", in ath_tx_tid_filt_comp_single()
3561 bf_next = bf->bf_next; in ath_tx_tid_filt_comp_aggr()
3562 bf->bf_next = NULL; /* Remove it from the aggr list */ in ath_tx_tid_filt_comp_aggr()
3567 if (bf->bf_state.bfs_retries > SWMAX_RETRIES) { in ath_tx_tid_filt_comp_aggr()
3568 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_tid_filt_comp_aggr()
3572 tid->tid, in ath_tx_tid_filt_comp_aggr()
3574 SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3579 if (bf->bf_flags & ATH_BUF_BUSY) { in ath_tx_tid_filt_comp_aggr()
3580 nbf = ath_tx_retry_clone(sc, tid->an, tid, bf); in ath_tx_tid_filt_comp_aggr()
3582 "%s: tid=%d, busy buffer cloned: %p -> %p, seqno=%d\n", in ath_tx_tid_filt_comp_aggr()
3583 __func__, tid->tid, bf, nbf, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3595 __func__, tid->tid, bf, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_filt_comp_aggr()
3619 tid->tid, in ath_tx_tid_bar_suspend()
3620 tid->bar_wait, in ath_tx_tid_bar_suspend()
3621 tid->bar_tx); in ath_tx_tid_bar_suspend()
3624 if (tid->bar_tx) { in ath_tx_tid_bar_suspend()
3630 if (tid->bar_wait) in ath_tx_tid_bar_suspend()
3634 tid->bar_wait = 1; in ath_tx_tid_bar_suspend()
3641 * We've finished with BAR handling - either we succeeded or
3653 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_unsuspend()
3655 tid->tid); in ath_tx_tid_bar_unsuspend()
3657 if (tid->bar_tx == 0 || tid->bar_wait == 0) { in ath_tx_tid_bar_unsuspend()
3660 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_unsuspend()
3661 tid->tid, tid->bar_tx, tid->bar_wait); in ath_tx_tid_bar_unsuspend()
3664 tid->bar_tx = tid->bar_wait = 0; in ath_tx_tid_bar_unsuspend()
3679 if (tid->bar_wait == 0 || tid->hwq_depth > 0) in ath_tx_tid_bar_tx_ready()
3685 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx_ready()
3687 tid->tid); in ath_tx_tid_bar_tx_ready()
3714 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3716 tid->tid); in ath_tx_tid_bar_tx()
3718 tap = ath_tx_get_tx_tid(tid->an, tid->tid); in ath_tx_tid_bar_tx()
3723 if (tid->bar_wait == 0 || tid->bar_tx == 1) { in ath_tx_tid_bar_tx()
3726 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_tx()
3727 tid->tid, tid->bar_tx, tid->bar_wait); in ath_tx_tid_bar_tx()
3732 if (tid->hwq_depth > 0) { in ath_tx_tid_bar_tx()
3736 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3738 tid->tid, in ath_tx_tid_bar_tx()
3739 tid->hwq_depth); in ath_tx_tid_bar_tx()
3744 tid->bar_tx = 1; in ath_tx_tid_bar_tx()
3747 * Override the clrdmask configuration for the next frame, in ath_tx_tid_bar_tx()
3750 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_bar_tx()
3761 tid->an->an_node.ni_macaddr, in ath_tx_tid_bar_tx()
3763 tid->tid, in ath_tx_tid_bar_tx()
3764 tap->txa_start); in ath_tx_tid_bar_tx()
3770 if (ieee80211_send_bar(&tid->an->an_node, tap, tap->txa_start) == 0) { in ath_tx_tid_bar_tx()
3780 __func__, tid->an->an_node.ni_macaddr, ":", in ath_tx_tid_bar_tx()
3781 tid->tid); in ath_tx_tid_bar_tx()
3796 if (ath_tx_ampdu_running(sc, an, tid->tid) && in ath_tx_tid_drain_pkt()
3797 bf->bf_state.bfs_dobaw) { in ath_tx_tid_drain_pkt()
3803 if (bf->bf_state.bfs_retries > 0) { in ath_tx_tid_drain_pkt()
3805 bf->bf_state.bfs_dobaw = 0; in ath_tx_tid_drain_pkt()
3809 * This has become a non-fatal error now in ath_tx_tid_drain_pkt()
3811 if (! bf->bf_state.bfs_addedbaw) in ath_tx_tid_drain_pkt()
3814 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_tid_drain_pkt()
3819 bf->bf_next = NULL; in ath_tx_tid_drain_pkt()
3829 struct ieee80211_node *ni = &an->an_node; in ath_tx_tid_drain_print()
3833 txq = sc->sc_ac2q[tid->ac]; in ath_tx_tid_drain_print()
3834 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_tid_drain_print()
3841 ni->ni_macaddr, in ath_tx_tid_drain_print()
3844 bf->bf_state.bfs_addedbaw, in ath_tx_tid_drain_print()
3845 bf->bf_state.bfs_dobaw, in ath_tx_tid_drain_print()
3846 SEQNO(bf->bf_state.bfs_seqno), in ath_tx_tid_drain_print()
3847 bf->bf_state.bfs_retries); in ath_tx_tid_drain_print()
3852 ni->ni_macaddr, in ath_tx_tid_drain_print()
3855 txq->axq_qnum, in ath_tx_tid_drain_print()
3856 txq->axq_depth, in ath_tx_tid_drain_print()
3857 txq->axq_aggr_depth); in ath_tx_tid_drain_print()
3863 ni->ni_macaddr, in ath_tx_tid_drain_print()
3866 tid->axq_depth, in ath_tx_tid_drain_print()
3867 tid->hwq_depth, in ath_tx_tid_drain_print()
3868 tid->bar_wait, in ath_tx_tid_drain_print()
3869 tid->isfiltered); in ath_tx_tid_drain_print()
3877 ni->ni_macaddr, in ath_tx_tid_drain_print()
3879 tid->tid, in ath_tx_tid_drain_print()
3880 tid->sched, tid->paused, in ath_tx_tid_drain_print()
3881 tid->incomp, tid->baw_head, in ath_tx_tid_drain_print()
3882 tid->baw_tail, tap == NULL ? -1 : tap->txa_start, in ath_tx_tid_drain_print()
3883 ni->ni_txseqs[tid->tid]); in ath_tx_tid_drain_print()
3887 ieee80211_dump_pkt(ni->ni_ic, in ath_tx_tid_drain_print()
3888 mtod(bf->bf_m, const uint8_t *), in ath_tx_tid_drain_print()
3889 bf->bf_m->m_len, 0, -1); in ath_tx_tid_drain_print()
3913 struct ieee80211_node *ni = &an->an_node; in ath_tx_tid_drain()
3916 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_tid_drain()
3954 * Override the clrdmask configuration for the next frame in ath_tx_tid_drain()
3960 ath_tx_set_clrdmask(sc, tid->an); in ath_tx_tid_drain()
3971 * when the packet is first transmitted - and thus the "retries" in ath_tx_tid_drain()
3975 /* But don't do it for non-QoS TIDs */ in ath_tx_tid_drain()
3981 ni->ni_macaddr, in ath_tx_tid_drain()
3984 tid->tid, in ath_tx_tid_drain()
3985 tap->txa_start); in ath_tx_tid_drain()
3987 ni->ni_txseqs[tid->tid] = tap->txa_start; in ath_tx_tid_drain()
3988 tid->baw_tail = tid->baw_head; in ath_tx_tid_drain()
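
ath_tx_tid_drain() ends by re-pinning the stack's sequence counter to the BAW left edge and collapsing the software BAW ring, so the driver and net80211 agree on sequence numbers after a flush. A simplified, hypothetical model of that resync:

    #include <stdint.h>

    /* Field names mirror the driver's tap/tid state; the struct is a model. */
    struct baw_model {
        uint16_t txa_start;   /* BAW left edge, owned by the net80211 tap */
        uint16_t ni_txseq;    /* next sequence number for this TID */
        int baw_head;         /* software BAW ring indices */
        int baw_tail;
    };

    static void
    baw_resync_after_drain(struct baw_model *b)
    {
        b->ni_txseq = b->txa_start;   /* stack and driver agree again */
        b->baw_tail = b->baw_head;    /* ring now holds no frames */
    }
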
4002 tid->bar_wait = tid->bar_tx = tid->isfiltered = 0; in ath_tx_tid_reset()
4003 tid->paused = tid->sched = tid->addba_tx_pending = 0; in ath_tx_tid_reset()
4004 tid->incomp = tid->cleanup_inprogress = 0; in ath_tx_tid_reset()
4012 * XXX I'm not going through resume here - I don't want the in ath_tx_tid_reset()
4016 if (tid->bar_wait) { in ath_tx_tid_reset()
4017 if (tid->paused > 0) { in ath_tx_tid_reset()
4018 tid->paused --; in ath_tx_tid_reset()
4031 if (tid->isfiltered) { in ath_tx_tid_reset()
4032 if (tid->paused > 0) { in ath_tx_tid_reset()
4033 tid->paused --; in ath_tx_tid_reset()
4042 tid->bar_wait = 0; in ath_tx_tid_reset()
4043 tid->bar_tx = 0; in ath_tx_tid_reset()
4044 tid->isfiltered = 0; in ath_tx_tid_reset()
4045 tid->sched = 0; in ath_tx_tid_reset()
4046 tid->addba_tx_pending = 0; in ath_tx_tid_reset()
4050 * frames for that node as non-aggregate; or mark the ath_node in ath_tx_tid_reset()
4053 * do a complete hard reset of state here - no pause, no in ath_tx_tid_reset()
4076 &an->an_node); in ath_tx_node_flush()
4083 an->an_node.ni_macaddr, in ath_tx_node_flush()
4085 an->an_is_powersave, in ath_tx_node_flush()
4086 an->an_stack_psq, in ath_tx_node_flush()
4087 an->an_tim_set, in ath_tx_node_flush()
4088 an->an_swq_depth, in ath_tx_node_flush()
4089 an->clrdmask, in ath_tx_node_flush()
4090 an->an_leak_count); in ath_tx_node_flush()
4093 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_node_flush()
4101 /* Reset the per-TID pause, BAR, etc state */ in ath_tx_node_flush()
4108 an->an_leak_count = 0; in ath_tx_node_flush()
4135 while (! TAILQ_EMPTY(&txq->axq_tidq)) { in ath_tx_txq_drain()
4136 tid = TAILQ_FIRST(&txq->axq_tidq); in ath_tx_txq_drain()
4137 ath_tx_tid_drain(sc, tid->an, tid, &bf_cq); in ath_tx_txq_drain()
4150 * Handle completion of non-aggregate session frames.
4153 * non-aggregate frames!
4155 * Software retransmission of non-aggregate frames needs to obey
4163 * ath_tx_hw_queue_norm() must override and set CLRDMASK.
4168 struct ieee80211_node *ni = bf->bf_node; in ath_tx_normal_comp()
4170 int tid = bf->bf_state.bfs_tid; in ath_tx_normal_comp()
4171 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_normal_comp()
4172 struct ath_tx_status *ts = &bf->bf_status.ds_txstat; in ath_tx_normal_comp()
4178 __func__, bf, fail, atid->hwq_depth - 1); in ath_tx_normal_comp()
4180 atid->hwq_depth--; in ath_tx_normal_comp()
4187 if ((ts->ts_status & HAL_TXERR_FILT) || in ath_tx_normal_comp()
4188 (ts->ts_status != 0 && atid->isfiltered)) { in ath_tx_normal_comp()
4192 atid->isfiltered, in ath_tx_normal_comp()
4193 ts->ts_status); in ath_tx_normal_comp()
4197 if (atid->isfiltered) in ath_tx_normal_comp()
4199 if (atid->hwq_depth < 0) in ath_tx_normal_comp()
4201 __func__, atid->hwq_depth); in ath_tx_normal_comp()
4205 if (atid->cleanup_inprogress) { in ath_tx_normal_comp()
4206 atid->incomp--; in ath_tx_normal_comp()
4207 if (atid->incomp == 0) { in ath_tx_normal_comp()
4211 atid->cleanup_inprogress = 0; in ath_tx_normal_comp()
4221 * for this end-node that has CLRDMASK set, so it's quite possible in ath_tx_normal_comp()
4222 * that a filtered frame will be followed by a non-filtered in ath_tx_normal_comp()
4227 if (atid->isfiltered) in ath_tx_normal_comp()
4235 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0)) in ath_tx_normal_comp()
4236 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc, in ath_tx_normal_comp()
4238 bf->bf_state.bfs_pktlen, in ath_tx_normal_comp()
4239 bf->bf_state.bfs_pktlen, in ath_tx_normal_comp()
4240 1, (ts->ts_status == 0) ? 0 : 1); in ath_tx_normal_comp()
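
The completion path only feeds ath_tx_update_ratectrl() when the frame really completed in hardware and an ACK was expected. A small stand-alone restatement of that rule, with a stand-in value for HAL_TXDESC_NOACK:

    #include <stdbool.h>
    #include <stdint.h>

    #define TXDESC_NOACK_MODEL 0x0002   /* stand-in for HAL_TXDESC_NOACK */

    /* Report a frame to rate control only if it genuinely completed in
     * hardware (fail == 0) and an ACK was expected for it. */
    static bool
    should_update_ratectrl(int fail, uint32_t txflags)
    {
        return (fail == 0 && (txflags & TXDESC_NOACK_MODEL) == 0);
    }
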
4247 * an A-MPDU.
4249 * There's no need to update the BAW here - the session is being
4255 struct ieee80211_node *ni = bf->bf_node; in ath_tx_comp_cleanup_unaggr()
4257 int tid = bf->bf_state.bfs_tid; in ath_tx_comp_cleanup_unaggr()
4258 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_comp_cleanup_unaggr()
4261 __func__, tid, atid->incomp); in ath_tx_comp_cleanup_unaggr()
4264 atid->incomp--; in ath_tx_comp_cleanup_unaggr()
4267 if (bf->bf_state.bfs_dobaw) { in ath_tx_comp_cleanup_unaggr()
4269 if (!bf->bf_state.bfs_addedbaw) in ath_tx_comp_cleanup_unaggr()
4272 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_comp_cleanup_unaggr()
4275 if (atid->incomp == 0) { in ath_tx_comp_cleanup_unaggr()
4279 atid->cleanup_inprogress = 0; in ath_tx_comp_cleanup_unaggr()
4296 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_tid_cleanup_frame()
4311 bf_next = bf->bf_next; /* next aggregate frame, or NULL */ in ath_tx_tid_cleanup_frame()
4318 * BAW - we shouldn't have it be in an aggregate in ath_tx_tid_cleanup_frame()
4321 if (bf->bf_state.bfs_addedbaw) { in ath_tx_tid_cleanup_frame()
4323 bf->bf_state.bfs_dobaw = 0; in ath_tx_tid_cleanup_frame()
4329 bf->bf_comp = ath_tx_normal_comp; in ath_tx_tid_cleanup_frame()
4330 bf->bf_next = NULL; in ath_tx_tid_cleanup_frame()
4361 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_tid_cleanup()
4368 atid->cleanup_inprogress); in ath_tx_tid_cleanup()
4385 * + Fix the completion function to be non-aggregate in ath_tx_tid_cleanup()
4410 if (atid->hwq_depth > 0) { in ath_tx_tid_cleanup()
4412 * XXX how about we kill atid->incomp, and instead in ath_tx_tid_cleanup()
4413 * replace it with a macro that checks that atid->hwq_depth in ath_tx_tid_cleanup()
4416 atid->incomp = atid->hwq_depth; in ath_tx_tid_cleanup()
4417 atid->cleanup_inprogress = 1; in ath_tx_tid_cleanup()
4420 if (atid->cleanup_inprogress) in ath_tx_tid_cleanup()
4423 __func__, tid, atid->incomp); in ath_tx_tid_cleanup()
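
ath_tx_tid_cleanup() seeds atid->incomp from the hardware queue depth, and the cleanup completion handlers count it back down; the cleanup state clears only when the last outstanding frame returns. A compact model of that bookkeeping (field names mirror struct ath_tid, the struct itself is hypothetical):

    /* Field names mirror struct ath_tid; the struct itself is a model. */
    struct cleanup_model {
        int hwq_depth;          /* frames currently with the hardware */
        int incomp;             /* completions still owed to cleanup */
        int cleanup_inprogress;
    };

    /* Tearing aggregation down while frames are still in flight: remember
     * how many completions to wait for. */
    static void
    cleanup_begin(struct cleanup_model *c)
    {
        if (c->hwq_depth > 0) {
            c->incomp = c->hwq_depth;
            c->cleanup_inprogress = 1;
        }
    }

    /* Called for each completed frame while cleanup is in progress; returns 1
     * exactly once, when the final outstanding frame has come back (the
     * driver would then finish any pending BAR and resume the TID). */
    static int
    cleanup_frame_done(struct cleanup_model *c)
    {
        if (!c->cleanup_inprogress)
            return (0);
        if (--c->incomp > 0)
            return (0);
        c->cleanup_inprogress = 0;
        return (1);
    }
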
4457 error = ath_tx_dmasetup(sc, nbf, nbf->bf_m); in ath_tx_retry_clone()
4475 if (bf->bf_state.bfs_dobaw) in ath_tx_retry_clone()
4490 * non-aggregate frames in an aggregate session are
4491 * transmitted in-order; they just have to be in-BAW)
4497 struct ieee80211_node *ni = bf->bf_node; in ath_tx_aggr_retry_unaggr()
4499 int tid = bf->bf_state.bfs_tid; in ath_tx_aggr_retry_unaggr()
4500 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_aggr_retry_unaggr()
4515 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && in ath_tx_aggr_retry_unaggr()
4516 (bf->bf_flags & ATH_BUF_BUSY)) { in ath_tx_aggr_retry_unaggr()
4523 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; in ath_tx_aggr_retry_unaggr()
4526 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { in ath_tx_aggr_retry_unaggr()
4529 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_aggr_retry_unaggr()
4530 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_aggr_retry_unaggr()
4533 if (bf->bf_state.bfs_dobaw) { in ath_tx_aggr_retry_unaggr()
4535 if (! bf->bf_state.bfs_addedbaw) in ath_tx_aggr_retry_unaggr()
4538 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_aggr_retry_unaggr()
4540 bf->bf_state.bfs_dobaw = 0; in ath_tx_aggr_retry_unaggr()
4562 sc->sc_stats.ast_tx_swretries++; in ath_tx_aggr_retry_unaggr()
4588 struct ieee80211_node *ni = bf->bf_node; in ath_tx_retry_subframe()
4590 int tid = bf->bf_state.bfs_tid; in ath_tx_retry_subframe()
4591 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_retry_subframe()
4596 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc); in ath_tx_retry_subframe()
4597 ath_hal_set11nburstduration(sc->sc_ah, bf->bf_desc, 0); in ath_tx_retry_subframe()
4599 /* ath_hal_set11n_virtualmorefrag(sc->sc_ah, bf->bf_desc, 0); */ in ath_tx_retry_subframe()
4609 if ((bf->bf_state.bfs_retries < SWMAX_RETRIES) && in ath_tx_retry_subframe()
4610 (bf->bf_flags & ATH_BUF_BUSY)) { in ath_tx_retry_subframe()
4617 bf->bf_state.bfs_retries = SWMAX_RETRIES + 1; in ath_tx_retry_subframe()
4620 if (bf->bf_state.bfs_retries >= SWMAX_RETRIES) { in ath_tx_retry_subframe()
4621 sc->sc_stats.ast_tx_swretrymax++; in ath_tx_retry_subframe()
4624 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_retry_subframe()
4626 if (!bf->bf_state.bfs_addedbaw) in ath_tx_retry_subframe()
4629 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_retry_subframe()
4630 bf->bf_state.bfs_dobaw = 0; in ath_tx_retry_subframe()
4635 sc->sc_stats.ast_tx_swretries++; in ath_tx_retry_subframe()
4636 bf->bf_next = NULL; /* Just to make sure */ in ath_tx_retry_subframe()
4639 bf->bf_state.bfs_aggr = 0; in ath_tx_retry_subframe()
4640 bf->bf_state.bfs_ndelim = 0; /* ??? needed? */ in ath_tx_retry_subframe()
4641 bf->bf_state.bfs_nframes = 1; in ath_tx_retry_subframe()
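
ath_tx_retry_subframe() forces a busy buffer that could not be cloned over the retry limit, drops anything past SWMAX_RETRIES, and otherwise re-queues the subframe as a plain single frame. A hedged sketch of that decision; the retry limit value here is illustrative only:

    #define SWMAX_RETRIES_MODEL 10  /* illustrative; the driver defines its own */

    enum retry_verdict {
        RETRY_AS_SINGLE,    /* re-queue as a plain, non-aggregate frame */
        DROP_FRAME          /* over the limit; caller releases the BAW slot */
    };

    struct subframe_model {
        int retries;
        int busy;       /* buffer still referenced by hardware, clone failed */
        int aggr;
        int nframes;
    };

    static enum retry_verdict
    retry_subframe_model(struct subframe_model *bf)
    {
        if (bf->retries < SWMAX_RETRIES_MODEL && bf->busy)
            bf->retries = SWMAX_RETRIES_MODEL + 1;  /* can't retry a busy buffer */
        if (bf->retries >= SWMAX_RETRIES_MODEL)
            return (DROP_FRAME);
        bf->retries++;
        bf->aggr = 0;       /* demote: send standalone on the next attempt */
        bf->nframes = 1;
        return (RETRY_AS_SINGLE);
    }
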
4654 struct ieee80211_node *ni = bf_first->bf_node; in ath_tx_comp_aggr_error()
4666 * Update rate control - all frames have failed. in ath_tx_comp_aggr_error()
4668 ath_tx_update_ratectrl(sc, ni, bf_first->bf_state.bfs_rc, in ath_tx_comp_aggr_error()
4669 &bf_first->bf_status.ds_txstat, in ath_tx_comp_aggr_error()
4670 bf_first->bf_state.bfs_al, in ath_tx_comp_aggr_error()
4671 bf_first->bf_state.bfs_rc_maxpktlen, in ath_tx_comp_aggr_error()
4672 bf_first->bf_state.bfs_nframes, bf_first->bf_state.bfs_nframes); in ath_tx_comp_aggr_error()
4675 tap = ath_tx_get_tx_tid(an, tid->tid); in ath_tx_comp_aggr_error()
4676 sc->sc_stats.ast_tx_aggr_failall++; in ath_tx_comp_aggr_error()
4681 bf_next = bf->bf_next; in ath_tx_comp_aggr_error()
4682 bf->bf_next = NULL; /* Remove it from the aggr list */ in ath_tx_comp_aggr_error()
4683 sc->sc_stats.ast_tx_aggr_fail++; in ath_tx_comp_aggr_error()
4686 bf->bf_next = NULL; in ath_tx_comp_aggr_error()
4699 * Schedule the TID to be re-tried. in ath_tx_comp_aggr_error()
4731 * Handle clean-up of packets from an aggregate list.
4733 * There's no need to update the BAW here - the session is being
4740 struct ieee80211_node *ni = bf_first->bf_node; in ath_tx_comp_cleanup_aggr()
4742 int tid = bf_first->bf_state.bfs_tid; in ath_tx_comp_cleanup_aggr()
4743 struct ath_tid *atid = &an->an_tid[tid]; in ath_tx_comp_cleanup_aggr()
4748 atid->incomp--; in ath_tx_comp_cleanup_aggr()
4754 if (bf->bf_state.bfs_dobaw) { in ath_tx_comp_cleanup_aggr()
4756 if (!bf->bf_state.bfs_addedbaw) in ath_tx_comp_cleanup_aggr()
4759 __func__, SEQNO(bf->bf_state.bfs_seqno)); in ath_tx_comp_cleanup_aggr()
4761 bf = bf->bf_next; in ath_tx_comp_cleanup_aggr()
4764 if (atid->incomp == 0) { in ath_tx_comp_cleanup_aggr()
4768 atid->cleanup_inprogress = 0; in ath_tx_comp_cleanup_aggr()
4773 /* XXX why would we send a BAR when transitioning to non-aggregation? */ in ath_tx_comp_cleanup_aggr()
4786 bf_next = bf->bf_next; in ath_tx_comp_cleanup_aggr()
4787 bf->bf_next = NULL; in ath_tx_comp_cleanup_aggr()
4803 //struct ath_desc *ds = bf->bf_lastds;
4804 struct ieee80211_node *ni = bf_first->bf_node;
4806 int tid = bf_first->bf_state.bfs_tid;
4807 struct ath_tid *atid = &an->an_tid[tid];
4826 __func__, atid->hwq_depth);
4829 * Take a copy; this may be needed -after- bf_first
4832 ts = bf_first->bf_status.ds_txstat;
4833 agglen = bf_first->bf_state.bfs_al;
4834 rc_agglen = bf_first->bf_state.bfs_rc_maxpktlen;
4842 atid->hwq_depth--;
4843 if (atid->hwq_depth < 0)
4845 __func__, atid->hwq_depth);
4854 if (atid->isfiltered)
4860 if (atid->cleanup_inprogress) {
4861 if (atid->isfiltered)
4878 (ts.ts_status != 0 && atid->isfiltered)) {
4886 if (bf->bf_state.bfs_addedbaw)
4888 if (bf->bf_state.bfs_dobaw) {
4890 if (!bf->bf_state.bfs_addedbaw)
4894 SEQNO(bf->bf_state.bfs_seqno));
4896 bf->bf_state.bfs_dobaw = 0;
4916 pktlen = bf_first->bf_state.bfs_pktlen;
4937 * extract starting sequence and block-ack bitmap
4939 /* XXX endian-ness of seq_st, ba? */
4943 isaggr = bf_first->bf_state.bfs_aggr;
4953 memcpy(rc, bf_first->bf_state.bfs_rc, sizeof(rc));
4958 __func__, tap->txa_start, tx_ok, ts.ts_status, ts.ts_flags,
4966 * out tid 1 - the aggregate frames are all marked as TID 1,
4981 device_printf(sc->sc_dev,
4985 taskqueue_enqueue(sc->sc_tq, &sc->sc_fataltask);
4992 sc->sc_ac2q[atid->ac]->axq_qnum, 0, 0);
5001 nf = bf_first->bf_state.bfs_nframes;
5023 SEQNO(bf->bf_state.bfs_seqno));
5024 bf_next = bf->bf_next;
5025 bf->bf_next = NULL; /* Remove it from the aggr list */
5029 __func__, bf, SEQNO(bf->bf_state.bfs_seqno),
5033 sc->sc_stats.ast_tx_aggr_ok++;
5035 bf->bf_state.bfs_dobaw = 0;
5036 if (!bf->bf_state.bfs_addedbaw)
5039 __func__, SEQNO(bf->bf_state.bfs_seqno));
5040 bf->bf_next = NULL;
5043 sc->sc_stats.ast_tx_aggr_fail++;
5046 bf->bf_next = NULL;
5058 * have a consistent view of what -was- in the BAW.
5062 txseq = tap->txa_start;
5090 "%s: txa_start now %d\n", __func__, tap->txa_start);
5106 * If the queue is filtered, re-schedule as required.
5109 * for this end-node that has CLRDMASK set, so it's quite possible
5110 * that a filtered frame will be followed by a non-filtered
5115 if (atid->isfiltered)
5145 struct ieee80211_node *ni = bf->bf_node;
5147 int tid = bf->bf_state.bfs_tid;
5148 struct ath_tid *atid = &an->an_tid[tid];
5156 ts = bf->bf_status.ds_txstat;
5164 if (fail == 0 && ((bf->bf_state.bfs_txflags & HAL_TXDESC_NOACK) == 0))
5165 ath_tx_update_ratectrl(sc, ni, bf->bf_state.bfs_rc,
5166 &bf->bf_status.ds_txstat,
5167 bf->bf_state.bfs_pktlen,
5168 bf->bf_state.bfs_pktlen,
5172 * This is called early so atid->hwq_depth can be tracked.
5183 __func__, bf, bf->bf_state.bfs_tid, atid->hwq_depth,
5184 SEQNO(bf->bf_state.bfs_seqno));
5186 atid->hwq_depth--;
5187 if (atid->hwq_depth < 0)
5189 __func__, atid->hwq_depth);
5196 if (atid->isfiltered)
5205 if (atid->cleanup_inprogress) {
5206 if (atid->isfiltered)
5225 * However - a busy buffer can't be added to the filtered
5230 (ts.ts_status != 0 && atid->isfiltered)) {
5244 if (bf->bf_state.bfs_addedbaw)
5246 if (bf->bf_state.bfs_dobaw) {
5248 if (!bf->bf_state.bfs_addedbaw)
5251 __func__, SEQNO(bf->bf_state.bfs_seqno));
5253 bf->bf_state.bfs_dobaw = 0;
5284 if (fail == 0 && ts->ts_status & HAL_TXERR_XRETRY) {
5296 __func__, tid, SEQNO(bf->bf_state.bfs_seqno));
5297 if (bf->bf_state.bfs_dobaw) {
5299 bf->bf_state.bfs_dobaw = 0;
5300 if (!bf->bf_state.bfs_addedbaw)
5303 __func__, SEQNO(bf->bf_state.bfs_seqno));
5307 * If the queue is filtered, re-schedule as required.
5310 * for this end-node that has CLRDMASK set, so it's quite possible
5311 * that a filtered frame will be followed by a non-filtered
5316 if (atid->isfiltered)
5334 if (bf->bf_state.bfs_aggr)
5358 tap = ath_tx_get_tx_tid(an, tid->tid);
5365 TAILQ_FOREACH(bf, &tid->tid_q, bf_list) {
5372 if (tap != NULL && (! BAW_WITHIN(tap->txa_start, tap->txa_wnd,
5373 SEQNO(bf->bf_state.bfs_seqno)))) {
5378 if (! bf->bf_state.bfs_dobaw) {
5382 nbytes += bf->bf_state.bfs_pktlen;
5391 if (an->an_leak_count) {
5409 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5415 DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
5423 tap = ath_tx_get_tx_tid(an, tid->tid);
5425 if (tid->tid == IEEE80211_NONQOS_TID)
5452 if (! bf->bf_state.bfs_dobaw) {
5454 "%s: non-baw packet\n",
5458 if (bf->bf_state.bfs_nframes > 1)
5462 bf->bf_state.bfs_aggr,
5463 bf->bf_state.bfs_nframes);
5466 * This shouldn't happen - such frames shouldn't
5471 bf->bf_state.bfs_aggr = 0;
5472 bf->bf_state.bfs_nframes = 1;
5477 ath_tx_do_ratelookup(sc, bf, tid->tid,
5478 bf->bf_state.bfs_pktlen, false);
5484 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5486 sc->sc_aggr_stats.aggr_nonbaw_pkt++;
5500 ath_tx_do_ratelookup(sc, bf, tid->tid, swq_pktbytes, true);
5519 * No frames to be picked up - out of BAW
5531 sc->sc_aggr_stats.aggr_rts_aggr_limited++;
5534 * If it's the only frame send as non-aggregate
5538 if (bf->bf_state.bfs_nframes == 1) {
5540 "%s: single-frame aggregate\n", __func__);
5545 bf->bf_state.bfs_aggr = 0;
5546 bf->bf_state.bfs_ndelim = 0;
5548 ath_hal_clr11n_aggr(sc->sc_ah, bf->bf_desc);
5550 sc->sc_aggr_stats.aggr_baw_closed_single_pkt++;
5552 sc->sc_aggr_stats.aggr_single_pkt++;
5555 "%s: multi-frame aggregate: %d frames, "
5557 __func__, bf->bf_state.bfs_nframes,
5558 bf->bf_state.bfs_al);
5559 bf->bf_state.bfs_aggr = 1;
5560 sc->sc_aggr_stats.aggr_pkts[bf->bf_state.bfs_nframes]++;
5561 sc->sc_aggr_stats.aggr_aggr_pkt++;
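
As the branch above shows, a one-frame "aggregate" is demoted to a plain frame while longer bursts are marked as A-MPDU. A tiny model of that finalisation step:

    /* Hypothetical slimmed-down view of the per-burst state being finalised. */
    struct agg_model {
        int nframes;    /* frames collected for this burst */
        int aggr;       /* 1 = transmit as an A-MPDU */
        int ndelim;     /* pad delimiters, only meaningful for A-MPDU */
    };

    static void
    agg_finalize(struct agg_model *a)
    {
        if (a->nframes == 1) {
            a->aggr = 0;    /* lone frame: send it as a plain frame */
            a->ndelim = 0;
        } else {
            a->aggr = 1;    /* real aggregate */
        }
    }
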
5587 /* Set completion handler, multi-frame aggregate or not */
5588 bf->bf_comp = ath_tx_aggr_comp;
5590 if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
5606 tid->hwq_depth++;
5616 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr ||
5640 struct ath_txq *txq = sc->sc_ac2q[tid->ac];
5643 __func__, an, tid->tid);
5647 /* Check - is AMPDU pending or running? then print out something */
5648 if (ath_tx_ampdu_pending(sc, an, tid->tid))
5650 __func__, tid->tid);
5651 if (ath_tx_ampdu_running(sc, an, tid->tid))
5653 __func__, tid->tid);
5674 if (tid->tid != bf->bf_state.bfs_tid) {
5676 " tid %d\n", __func__, bf->bf_state.bfs_tid,
5677 tid->tid);
5680 bf->bf_comp = ath_tx_normal_comp;
5683 * Override this for now, until the non-aggregate
5686 bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
5692 ath_tx_do_ratelookup(sc, bf, tid->tid,
5693 bf->bf_state.bfs_pktlen, false);
5709 tid->hwq_depth++;
5735 * For non-EDMA chips, aggr frames that have been built are
5737 * There's no FIFO, so txq->axq_depth is what's been scheduled
5747 * The FIFO depth is what's in the hardware; the txq->axq_depth
5751 * into the EDMA FIFO. For multi-frame lists, this is the number
5756 /* For EDMA and non-EDMA, check built/scheduled against aggr limit */
5757 if (txq->axq_aggr_depth >= sc->sc_hwq_limit_aggr) {
5758 sc->sc_aggr_stats.aggr_sched_nopkt++;
5763 * For non-EDMA chips, axq_depth is the "what's scheduled to
5768 if (txq->axq_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_nonaggr) {
5769 sc->sc_aggr_stats.aggr_sched_nopkt++;
5773 last = TAILQ_LAST(&txq->axq_tidq, axq_t_s);
5775 TAILQ_FOREACH_SAFE(tid, &txq->axq_tidq, axq_qelem, next) {
5781 __func__, tid->tid, tid->paused);
5784 * This node may be in power-save and we're leaking
5790 if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
5791 ath_tx_tid_hw_queue_aggr(sc, tid->an, tid);
5793 ath_tx_tid_hw_queue_norm(sc, tid->an, tid);
5795 /* Not empty? Re-schedule */
5796 if (tid->axq_depth != 0)
5805 if (txq->axq_aggr_depth + txq->fifo.axq_depth >= sc->sc_hwq_limit_aggr) {
5808 if (txq->axq_depth >= sc->sc_hwq_limit_nonaggr) {
5818 * but are pending a leaking frame in response to a ps-poll?
5837 struct ieee80211_node *ni = &an->an_node;
5843 tap = &ni->ni_tx_ampdu[tid];
5848 * Is AMPDU-TX running?
5862 return !! (tap->txa_flags & IEEE80211_AGGR_RUNNING);
5866 * Is AMPDU-TX negotiation pending?
5880 return !! (tap->txa_flags & IEEE80211_AGGR_XCHGPEND);
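
ath_tx_ampdu_running() and ath_tx_ampdu_pending() reduce to flag tests on tap->txa_flags. A self-contained restatement with illustrative flag values; the real IEEE80211_AGGR_* definitions live in net80211:

    #include <stdbool.h>

    #define AGGR_RUNNING_MODEL  0x0001  /* illustrative value only */
    #define AGGR_XCHGPEND_MODEL 0x0002  /* illustrative value only */

    static bool
    ampdu_tx_running(unsigned int txa_flags)
    {
        return ((txa_flags & AGGR_RUNNING_MODEL) != 0);
    }

    static bool
    ampdu_tx_pending(unsigned int txa_flags)
    {
        return ((txa_flags & AGGR_XCHGPEND_MODEL) != 0);
    }
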
5884 * Is AMPDU-TX pending for the given TID?
5893 * XXX there's no timeout handler we can override?
5899 struct ath_softc *sc = ni->ni_ic->ic_softc;
5900 int tid = tap->txa_tid;
5902 struct ath_tid *atid = &an->an_tid[tid];
5909 * However, net80211 will keep self-assigning sequence numbers
5917 * a "bf->bf_state.bfs_dobaw" flag, and this isn't set for these
5933 if (atid->addba_tx_pending == 0) {
5935 atid->addba_tx_pending = 1;
5942 ni->ni_macaddr,
5947 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5949 return sc->sc_addba_request(ni, tap, dialogtoken, baparamset,
5961 * Note! net80211 keeps self-assigning sequence numbers until
5962 * ampdu is negotiated. This means the initially-negotiated BAW left
5963 * edge won't match the ni->ni_txseq.
5966 * ni->ni_txseq.
5969 * addba request should be tagged as aggregate and queued as non-aggregate
5977 struct ath_softc *sc = ni->ni_ic->ic_softc;
5978 int tid = tap->txa_tid;
5980 struct ath_tid *atid = &an->an_tid[tid];
5985 ni->ni_macaddr,
5991 __func__, tap->txa_start, ni->ni_txseqs[tid]);
5999 r = sc->sc_addba_response(ni, tap, status, code, batimeout);
6002 atid->addba_tx_pending = 0;
6008 tap->txa_start = ni->ni_txseqs[tid];
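
Because net80211 keeps assigning sequence numbers while ADDBA negotiation is in flight, the response handler re-pins txa_start to the node's current TX sequence number before resuming the TID. A minimal, hypothetical model of that fixup:

    #include <stdint.h>

    /* Hypothetical stand-in for the state the ADDBA response touches. */
    struct addba_model {
        uint16_t txa_start;     /* negotiated BAW left edge */
        uint16_t ni_txseq;      /* next sequence number net80211 will assign */
        int addba_tx_pending;   /* driver-side "negotiation in flight" flag */
    };

    /* Negotiation is over: clear the pending flag and pull the BAW left edge
     * up to wherever the sequence counter has drifted in the meantime. */
    static void
    addba_response_resync(struct addba_model *s)
    {
        s->addba_tx_pending = 0;
        s->txa_start = s->ni_txseq;
    }
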
6023 struct ath_softc *sc = ni->ni_ic->ic_softc;
6024 int tid = tap->txa_tid;
6026 struct ath_tid *atid = &an->an_tid[tid];
6032 ni->ni_macaddr,
6041 if (atid->bar_wait) {
6047 atid->bar_tx = 1;
6053 sc->sc_addba_stop(ni, tap);
6069 * progress - it means something else is also doing
6072 if (atid->cleanup_inprogress) {
6079 if (! atid->cleanup_inprogress)
6112 tid = &an->an_tid[i];
6113 if (tid->hwq_depth == 0)
6118 an->an_node.ni_macaddr,
6125 if (! tid->cleanup_inprogress) {
6131 if (! tid->cleanup_inprogress)
6149 * ic->ic_addba_stop().
6151 * XXX This uses a hard-coded max BAR count value; the whole
6158 struct ath_softc *sc = ni->ni_ic->ic_softc;
6159 int tid = tap->txa_tid;
6161 struct ath_tid *atid = &an->an_tid[tid];
6162 int attempts = tap->txa_attempts;
6166 …"%s: %6D: called; txa_tid=%d, atid->tid=%d, status=%d, attempts=%d, txa_start=%d, txa_seqpending=%…
6168 ni->ni_macaddr,
6170 tap->txa_tid,
6171 atid->tid,
6174 tap->txa_start,
6175 tap->txa_seqpending);
6189 old_txa_start = tap->txa_start;
6190 sc->sc_bar_response(ni, tap, status);
6191 if (tap->txa_start != old_txa_start) {
6192 device_printf(sc->sc_dev, "%s: tid=%d; txa_start=%d, old=%d, adjusting\n",
6195 tap->txa_start,
6198 tap->txa_start = old_txa_start;
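
The BAR-response path lets the stack's handler run but restores txa_start if the handler moved it, since this driver owns the BAW accounting. A sketch of that wrapper pattern; the types here are stand-ins:

    #include <stdint.h>

    struct tap_model {
        uint16_t txa_start;     /* stand-in for ieee80211_tx_ampdu.txa_start */
    };

    /* Let the stack's handler run, but put txa_start back if it moved it;
     * the driver's BAW bookkeeping stays the single source of truth. */
    static void
    bar_response_wrapper(struct tap_model *tap,
        void (*net80211_bar_response)(struct tap_model *, int), int status)
    {
        uint16_t old_txa_start = tap->txa_start;

        net80211_bar_response(tap, status);
        if (tap->txa_start != old_txa_start)
            tap->txa_start = old_txa_start;
    }
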
6204 * XXX to a non-aggregate session. So we must unpause the
6212 if (atid->bar_tx == 0 || atid->bar_wait == 0)
6216 atid->bar_tx, atid->bar_wait);
6231 struct ath_softc *sc = ni->ni_ic->ic_softc;
6232 int tid = tap->txa_tid;
6234 struct ath_tid *atid = &an->an_tid[tid];
6239 ni->ni_macaddr,
6244 atid->addba_tx_pending = 0;
6248 sc->sc_addba_response_timeout(ni, tap);
6265 return (an->an_is_powersave);
6282 * doing node/TID operations. There are other complications -
6283 * the sched/unsched operations involve walking the per-txq
6298 if (an->an_is_powersave) {
6301 __func__, an->an_node.ni_macaddr, ":");
6307 atid = &an->an_tid[tid];
6308 txq = sc->sc_ac2q[atid->ac];
6314 an->an_is_powersave = 1;
6335 if (an->an_is_powersave == 0) {
6344 an->an_is_powersave = 0;
6348 an->an_leak_count = 0;
6351 atid = &an->an_tid[tid];
6352 txq = sc->sc_ac2q[atid->ac];
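
The power-save handlers pause every TID's software queue when a node goes to sleep and, on wake-up, clear the ps-poll leak counter and resume them. A simplified, purely illustrative model of that bookkeeping:

    #define NUM_TID_MODEL 16    /* stand-in for the driver's per-node TID count */

    struct node_ps_model {
        int is_powersave;
        int leak_count;                  /* ps-poll "leak one frame" budget */
        int tid_paused[NUM_TID_MODEL];   /* pause refcount per software TID queue */
    };

    static void
    node_sleep(struct node_ps_model *an)
    {
        int tid;

        if (an->is_powersave)
            return;                      /* already asleep */
        for (tid = 0; tid < NUM_TID_MODEL; tid++)
            an->tid_paused[tid]++;       /* pause each software TID queue */
        an->is_powersave = 1;
    }

    static void
    node_wakeup(struct node_ps_model *an)
    {
        int tid;

        if (!an->is_powersave)
            return;
        an->is_powersave = 0;
        an->leak_count = 0;              /* no more single-frame leaks needed */
        for (tid = 0; tid < NUM_TID_MODEL; tid++)
            an->tid_paused[tid]--;       /* resume each software TID queue */
    }
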
6382 sc->sc_tx_desclen = sizeof(struct ath_desc);
6383 sc->sc_tx_statuslen = sizeof(struct ath_desc);
6384 sc->sc_tx_nmaps = 1; /* only one buffer per TX desc */
6386 sc->sc_tx.xmit_setup = ath_legacy_dma_txsetup;
6387 sc->sc_tx.xmit_teardown = ath_legacy_dma_txteardown;
6388 sc->sc_tx.xmit_attach_comp_func = ath_legacy_attach_comp_func;
6390 sc->sc_tx.xmit_dma_restart = ath_legacy_tx_dma_restart;
6391 sc->sc_tx.xmit_handoff = ath_legacy_xmit_handoff;
6393 sc->sc_tx.xmit_drain = ath_legacy_tx_drain;
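
The attach code above fills in a method table so legacy (non-EDMA) and EDMA chips can sit behind the same TX entry points. A generic sketch of the pattern; only the shape matches the driver, the names are hypothetical:

    /* Hypothetical method table; an EDMA attach would install a different
     * set of implementations behind the same pointers. */
    struct tx_methods_model {
        int  (*xmit_setup)(void *sc);
        int  (*xmit_teardown)(void *sc);
        void (*xmit_drain)(void *sc, int reset_type);
    };

    static int
    legacy_setup_model(void *sc)
    {
        (void)sc;
        return (0);
    }

    static int
    legacy_teardown_model(void *sc)
    {
        (void)sc;
        return (0);
    }

    static void
    legacy_drain_model(void *sc, int reset_type)
    {
        (void)sc;
        (void)reset_type;
    }

    /* Attach-time wiring: callers then dispatch through the table without
     * caring which chip family is underneath. */
    static void
    attach_legacy_tx_model(struct tx_methods_model *m)
    {
        m->xmit_setup = legacy_setup_model;
        m->xmit_teardown = legacy_teardown_model;
        m->xmit_drain = legacy_drain_model;
    }
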