xref: /freebsd/sys/dev/ath/if_ath_tx.c (revision 0f27aaf940f2fa5a6540285537b33115a96161a4)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 #include "opt_wlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/priv.h>
61 
62 #include <machine/bus.h>
63 
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71 
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_regdomain.h>
74 #ifdef IEEE80211_SUPPORT_SUPERG
75 #include <net80211/ieee80211_superg.h>
76 #endif
77 #ifdef IEEE80211_SUPPORT_TDMA
78 #include <net80211/ieee80211_tdma.h>
79 #endif
80 
81 #include <net/bpf.h>
82 
83 #ifdef INET
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86 #endif
87 
88 #include <dev/ath/if_athvar.h>
89 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
90 #include <dev/ath/ath_hal/ah_diagcodes.h>
91 
92 #include <dev/ath/if_ath_debug.h>
93 
94 #ifdef ATH_TX99_DIAG
95 #include <dev/ath/ath_tx99/ath_tx99.h>
96 #endif
97 
98 #include <dev/ath/if_ath_misc.h>
99 #include <dev/ath/if_ath_tx.h>
100 
/*
 * Undo a partially-completed ath_txfrag_setup(): return every
 * ath_buf on the frags list to the driver's free buffer list and
 * drop the node reference that was taken for each fragment.
 *
 * Caller must hold the txbuf lock.  Buffers are assumed "clean"
 * (no mbuf or DMA state attached that would need teardown).
 */
void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		/*
		 * bf is always the current head of frags (we remove as we
		 * iterate from the front), so REMOVE_HEAD removes bf itself;
		 * 'next' was captured by FOREACH_SAFE before the removal.
		 */
		STAILQ_REMOVE_HEAD(frags, bf_list);
		STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
		/* release the per-fragment hold taken in ath_txfrag_setup */
		ieee80211_node_decref(ni);
	}
}
116 
117 /*
118  * Setup xmit of a fragmented frame.  Allocate a buffer
119  * for each frag and bump the node reference count to
120  * reflect the held reference to be setup by ath_tx_start.
121  */
int
ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
	struct mbuf *m0, struct ieee80211_node *ni)
{
	struct mbuf *m;
	struct ath_buf *bf;

	ATH_TXBUF_LOCK(sc);
	/*
	 * One ath_buf per fragment after the first; fragments are
	 * chained off m0 via m_nextpkt.  m0 itself uses the caller's
	 * buffer, so we start at m0->m_nextpkt.
	 */
	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
		bf = _ath_getbuf_locked(sc);
		if (bf == NULL) {	/* out of buffers, cleanup */
			/*
			 * All-or-nothing: release buffers/refs already
			 * acquired; frags ends up empty so we return 0.
			 */
			ath_txfrag_cleanup(sc, frags, ni);
			break;
		}
		/* hold a node ref per fragment; ath_tx_start consumes it */
		ieee80211_node_incref(ni);
		STAILQ_INSERT_TAIL(frags, bf, bf_list);
	}
	ATH_TXBUF_UNLOCK(sc);

	/* non-zero iff a buffer was obtained for every fragment */
	return !STAILQ_EMPTY(frags);
}
143 
144 /*
145  * Reclaim mbuf resources.  For fragmented frames we
146  * need to claim each frag chained with m_nextpkt.
147  */
148 void
149 ath_freetx(struct mbuf *m)
150 {
151 	struct mbuf *next;
152 
153 	do {
154 		next = m->m_nextpkt;
155 		m->m_nextpkt = NULL;
156 		m_freem(m);
157 	} while ((m = next) != NULL);
158 }
159 
/*
 * Map an outbound mbuf chain for DMA, coalescing it if it spans
 * more segments than fit in our descriptor budget (ATH_TXDESC).
 * On success bf->bf_segs/bf_nseg/bf_m are valid and the map is
 * synced PREWRITE; on failure the mbuf chain has been freed and
 * an errno is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		/*
		 * Force bf_nseg past the limit so the linearize
		 * path below is taken.
		 */
		bf->bf_nseg = ATH_TXDESC+1;
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		m0 = m;
		/* retry the mapping with the collapsed chain */
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	/* NB: m0 may have been replaced by m_collapse above */
	bf->bf_m = m0;

	return 0;
}
217 
/*
 * Complete the descriptor chain for a mapped buffer and hand it to
 * a hardware transmit queue (or to the s/w-only CAB queue).  The
 * TDMA variant must cope with writes to TXDP being ignored while a
 * beacon-gated queue is enabled, hence the PUTPENDING deferral.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		/* chain descriptors; last one terminates the list */
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("busy status 0x%x", bf->bf_flags));
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    txq->axq_depth);
			} else {
				/* remember to program TXDP once the q idles */
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
			}
		} else {
			/* append to the existing h/w chain */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
					STAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* queue empty: program TXDP directly */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* append to the existing h/w chain */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
#endif /* IEEE80211_SUPPORT_TDMA */
		/* next frame chains off our last descriptor's link field */
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		/* s/w CAB queue: no h/w start; drained at SWBA */
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}
350 
351 int
352 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
353     struct mbuf *m0)
354 {
355 	struct ieee80211vap *vap = ni->ni_vap;
356 	struct ath_vap *avp = ATH_VAP(vap);
357 	struct ath_hal *ah = sc->sc_ah;
358 	struct ifnet *ifp = sc->sc_ifp;
359 	struct ieee80211com *ic = ifp->if_l2com;
360 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
361 	int error, iswep, ismcast, isfrag, ismrr;
362 	int keyix, hdrlen, pktlen, try0;
363 	u_int8_t rix, txrate, ctsrate;
364 	u_int8_t cix = 0xff;		/* NB: silence compiler */
365 	struct ath_desc *ds;
366 	struct ath_txq *txq;
367 	struct ieee80211_frame *wh;
368 	u_int subtype, flags, ctsduration;
369 	HAL_PKT_TYPE atype;
370 	const HAL_RATE_TABLE *rt;
371 	HAL_BOOL shortPreamble;
372 	struct ath_node *an;
373 	u_int pri;
374 
375 	wh = mtod(m0, struct ieee80211_frame *);
376 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
377 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
378 	isfrag = m0->m_flags & M_FRAG;
379 	hdrlen = ieee80211_anyhdrsize(wh);
380 	/*
381 	 * Packet length must not include any
382 	 * pad bytes; deduct them here.
383 	 */
384 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
385 
386 	if (iswep) {
387 		const struct ieee80211_cipher *cip;
388 		struct ieee80211_key *k;
389 
390 		/*
391 		 * Construct the 802.11 header+trailer for an encrypted
392 		 * frame. The only reason this can fail is because of an
393 		 * unknown or unsupported cipher/key type.
394 		 */
395 		k = ieee80211_crypto_encap(ni, m0);
396 		if (k == NULL) {
397 			/*
398 			 * This can happen when the key is yanked after the
399 			 * frame was queued.  Just discard the frame; the
400 			 * 802.11 layer counts failures and provides
401 			 * debugging/diagnostics.
402 			 */
403 			ath_freetx(m0);
404 			return EIO;
405 		}
406 		/*
407 		 * Adjust the packet + header lengths for the crypto
408 		 * additions and calculate the h/w key index.  When
409 		 * a s/w mic is done the frame will have had any mic
410 		 * added to it prior to entry so m0->m_pkthdr.len will
411 		 * account for it. Otherwise we need to add it to the
412 		 * packet length.
413 		 */
414 		cip = k->wk_cipher;
415 		hdrlen += cip->ic_header;
416 		pktlen += cip->ic_header + cip->ic_trailer;
417 		/* NB: frags always have any TKIP MIC done in s/w */
418 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
419 			pktlen += cip->ic_miclen;
420 		keyix = k->wk_keyix;
421 
422 		/* packet header may have moved, reset our local pointer */
423 		wh = mtod(m0, struct ieee80211_frame *);
424 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
425 		/*
426 		 * Use station key cache slot, if assigned.
427 		 */
428 		keyix = ni->ni_ucastkey.wk_keyix;
429 		if (keyix == IEEE80211_KEYIX_NONE)
430 			keyix = HAL_TXKEYIX_INVALID;
431 	} else
432 		keyix = HAL_TXKEYIX_INVALID;
433 
434 	pktlen += IEEE80211_CRC_LEN;
435 
436 	/*
437 	 * Load the DMA map so any coalescing is done.  This
438 	 * also calculates the number of descriptors we need.
439 	 */
440 	error = ath_tx_dmasetup(sc, bf, m0);
441 	if (error != 0)
442 		return error;
443 	bf->bf_node = ni;			/* NB: held reference */
444 	m0 = bf->bf_m;				/* NB: may have changed */
445 	wh = mtod(m0, struct ieee80211_frame *);
446 
447 	/* setup descriptors */
448 	ds = bf->bf_desc;
449 	rt = sc->sc_currates;
450 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
451 
452 	/*
453 	 * NB: the 802.11 layer marks whether or not we should
454 	 * use short preamble based on the current mode and
455 	 * negotiated parameters.
456 	 */
457 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
458 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
459 		shortPreamble = AH_TRUE;
460 		sc->sc_stats.ast_tx_shortpre++;
461 	} else {
462 		shortPreamble = AH_FALSE;
463 	}
464 
465 	an = ATH_NODE(ni);
466 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
467 	ismrr = 0;				/* default no multi-rate retry*/
468 	pri = M_WME_GETAC(m0);			/* honor classification */
469 	/* XXX use txparams instead of fixed values */
470 	/*
471 	 * Calculate Atheros packet type from IEEE80211 packet header,
472 	 * setup for rate calculations, and select h/w transmit queue.
473 	 */
474 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
475 	case IEEE80211_FC0_TYPE_MGT:
476 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
477 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
478 			atype = HAL_PKT_TYPE_BEACON;
479 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
480 			atype = HAL_PKT_TYPE_PROBE_RESP;
481 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
482 			atype = HAL_PKT_TYPE_ATIM;
483 		else
484 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
485 		rix = an->an_mgmtrix;
486 		txrate = rt->info[rix].rateCode;
487 		if (shortPreamble)
488 			txrate |= rt->info[rix].shortPreamble;
489 		try0 = ATH_TXMGTTRY;
490 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
491 		break;
492 	case IEEE80211_FC0_TYPE_CTL:
493 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
494 		rix = an->an_mgmtrix;
495 		txrate = rt->info[rix].rateCode;
496 		if (shortPreamble)
497 			txrate |= rt->info[rix].shortPreamble;
498 		try0 = ATH_TXMGTTRY;
499 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
500 		break;
501 	case IEEE80211_FC0_TYPE_DATA:
502 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
503 		/*
504 		 * Data frames: multicast frames go out at a fixed rate,
505 		 * EAPOL frames use the mgmt frame rate; otherwise consult
506 		 * the rate control module for the rate to use.
507 		 */
508 		if (ismcast) {
509 			rix = an->an_mcastrix;
510 			txrate = rt->info[rix].rateCode;
511 			if (shortPreamble)
512 				txrate |= rt->info[rix].shortPreamble;
513 			try0 = 1;
514 		} else if (m0->m_flags & M_EAPOL) {
515 			/* XXX? maybe always use long preamble? */
516 			rix = an->an_mgmtrix;
517 			txrate = rt->info[rix].rateCode;
518 			if (shortPreamble)
519 				txrate |= rt->info[rix].shortPreamble;
520 			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
521 		} else {
522 			ath_rate_findrate(sc, an, shortPreamble, pktlen,
523 				&rix, &try0, &txrate);
524 			sc->sc_txrix = rix;		/* for LED blinking */
525 			sc->sc_lastdatarix = rix;	/* for fast frames */
526 			if (try0 != ATH_TXMAXTRY)
527 				ismrr = 1;
528 		}
529 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
530 			flags |= HAL_TXDESC_NOACK;
531 		break;
532 	default:
533 		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
534 			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
535 		/* XXX statistic */
536 		ath_freetx(m0);
537 		return EIO;
538 	}
539 	txq = sc->sc_ac2q[pri];
540 
541 	/*
542 	 * When servicing one or more stations in power-save mode
543 	 * (or) if there is some mcast data waiting on the mcast
544 	 * queue (to prevent out of order delivery) multicast
545 	 * frames must be buffered until after the beacon.
546 	 */
547 	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
548 		txq = &avp->av_mcastq;
549 
550 	/*
551 	 * Calculate miscellaneous flags.
552 	 */
553 	if (ismcast) {
554 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
555 	} else if (pktlen > vap->iv_rtsthreshold &&
556 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
557 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
558 		cix = rt->info[rix].controlRate;
559 		sc->sc_stats.ast_tx_rts++;
560 	}
561 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
562 		sc->sc_stats.ast_tx_noack++;
563 #ifdef IEEE80211_SUPPORT_TDMA
564 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
565 		DPRINTF(sc, ATH_DEBUG_TDMA,
566 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
567 		sc->sc_stats.ast_tdma_ack++;
568 		ath_freetx(m0);
569 		return EIO;
570 	}
571 #endif
572 
573 	/*
574 	 * If 802.11g protection is enabled, determine whether
575 	 * to use RTS/CTS or just CTS.  Note that this is only
576 	 * done for OFDM unicast frames.
577 	 */
578 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
579 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
580 	    (flags & HAL_TXDESC_NOACK) == 0) {
581 		/* XXX fragments must use CCK rates w/ protection */
582 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
583 			flags |= HAL_TXDESC_RTSENA;
584 		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
585 			flags |= HAL_TXDESC_CTSENA;
586 		if (isfrag) {
587 			/*
588 			 * For frags it would be desirable to use the
589 			 * highest CCK rate for RTS/CTS.  But stations
590 			 * farther away may detect it at a lower CCK rate
591 			 * so use the configured protection rate instead
592 			 * (for now).
593 			 */
594 			cix = rt->info[sc->sc_protrix].controlRate;
595 		} else
596 			cix = rt->info[sc->sc_protrix].controlRate;
597 		sc->sc_stats.ast_tx_protect++;
598 	}
599 
600 	/*
601 	 * Calculate duration.  This logically belongs in the 802.11
602 	 * layer but it lacks sufficient information to calculate it.
603 	 */
604 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
605 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
606 		u_int16_t dur;
607 		if (shortPreamble)
608 			dur = rt->info[rix].spAckDuration;
609 		else
610 			dur = rt->info[rix].lpAckDuration;
611 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
612 			dur += dur;		/* additional SIFS+ACK */
613 			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
614 			/*
615 			 * Include the size of next fragment so NAV is
616 			 * updated properly.  The last fragment uses only
617 			 * the ACK duration
618 			 */
619 			dur += ath_hal_computetxtime(ah, rt,
620 					m0->m_nextpkt->m_pkthdr.len,
621 					rix, shortPreamble);
622 		}
623 		if (isfrag) {
624 			/*
625 			 * Force hardware to use computed duration for next
626 			 * fragment by disabling multi-rate retry which updates
627 			 * duration based on the multi-rate duration table.
628 			 */
629 			ismrr = 0;
630 			try0 = ATH_TXMGTTRY;	/* XXX? */
631 		}
632 		*(u_int16_t *)wh->i_dur = htole16(dur);
633 	}
634 
635 	/*
636 	 * Calculate RTS/CTS rate and duration if needed.
637 	 */
638 	ctsduration = 0;
639 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
640 		/*
641 		 * CTS transmit rate is derived from the transmit rate
642 		 * by looking in the h/w rate table.  We must also factor
643 		 * in whether or not a short preamble is to be used.
644 		 */
645 		/* NB: cix is set above where RTS/CTS is enabled */
646 		KASSERT(cix != 0xff, ("cix not setup"));
647 		ctsrate = rt->info[cix].rateCode;
648 		/*
649 		 * Compute the transmit duration based on the frame
650 		 * size and the size of an ACK frame.  We call into the
651 		 * HAL to do the computation since it depends on the
652 		 * characteristics of the actual PHY being used.
653 		 *
654 		 * NB: CTS is assumed the same size as an ACK so we can
655 		 *     use the precalculated ACK durations.
656 		 */
657 		if (shortPreamble) {
658 			ctsrate |= rt->info[cix].shortPreamble;
659 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
660 				ctsduration += rt->info[cix].spAckDuration;
661 			ctsduration += ath_hal_computetxtime(ah,
662 				rt, pktlen, rix, AH_TRUE);
663 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
664 				ctsduration += rt->info[rix].spAckDuration;
665 		} else {
666 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
667 				ctsduration += rt->info[cix].lpAckDuration;
668 			ctsduration += ath_hal_computetxtime(ah,
669 				rt, pktlen, rix, AH_FALSE);
670 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
671 				ctsduration += rt->info[rix].lpAckDuration;
672 		}
673 		/*
674 		 * Must disable multi-rate retry when using RTS/CTS.
675 		 */
676 		ismrr = 0;
677 		try0 = ATH_TXMGTTRY;		/* XXX */
678 	} else
679 		ctsrate = 0;
680 
681 	/*
682 	 * At this point we are committed to sending the frame
683 	 * and we don't need to look at m_nextpkt; clear it in
684 	 * case this frame is part of frag chain.
685 	 */
686 	m0->m_nextpkt = NULL;
687 
688 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
689 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
690 		    sc->sc_hwmap[rix].ieeerate, -1);
691 
692 	if (ieee80211_radiotap_active_vap(vap)) {
693 		u_int64_t tsf = ath_hal_gettsf64(ah);
694 
695 		sc->sc_tx_th.wt_tsf = htole64(tsf);
696 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
697 		if (iswep)
698 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
699 		if (isfrag)
700 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
701 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
702 		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
703 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
704 
705 		ieee80211_radiotap_tx(vap, m0);
706 	}
707 
708 	/*
709 	 * Determine if a tx interrupt should be generated for
710 	 * this descriptor.  We take a tx interrupt to reap
711 	 * descriptors when the h/w hits an EOL condition or
712 	 * when the descriptor is specifically marked to generate
713 	 * an interrupt.  We periodically mark descriptors in this
714 	 * way to insure timely replenishing of the supply needed
715 	 * for sending frames.  Defering interrupts reduces system
716 	 * load and potentially allows more concurrent work to be
717 	 * done but if done to aggressively can cause senders to
718 	 * backup.
719 	 *
720 	 * NB: use >= to deal with sc_txintrperiod changing
721 	 *     dynamically through sysctl.
722 	 */
723 	if (flags & HAL_TXDESC_INTREQ) {
724 		txq->axq_intrcnt = 0;
725 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
726 		flags |= HAL_TXDESC_INTREQ;
727 		txq->axq_intrcnt = 0;
728 	}
729 
730 	/*
731 	 * Formulate first tx descriptor with tx controls.
732 	 */
733 	/* XXX check return value? */
734 	ath_hal_setuptxdesc(ah, ds
735 		, pktlen		/* packet length */
736 		, hdrlen		/* header length */
737 		, atype			/* Atheros packet type */
738 		, ni->ni_txpower	/* txpower */
739 		, txrate, try0		/* series 0 rate/tries */
740 		, keyix			/* key cache index */
741 		, sc->sc_txantenna	/* antenna mode */
742 		, flags			/* flags */
743 		, ctsrate		/* rts/cts rate */
744 		, ctsduration		/* rts/cts duration */
745 	);
746 	bf->bf_txflags = flags;
747 	/*
748 	 * Setup the multi-rate retry state only when we're
749 	 * going to use it.  This assumes ath_hal_setuptxdesc
750 	 * initializes the descriptors (so we don't have to)
751 	 * when the hardware supports multi-rate retry and
752 	 * we don't use it.
753 	 */
754 	if (ismrr)
755 		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
756 
757 	ath_tx_handoff(sc, txq, bf);
758 	return 0;
759 }
760 
/*
 * Transmit a frame with explicit, caller-supplied transmit
 * parameters (bpf/raw injection path).  Rates, retry counts,
 * RTS/CTS and priority come from 'params' rather than being
 * derived from the 802.11 header; frames are typed PSPOLL so
 * the hardware leaves sequence/duration fields alone.
 *
 * Returns 0 on success or an errno; on error the mbuf chain
 * has been freed.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			ath_freetx(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;			/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(sc, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrix = rix;
	try0 = params->ibp_try0;
	ismrr = (params->ibp_try1 != 0);
	/* antenna is packed into the top bits of ibp_pri */
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		/* same SIFS+CTS / SIFS+ACK accounting as ath_tx_start */
		cix = ath_tx_findrix(sc, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		/* multi-rate retry is incompatible with RTS/CTS here */
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	/* low two bits of ibp_pri select the WME access category */
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (m0->m_flags & M_FRAG)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;

	if (ismrr) {
		/* program retry series 1-3 from the caller's parameters */
		rix = ath_tx_findrix(sc, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(sc, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(sc, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/* NB: no buffered multicast in power save support */
	ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
	return 0;
}
966 
967 int
968 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
969 	const struct ieee80211_bpf_params *params)
970 {
971 	struct ieee80211com *ic = ni->ni_ic;
972 	struct ifnet *ifp = ic->ic_ifp;
973 	struct ath_softc *sc = ifp->if_softc;
974 	struct ath_buf *bf;
975 	int error;
976 
977 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
978 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
979 		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
980 			"!running" : "invalid");
981 		m_freem(m);
982 		error = ENETDOWN;
983 		goto bad;
984 	}
985 	/*
986 	 * Grab a TX buffer and associated resources.
987 	 */
988 	bf = ath_getbuf(sc);
989 	if (bf == NULL) {
990 		sc->sc_stats.ast_tx_nobuf++;
991 		m_freem(m);
992 		error = ENOBUFS;
993 		goto bad;
994 	}
995 
996 	if (params == NULL) {
997 		/*
998 		 * Legacy path; interpret frame contents to decide
999 		 * precisely how to send the frame.
1000 		 */
1001 		if (ath_tx_start(sc, ni, bf, m)) {
1002 			error = EIO;		/* XXX */
1003 			goto bad2;
1004 		}
1005 	} else {
1006 		/*
1007 		 * Caller supplied explicit parameters to use in
1008 		 * sending the frame.
1009 		 */
1010 		if (ath_tx_raw_start(sc, ni, bf, m, params)) {
1011 			error = EIO;		/* XXX */
1012 			goto bad2;
1013 		}
1014 	}
1015 	sc->sc_wd_timer = 5;
1016 	ifp->if_opackets++;
1017 	sc->sc_stats.ast_tx_raw++;
1018 
1019 	return 0;
1020 bad2:
1021 	ATH_TXBUF_LOCK(sc);
1022 	STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
1023 	ATH_TXBUF_UNLOCK(sc);
1024 bad:
1025 	ifp->if_oerrors++;
1026 	sc->sc_stats.ast_tx_raw_fail++;
1027 	ieee80211_free_node(ni);
1028 	return error;
1029 }
1030