xref: /freebsd/sys/dev/ath/if_ath.c (revision cf4c5a533126ca1ddb1f070af73f8f53b9e77fd4)
1 /*-
2  * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification.
11  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
13  *    redistribution must be conditioned upon including a substantially
14  *    similar Disclaimer requirement for further binary redistribution.
15  *
16  * NO WARRANTY
17  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
20  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
22  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
25  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
27  * THE POSSIBILITY OF SUCH DAMAGES.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 /*
34  * Driver for the Atheros Wireless LAN controller.
35  *
36  * This software is derived from work of Atsushi Onoe; his contribution
37  * is greatly appreciated.
38  */
39 
40 #include "opt_inet.h"
41 #include "opt_ath.h"
42 #include "opt_wlan.h"
43 
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/sysctl.h>
47 #include <sys/mbuf.h>
48 #include <sys/malloc.h>
49 #include <sys/lock.h>
50 #include <sys/mutex.h>
51 #include <sys/kernel.h>
52 #include <sys/socket.h>
53 #include <sys/sockio.h>
54 #include <sys/errno.h>
55 #include <sys/callout.h>
56 #include <sys/bus.h>
57 #include <sys/endian.h>
58 #include <sys/kthread.h>
59 #include <sys/taskqueue.h>
60 #include <sys/priv.h>
61 
62 #include <machine/bus.h>
63 
64 #include <net/if.h>
65 #include <net/if_dl.h>
66 #include <net/if_media.h>
67 #include <net/if_types.h>
68 #include <net/if_arp.h>
69 #include <net/ethernet.h>
70 #include <net/if_llc.h>
71 
72 #include <net80211/ieee80211_var.h>
73 #include <net80211/ieee80211_regdomain.h>
74 #ifdef IEEE80211_SUPPORT_SUPERG
75 #include <net80211/ieee80211_superg.h>
76 #endif
77 #ifdef IEEE80211_SUPPORT_TDMA
78 #include <net80211/ieee80211_tdma.h>
79 #endif
80 
81 #include <net/bpf.h>
82 
83 #ifdef INET
84 #include <netinet/in.h>
85 #include <netinet/if_ether.h>
86 #endif
87 
88 #include <dev/ath/if_athvar.h>
89 #include <dev/ath/ath_hal/ah_devid.h>		/* XXX for softled */
90 
91 #ifdef ATH_TX99_DIAG
92 #include <dev/ath/ath_tx99/ath_tx99.h>
93 #endif
94 
95 /*
96  * ATH_BCBUF determines the number of vap's that can transmit
97  * beacons and also (currently) the number of vap's that can
98  * have unique mac addresses/bssid.  When staggering beacons
99  * 4 is probably a good max as otherwise the beacons become
100  * very closely spaced and there is limited time for cab q traffic
101  * to go out.  You can burst beacons instead but that is not good
102  * for stations in power save and at some point you really want
103  * another radio (and channel).
104  *
105  * The limit on the number of mac addresses is tied to our use of
106  * the U/L bit and tracking addresses in a byte; it would be
107  * worthwhile to allow more for applications like proxy sta.
108  */
109 CTASSERT(ATH_BCBUF <= 8);
110 
111 /* unaligned little endian access */
112 #define LE_READ_2(p)							\
113 	((u_int16_t)							\
114 	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8)))
115 #define LE_READ_4(p)							\
116 	((u_int32_t)							\
117 	 ((((u_int8_t *)(p))[0]      ) | (((u_int8_t *)(p))[1] <<  8) |	\
118 	  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
119 
120 static struct ieee80211vap *ath_vap_create(struct ieee80211com *,
121 		    const char name[IFNAMSIZ], int unit, int opmode,
122 		    int flags, const uint8_t bssid[IEEE80211_ADDR_LEN],
123 		    const uint8_t mac[IEEE80211_ADDR_LEN]);
124 static void	ath_vap_delete(struct ieee80211vap *);
125 static void	ath_init(void *);
126 static void	ath_stop_locked(struct ifnet *);
127 static void	ath_stop(struct ifnet *);
128 static void	ath_start(struct ifnet *);
129 static int	ath_reset(struct ifnet *);
130 static int	ath_reset_vap(struct ieee80211vap *, u_long);
131 static int	ath_media_change(struct ifnet *);
132 static void	ath_watchdog(void *);
133 static int	ath_ioctl(struct ifnet *, u_long, caddr_t);
134 static void	ath_fatal_proc(void *, int);
135 static void	ath_bmiss_vap(struct ieee80211vap *);
136 static void	ath_bmiss_proc(void *, int);
137 static int	ath_keyset(struct ath_softc *, const struct ieee80211_key *,
138 			struct ieee80211_node *);
139 static int	ath_key_alloc(struct ieee80211vap *,
140 			struct ieee80211_key *,
141 			ieee80211_keyix *, ieee80211_keyix *);
142 static int	ath_key_delete(struct ieee80211vap *,
143 			const struct ieee80211_key *);
144 static int	ath_key_set(struct ieee80211vap *, const struct ieee80211_key *,
145 			const u_int8_t mac[IEEE80211_ADDR_LEN]);
146 static void	ath_key_update_begin(struct ieee80211vap *);
147 static void	ath_key_update_end(struct ieee80211vap *);
148 static void	ath_update_mcast(struct ifnet *);
149 static void	ath_update_promisc(struct ifnet *);
150 static void	ath_mode_init(struct ath_softc *);
151 static void	ath_setslottime(struct ath_softc *);
152 static void	ath_updateslot(struct ifnet *);
153 static int	ath_beaconq_setup(struct ath_hal *);
154 static int	ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *);
155 static void	ath_beacon_update(struct ieee80211vap *, int item);
156 static void	ath_beacon_setup(struct ath_softc *, struct ath_buf *);
157 static void	ath_beacon_proc(void *, int);
158 static struct ath_buf *ath_beacon_generate(struct ath_softc *,
159 			struct ieee80211vap *);
160 static void	ath_bstuck_proc(void *, int);
161 static void	ath_beacon_return(struct ath_softc *, struct ath_buf *);
162 static void	ath_beacon_free(struct ath_softc *);
163 static void	ath_beacon_config(struct ath_softc *, struct ieee80211vap *);
164 static void	ath_descdma_cleanup(struct ath_softc *sc,
165 			struct ath_descdma *, ath_bufhead *);
166 static int	ath_desc_alloc(struct ath_softc *);
167 static void	ath_desc_free(struct ath_softc *);
168 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,
169 			const uint8_t [IEEE80211_ADDR_LEN]);
170 static void	ath_node_free(struct ieee80211_node *);
171 static void	ath_node_getsignal(const struct ieee80211_node *,
172 			int8_t *, int8_t *);
173 static int	ath_rxbuf_init(struct ath_softc *, struct ath_buf *);
174 static void	ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
175 			int subtype, int rssi, int nf);
176 static void	ath_setdefantenna(struct ath_softc *, u_int);
177 static void	ath_rx_proc(void *, int);
178 static void	ath_txq_init(struct ath_softc *sc, struct ath_txq *, int);
179 static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype);
180 static int	ath_tx_setup(struct ath_softc *, int, int);
181 static int	ath_wme_update(struct ieee80211com *);
182 static void	ath_tx_cleanupq(struct ath_softc *, struct ath_txq *);
183 static void	ath_tx_cleanup(struct ath_softc *);
184 static void	ath_freetx(struct mbuf *);
185 static int	ath_tx_start(struct ath_softc *, struct ieee80211_node *,
186 			     struct ath_buf *, struct mbuf *);
187 static void	ath_tx_proc_q0(void *, int);
188 static void	ath_tx_proc_q0123(void *, int);
189 static void	ath_tx_proc(void *, int);
190 static void	ath_tx_draintxq(struct ath_softc *, struct ath_txq *);
191 static int	ath_chan_set(struct ath_softc *, struct ieee80211_channel *);
192 static void	ath_draintxq(struct ath_softc *);
193 static void	ath_stoprecv(struct ath_softc *);
194 static int	ath_startrecv(struct ath_softc *);
195 static void	ath_chan_change(struct ath_softc *, struct ieee80211_channel *);
196 static void	ath_scan_start(struct ieee80211com *);
197 static void	ath_scan_end(struct ieee80211com *);
198 static void	ath_set_channel(struct ieee80211com *);
199 static void	ath_calibrate(void *);
200 static int	ath_newstate(struct ieee80211vap *, enum ieee80211_state, int);
201 static void	ath_setup_stationkey(struct ieee80211_node *);
202 static void	ath_newassoc(struct ieee80211_node *, int);
203 static int	ath_setregdomain(struct ieee80211com *,
204 		    struct ieee80211_regdomain *, int,
205 		    struct ieee80211_channel []);
206 static void	ath_getradiocaps(struct ieee80211com *, int, int *,
207 		    struct ieee80211_channel []);
208 static int	ath_getchannels(struct ath_softc *);
209 static void	ath_led_event(struct ath_softc *, int);
210 
211 static int	ath_rate_setup(struct ath_softc *, u_int mode);
212 static void	ath_setcurmode(struct ath_softc *, enum ieee80211_phymode);
213 
214 static void	ath_sysctlattach(struct ath_softc *);
215 static int	ath_raw_xmit(struct ieee80211_node *,
216 			struct mbuf *, const struct ieee80211_bpf_params *);
217 static void	ath_announce(struct ath_softc *);
218 
219 #ifdef IEEE80211_SUPPORT_TDMA
220 static void	ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt,
221 		    u_int32_t bintval);
222 static void	ath_tdma_bintvalsetup(struct ath_softc *sc,
223 		    const struct ieee80211_tdma_state *tdma);
224 static void	ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap);
225 static void	ath_tdma_update(struct ieee80211_node *ni,
226 		    const struct ieee80211_tdma_param *tdma, int);
227 static void	ath_tdma_beacon_send(struct ath_softc *sc,
228 		    struct ieee80211vap *vap);
229 
/*
 * Enable/disable Clear Channel Assessment in the hal.
 *
 * Intentionally a no-op stub: the TDMA code calls this hook, but a
 * real implementation is not shipped by default (see NB below).
 */
static __inline void
ath_hal_setcca(struct ath_hal *ah, int ena)
{
	/*
	 * NB: fill me in; this is not provided by default because disabling
	 *     CCA in most locales violates regulatory.
	 */
}
238 
239 static __inline int
240 ath_hal_getcca(struct ath_hal *ah)
241 {
242 	u_int32_t diag;
243 	if (ath_hal_getcapability(ah, HAL_CAP_DIAG, 0, &diag) != HAL_OK)
244 		return 1;
245 	return ((diag & 0x500000) == 0);
246 }
247 
248 #define	TDMA_EP_MULTIPLIER	(1<<10) /* pow2 to optimize out * and / */
249 #define	TDMA_LPF_LEN		6
250 #define	TDMA_DUMMY_MARKER	0x127
251 #define	TDMA_EP_MUL(x, mul)	((x) * (mul))
252 #define	TDMA_IN(x)		(TDMA_EP_MUL((x), TDMA_EP_MULTIPLIER))
253 #define	TDMA_LPF(x, y, len) \
254     ((x != TDMA_DUMMY_MARKER) ? (((x) * ((len)-1) + (y)) / (len)) : (y))
255 #define	TDMA_SAMPLE(x, y) do {					\
256 	x = TDMA_LPF((x), TDMA_IN(y), TDMA_LPF_LEN);		\
257 } while (0)
258 #define	TDMA_EP_RND(x,mul) \
259 	((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
260 #define	TDMA_AVG(x)		TDMA_EP_RND(x, TDMA_EP_MULTIPLIER)
261 #endif /* IEEE80211_SUPPORT_TDMA */
262 
263 SYSCTL_DECL(_hw_ath);
264 
265 /* XXX validate sysctl values */
266 static	int ath_longcalinterval = 30;		/* long cals every 30 secs */
267 SYSCTL_INT(_hw_ath, OID_AUTO, longcal, CTLFLAG_RW, &ath_longcalinterval,
268 	    0, "long chip calibration interval (secs)");
269 static	int ath_shortcalinterval = 100;		/* short cals every 100 ms */
270 SYSCTL_INT(_hw_ath, OID_AUTO, shortcal, CTLFLAG_RW, &ath_shortcalinterval,
271 	    0, "short chip calibration interval (msecs)");
272 static	int ath_resetcalinterval = 20*60;	/* reset cal state 20 mins */
273 SYSCTL_INT(_hw_ath, OID_AUTO, resetcal, CTLFLAG_RW, &ath_resetcalinterval,
274 	    0, "reset chip calibration results (secs)");
275 
276 static	int ath_rxbuf = ATH_RXBUF;		/* # rx buffers to allocate */
277 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
278 	    0, "rx buffers allocated");
279 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
280 static	int ath_txbuf = ATH_TXBUF;		/* # tx buffers to allocate */
281 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
282 	    0, "tx buffers allocated");
283 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
284 
285 static	int ath_bstuck_threshold = 4;		/* max missed beacons */
286 SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
287 	    0, "max missed beacon xmits before chip reset");
288 
289 #ifdef ATH_DEBUG
290 enum {
291 	ATH_DEBUG_XMIT		= 0x00000001,	/* basic xmit operation */
292 	ATH_DEBUG_XMIT_DESC	= 0x00000002,	/* xmit descriptors */
293 	ATH_DEBUG_RECV		= 0x00000004,	/* basic recv operation */
294 	ATH_DEBUG_RECV_DESC	= 0x00000008,	/* recv descriptors */
295 	ATH_DEBUG_RATE		= 0x00000010,	/* rate control */
296 	ATH_DEBUG_RESET		= 0x00000020,	/* reset processing */
297 	ATH_DEBUG_MODE		= 0x00000040,	/* mode init/setup */
298 	ATH_DEBUG_BEACON 	= 0x00000080,	/* beacon handling */
299 	ATH_DEBUG_WATCHDOG 	= 0x00000100,	/* watchdog timeout */
300 	ATH_DEBUG_INTR		= 0x00001000,	/* ISR */
301 	ATH_DEBUG_TX_PROC	= 0x00002000,	/* tx ISR proc */
302 	ATH_DEBUG_RX_PROC	= 0x00004000,	/* rx ISR proc */
303 	ATH_DEBUG_BEACON_PROC	= 0x00008000,	/* beacon ISR proc */
304 	ATH_DEBUG_CALIBRATE	= 0x00010000,	/* periodic calibration */
305 	ATH_DEBUG_KEYCACHE	= 0x00020000,	/* key cache management */
306 	ATH_DEBUG_STATE		= 0x00040000,	/* 802.11 state transitions */
307 	ATH_DEBUG_NODE		= 0x00080000,	/* node management */
308 	ATH_DEBUG_LED		= 0x00100000,	/* led management */
309 	ATH_DEBUG_FF		= 0x00200000,	/* fast frames */
310 	ATH_DEBUG_DFS		= 0x00400000,	/* DFS processing */
311 	ATH_DEBUG_TDMA		= 0x00800000,	/* TDMA processing */
312 	ATH_DEBUG_TDMA_TIMER	= 0x01000000,	/* TDMA timer processing */
313 	ATH_DEBUG_REGDOMAIN	= 0x02000000,	/* regulatory processing */
314 	ATH_DEBUG_FATAL		= 0x80000000,	/* fatal errors */
315 	ATH_DEBUG_ANY		= 0xffffffff
316 };
317 static	int ath_debug = 0;
318 SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug,
319 	    0, "control debugging printfs");
320 TUNABLE_INT("hw.ath.debug", &ath_debug);
321 
322 #define	IFF_DUMPPKTS(sc, m) \
323 	((sc->sc_debug & (m)) || \
324 	    (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
325 #define	DPRINTF(sc, m, fmt, ...) do {				\
326 	if (sc->sc_debug & (m))					\
327 		printf(fmt, __VA_ARGS__);			\
328 } while (0)
329 #define	KEYPRINTF(sc, ix, hk, mac) do {				\
330 	if (sc->sc_debug & ATH_DEBUG_KEYCACHE)			\
331 		ath_keyprint(sc, __func__, ix, hk, mac);	\
332 } while (0)
333 static	void ath_printrxbuf(struct ath_softc *, const struct ath_buf *bf,
334 	u_int ix, int);
335 static	void ath_printtxbuf(struct ath_softc *, const struct ath_buf *bf,
336 	u_int qnum, u_int ix, int done);
337 #else
338 #define	IFF_DUMPPKTS(sc, m) \
339 	((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
340 #define	DPRINTF(sc, m, fmt, ...) do {				\
341 	(void) sc;						\
342 } while (0)
343 #define	KEYPRINTF(sc, k, ix, mac) do {				\
344 	(void) sc;						\
345 } while (0)
346 #endif
347 
348 MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers");
349 
/*
 * Attach the device: allocate the ifnet, attach the hal, probe hardware
 * capabilities (crypto, WME queues, TPC, TDMA, VEOL, ...), allocate
 * tx/rx descriptors and h/w transmit queues, then wire up the net80211
 * layer and driver method overrides.
 *
 * Returns 0 on success or an errno on failure; on failure sc_invalid
 * is set so the interrupt handler will ignore the hardware.
 */
int
ath_attach(u_int16_t devid, struct ath_softc *sc)
{
	struct ifnet *ifp;
	struct ieee80211com *ic;
	struct ath_hal *ah = NULL;
	HAL_STATUS status;
	int error = 0, i;
	u_int wmodes;
	uint8_t macaddr[IEEE80211_ADDR_LEN];

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid);

	ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211);
	if (ifp == NULL) {
		device_printf(sc->sc_dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto bad;
	}
	ic = ifp->if_l2com;

	/* set these up early for if_printf use */
	if_initname(ifp, device_get_name(sc->sc_dev),
		device_get_unit(sc->sc_dev));

	ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status);
	if (ah == NULL) {
		if_printf(ifp, "unable to attach hardware; HAL status %u\n",
			status);
		error = ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;
	sc->sc_invalid = 0;	/* ready to go, enable interrupt handling */
#ifdef	ATH_DEBUG
	sc->sc_debug = ath_debug;
#endif

	/*
	 * Check if the MAC has multi-rate retry support.
	 * We do this by trying to setup a fake extended
	 * descriptor.  MAC's that don't have support will
	 * return false w/o doing anything.  MAC's that do
	 * support it will return true w/o doing anything.
	 */
	sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0);

	/*
	 * Check if the device has hardware counters for PHY
	 * errors.  If so we need to enable the MIB interrupt
	 * so we can act on stat triggers.
	 */
	if (ath_hal_hwphycounters(ah))
		sc->sc_needmib = 1;

	/*
	 * Get the hardware key cache size.
	 */
	sc->sc_keymax = ath_hal_keycachesize(ah);
	if (sc->sc_keymax > ATH_KEYMAX) {
		if_printf(ifp, "Warning, using only %u of %u key cache slots\n",
			ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}
	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath_hal_keyreset(ah, i);

	/*
	 * Collect the default channel list.
	 */
	error = ath_getchannels(sc);
	if (error != 0)
		goto bad;

	/*
	 * Setup rate tables for all potential media types.
	 */
	ath_rate_setup(sc, IEEE80211_MODE_11A);
	ath_rate_setup(sc, IEEE80211_MODE_11B);
	ath_rate_setup(sc, IEEE80211_MODE_11G);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_TURBO_G);
	ath_rate_setup(sc, IEEE80211_MODE_STURBO_A);
	ath_rate_setup(sc, IEEE80211_MODE_11NA);
	ath_rate_setup(sc, IEEE80211_MODE_11NG);
	ath_rate_setup(sc, IEEE80211_MODE_HALF);
	ath_rate_setup(sc, IEEE80211_MODE_QUARTER);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, IEEE80211_MODE_11A);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	error = ath_desc_alloc(sc);
	if (error != 0) {
		if_printf(ifp, "failed to allocate descriptors: %d\n", error);
		goto bad;
	}
	callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
	callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);

	ATH_TXBUF_LOCK_INIT(sc);

	sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT,
		taskqueue_thread_enqueue, &sc->sc_tq);
	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET,
		"%s taskq", ifp->if_xname);

	TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc);
	TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc);
	TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority.  Note that the hal handles resetting
	 * these queues at the needed time.
	 *
	 * XXX PS-Poll
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == (u_int) -1) {
		if_printf(ifp, "unable to setup a beacon xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		if_printf(ifp, "unable to setup CAB xmit queue!\n");
		error = EIO;
		goto bad2;
	}
	/* NB: insure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) {
		if_printf(ifp, "unable to setup xmit queue for %s traffic!\n",
			ieee80211_wme_acnames[WME_AC_BK]);
		error = EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) ||
	    !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) ||
	    !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) {
		/*
		 * Not enough hardware tx queues to properly do WME;
		 * just punt and assign them all to the same h/w queue.
		 * We could do a better job of this if, for example,
		 * we allocate queues when we switch from station to
		 * AP mode.
		 */
		if (sc->sc_ac2q[WME_AC_VI] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]);
		if (sc->sc_ac2q[WME_AC_BE] != NULL)
			ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]);
		sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK];
		sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK];
	}

	/*
	 * Special case certain configurations.  Note the
	 * CAB queue is handled by these specially so don't
	 * include them when checking the txq setup mask.
	 */
	switch (sc->sc_txqsetup &~ (1<<sc->sc_cabq->axq_qnum)) {
	case 0x01:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc);
		break;
	case 0x0f:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc);
		break;
	default:
		TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc);
		break;
	}

	/*
	 * Setup rate control.  Some rate control modules
	 * call back to change the antenna state so expose
	 * the necessary entry points.
	 * XXX maybe belongs in struct ath_ratectrl?
	 */
	sc->sc_setdefantenna = ath_setdefantenna;
	sc->sc_rc = ath_rate_attach(sc);
	if (sc->sc_rc == NULL) {
		error = EIO;
		goto bad2;
	}

	sc->sc_blinking = 0;
	sc->sc_ledstate = 1;
	sc->sc_ledon = 0;			/* low true */
	sc->sc_ledidle = (2700*hz)/1000;	/* 2.7sec */
	callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE);
	/*
	 * Auto-enable soft led processing for IBM cards and for
	 * 5211 minipci cards.  Users can also manually enable/disable
	 * support with a sysctl.
	 */
	sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID);
	if (sc->sc_softled) {
		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
		    HAL_GPIO_MUX_MAC_NETWORK_LED);
		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
	}

	ifp->if_softc = sc;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
	ifp->if_start = ath_start;
	ifp->if_watchdog = NULL;
	ifp->if_ioctl = ath_ioctl;
	ifp->if_init = ath_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
	ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&ifp->if_snd);

	ic->ic_ifp = ifp;
	/* XXX not right but it's not used anywhere important */
	ic->ic_phytype = IEEE80211_T_OFDM;
	ic->ic_opmode = IEEE80211_M_STA;
	ic->ic_caps =
		  IEEE80211_C_STA		/* station mode */
		| IEEE80211_C_IBSS		/* ibss, nee adhoc, mode */
		| IEEE80211_C_HOSTAP		/* hostap mode */
		| IEEE80211_C_MONITOR		/* monitor mode */
		| IEEE80211_C_AHDEMO		/* adhoc demo mode */
		| IEEE80211_C_WDS		/* 4-address traffic works */
		| IEEE80211_C_SHPREAMBLE	/* short preamble supported */
		| IEEE80211_C_SHSLOT		/* short slot time supported */
		| IEEE80211_C_WPA		/* capable of WPA1+WPA2 */
		| IEEE80211_C_BGSCAN		/* capable of bg scanning */
		| IEEE80211_C_TXFRAG		/* handle tx frags */
		;
	/*
	 * Query the hal to figure out h/w crypto support.
	 */
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP))
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP;
	if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) {
		ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP;
		/*
		 * Check if h/w does the MIC and/or whether the
		 * separate key cache entries are required to
		 * handle both tx+rx MIC keys.
		 */
		if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC))
			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
		/*
		 * If the h/w supports storing tx+rx MIC keys
		 * in one cache slot automatically enable use.
		 */
		if (ath_hal_hastkipsplit(ah) ||
		    !ath_hal_settkipsplit(ah, AH_FALSE))
			sc->sc_splitmic = 1;
		/*
		 * If the h/w can do TKIP MIC together with WME then
		 * we use it; otherwise we force the MIC to be done
		 * in software by the net80211 layer.
		 */
		if (ath_hal_haswmetkipmic(ah))
			sc->sc_wmetkipmic = 1;
	}
	sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR);
	sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use.  If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		setbit(sc->sc_keymap, i);
		setbit(sc->sc_keymap, i+64);
		if (sc->sc_splitmic) {
			setbit(sc->sc_keymap, i+32);
			setbit(sc->sc_keymap, i+32+64);
		}
	}
	/*
	 * TPC support can be done either with a global cap or
	 * per-packet support.  The latter is not available on
	 * all parts.  We're a bit pedantic here as all parts
	 * support a global cap.
	 */
	if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah))
		ic->ic_caps |= IEEE80211_C_TXPMGT;

	/*
	 * Mark WME capability only if we have sufficient
	 * hardware queues to do proper priority scheduling.
	 */
	if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK])
		ic->ic_caps |= IEEE80211_C_WME;
	/*
	 * Check for misc other capabilities.
	 */
	if (ath_hal_hasbursting(ah))
		ic->ic_caps |= IEEE80211_C_BURST;
	sc->sc_hasbmask = ath_hal_hasbssidmask(ah);
	sc->sc_hastsfadd = ath_hal_hastsfadjust(ah);
	if (ath_hal_hasfastframes(ah))
		ic->ic_caps |= IEEE80211_C_FF;
	wmodes = ath_hal_getwirelessmodes(ah);
	if (wmodes & (HAL_MODE_108G|HAL_MODE_TURBO))
		ic->ic_caps |= IEEE80211_C_TURBOP;
#ifdef IEEE80211_SUPPORT_TDMA
	/* NB: only MACs newer than rev 0x78 can do TDMA */
	if (ath_hal_macversion(ah) > 0x78) {
		ic->ic_caps |= IEEE80211_C_TDMA; /* capable of TDMA */
		ic->ic_tdma_update = ath_tdma_update;
	}
#endif
	/*
	 * Indicate we need the 802.11 header padded to a
	 * 32-bit boundary for 4-address and QoS frames.
	 */
	ic->ic_flags |= IEEE80211_F_DATAPAD;

	/*
	 * Query the hal about antenna support.
	 */
	sc->sc_defant = ath_hal_getdefantenna(ah);

	/*
	 * Not all chips have the VEOL support we want to
	 * use with IBSS beacons; check here for it.
	 */
	sc->sc_hasveol = ath_hal_hasveol(ah);

	/* get mac address from hardware */
	ath_hal_getmac(ah, macaddr);
	if (sc->sc_hasbmask)
		ath_hal_getbssidmask(ah, sc->sc_hwbssidmask);

	/* NB: used to size node table key mapping array */
	ic->ic_max_keyix = sc->sc_keymax;
	/* call MI attach routine. */
	ieee80211_ifattach(ic, macaddr);
	ic->ic_setregdomain = ath_setregdomain;
	ic->ic_getradiocaps = ath_getradiocaps;
	sc->sc_opmode = HAL_M_STA;

	/* override default methods */
	ic->ic_newassoc = ath_newassoc;
	ic->ic_updateslot = ath_updateslot;
	ic->ic_wme.wme_update = ath_wme_update;
	ic->ic_vap_create = ath_vap_create;
	ic->ic_vap_delete = ath_vap_delete;
	ic->ic_raw_xmit = ath_raw_xmit;
	ic->ic_update_mcast = ath_update_mcast;
	ic->ic_update_promisc = ath_update_promisc;
	ic->ic_node_alloc = ath_node_alloc;
	sc->sc_node_free = ic->ic_node_free;
	ic->ic_node_free = ath_node_free;
	ic->ic_node_getsignal = ath_node_getsignal;
	ic->ic_scan_start = ath_scan_start;
	ic->ic_scan_end = ath_scan_end;
	ic->ic_set_channel = ath_set_channel;

	ieee80211_radiotap_attach(ic,
	    &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th),
		ATH_TX_RADIOTAP_PRESENT,
	    &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th),
		ATH_RX_RADIOTAP_PRESENT);

	/*
	 * Setup dynamic sysctl's now that country code and
	 * regdomain are available from the hal.
	 */
	ath_sysctlattach(sc);

	if (bootverbose)
		ieee80211_announce(ic);
	ath_announce(sc);
	return 0;
bad2:
	/* descriptors and tx queues were allocated; tear them down */
	ath_tx_cleanup(sc);
	ath_desc_free(sc);
bad:
	if (ah)
		ath_hal_detach(ah);
	if (ifp != NULL)
		if_free(ifp);
	sc->sc_invalid = 1;	/* keep the interrupt handler away */
	return error;
}
744 
/*
 * Detach the device; the inverse of ath_attach.  The teardown order
 * below is load-bearing (see the NB comment).  Always returns 0.
 */
int
ath_detach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
		__func__, ifp->if_flags);

	/*
	 * NB: the order of these is important:
	 * o stop the chip so no more interrupts will fire
	 * o call the 802.11 layer before detaching the hal to
	 *   insure callbacks into the driver to delete global
	 *   key cache entries can be handled
	 * o free the taskqueue which drains any pending tasks
	 * o reclaim the tx queue data structures after calling
	 *   the 802.11 layer as we'll get called back to reclaim
	 *   node state and potentially want to use them
	 * o to cleanup the tx queues the hal is called, so detach
	 *   it last
	 * Other than that, it's straightforward...
	 */
	ath_stop(ifp);
	ieee80211_ifdetach(ifp->if_l2com);
	taskqueue_free(sc->sc_tq);
#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->detach(sc->sc_tx99);
#endif
	ath_rate_detach(sc->sc_rc);
	ath_desc_free(sc);
	ath_tx_cleanup(sc);
	ath_hal_detach(sc->sc_ah);	/* NB: sets chip in full sleep */
	if_free(ifp);

	return 0;
}
782 
783 /*
784  * MAC address handling for multiple BSS on the same radio.
785  * The first vap uses the MAC address from the EEPROM.  For
786  * subsequent vap's we set the U/L bit (bit 1) in the MAC
787  * address and use the next six bits as an index.
788  */
789 static void
790 assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone)
791 {
792 	int i;
793 
794 	if (clone && sc->sc_hasbmask) {
795 		/* NB: we only do this if h/w supports multiple bssid */
796 		for (i = 0; i < 8; i++)
797 			if ((sc->sc_bssidmask & (1<<i)) == 0)
798 				break;
799 		if (i != 0)
800 			mac[0] |= (i << 2)|0x2;
801 	} else
802 		i = 0;
803 	sc->sc_bssidmask |= 1<<i;
804 	sc->sc_hwbssidmask[0] &= ~mac[0];
805 	if (i == 0)
806 		sc->sc_nbssid0++;
807 }
808 
809 static void
810 reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN])
811 {
812 	int i = mac[0] >> 2;
813 	uint8_t mask;
814 
815 	if (i != 0 || --sc->sc_nbssid0 == 0) {
816 		sc->sc_bssidmask &= ~(1<<i);
817 		/* recalculate bssid mask from remaining addresses */
818 		mask = 0xff;
819 		for (i = 1; i < 8; i++)
820 			if (sc->sc_bssidmask & (1<<i))
821 				mask &= ~((i<<2)|0x2);
822 		sc->sc_hwbssidmask[0] |= mask;
823 	}
824 }
825 
826 /*
827  * Assign a beacon xmit slot.  We try to space out
828  * assignments so when beacons are staggered the
829  * traffic coming out of the cab q has maximal time
830  * to go out before the next beacon is scheduled.
831  */
832 static int
833 assign_bslot(struct ath_softc *sc)
834 {
835 	u_int slot, free;
836 
837 	free = 0;
838 	for (slot = 0; slot < ATH_BCBUF; slot++)
839 		if (sc->sc_bslot[slot] == NULL) {
840 			if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL &&
841 			    sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL)
842 				return slot;
843 			free = slot;
844 			/* NB: keep looking for a double slot */
845 		}
846 	return free;
847 }
848 
849 static struct ieee80211vap *
850 ath_vap_create(struct ieee80211com *ic,
851 	const char name[IFNAMSIZ], int unit, int opmode, int flags,
852 	const uint8_t bssid[IEEE80211_ADDR_LEN],
853 	const uint8_t mac0[IEEE80211_ADDR_LEN])
854 {
855 	struct ath_softc *sc = ic->ic_ifp->if_softc;
856 	struct ath_vap *avp;
857 	struct ieee80211vap *vap;
858 	uint8_t mac[IEEE80211_ADDR_LEN];
859 	int ic_opmode, needbeacon, error;
860 
861 	avp = (struct ath_vap *) malloc(sizeof(struct ath_vap),
862 	    M_80211_VAP, M_WAITOK | M_ZERO);
863 	needbeacon = 0;
864 	IEEE80211_ADDR_COPY(mac, mac0);
865 
866 	ATH_LOCK(sc);
867 	ic_opmode = opmode;		/* default to opmode of new vap */
868 	switch (opmode) {
869 	case IEEE80211_M_STA:
870 		if (sc->sc_nstavaps != 0) {	/* XXX only 1 for now */
871 			device_printf(sc->sc_dev, "only 1 sta vap supported\n");
872 			goto bad;
873 		}
874 		if (sc->sc_nvaps) {
875 			/*
876 			 * With multiple vaps we must fall back
877 			 * to s/w beacon miss handling.
878 			 */
879 			flags |= IEEE80211_CLONE_NOBEACONS;
880 		}
881 		if (flags & IEEE80211_CLONE_NOBEACONS) {
882 			/*
883 			 * Station mode w/o beacons are implemented w/ AP mode.
884 			 */
885 			ic_opmode = IEEE80211_M_HOSTAP;
886 		}
887 		break;
888 	case IEEE80211_M_IBSS:
889 		if (sc->sc_nvaps != 0) {	/* XXX only 1 for now */
890 			device_printf(sc->sc_dev,
891 			    "only 1 ibss vap supported\n");
892 			goto bad;
893 		}
894 		needbeacon = 1;
895 		break;
896 	case IEEE80211_M_AHDEMO:
897 #ifdef IEEE80211_SUPPORT_TDMA
898 		if (flags & IEEE80211_CLONE_TDMA) {
899 			if (sc->sc_nvaps != 0) {
900 				device_printf(sc->sc_dev,
901 				    "only 1 tdma vap supported\n");
902 				goto bad;
903 			}
904 			needbeacon = 1;
905 			flags |= IEEE80211_CLONE_NOBEACONS;
906 		}
907 		/* fall thru... */
908 #endif
909 	case IEEE80211_M_MONITOR:
910 		if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) {
911 			/*
912 			 * Adopt existing mode.  Adding a monitor or ahdemo
913 			 * vap to an existing configuration is of dubious
914 			 * value but should be ok.
915 			 */
916 			/* XXX not right for monitor mode */
917 			ic_opmode = ic->ic_opmode;
918 		}
919 		break;
920 	case IEEE80211_M_HOSTAP:
921 		needbeacon = 1;
922 		break;
923 	case IEEE80211_M_WDS:
924 		if (sc->sc_nvaps != 0 && ic->ic_opmode == IEEE80211_M_STA) {
925 			device_printf(sc->sc_dev,
926 			    "wds not supported in sta mode\n");
927 			goto bad;
928 		}
929 		/*
930 		 * Silently remove any request for a unique
931 		 * bssid; WDS vap's always share the local
932 		 * mac address.
933 		 */
934 		flags &= ~IEEE80211_CLONE_BSSID;
935 		if (sc->sc_nvaps == 0)
936 			ic_opmode = IEEE80211_M_HOSTAP;
937 		else
938 			ic_opmode = ic->ic_opmode;
939 		break;
940 	default:
941 		device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
942 		goto bad;
943 	}
944 	/*
945 	 * Check that a beacon buffer is available; the code below assumes it.
946 	 */
947 	if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) {
948 		device_printf(sc->sc_dev, "no beacon buffer available\n");
949 		goto bad;
950 	}
951 
952 	/* STA, AHDEMO? */
953 	if (opmode == IEEE80211_M_HOSTAP) {
954 		assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID);
955 		ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
956 	}
957 
958 	vap = &avp->av_vap;
959 	/* XXX can't hold mutex across if_alloc */
960 	ATH_UNLOCK(sc);
961 	error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags,
962 	    bssid, mac);
963 	ATH_LOCK(sc);
964 	if (error != 0) {
965 		device_printf(sc->sc_dev, "%s: error %d creating vap\n",
966 		    __func__, error);
967 		goto bad2;
968 	}
969 
970 	/* h/w crypto support */
971 	vap->iv_key_alloc = ath_key_alloc;
972 	vap->iv_key_delete = ath_key_delete;
973 	vap->iv_key_set = ath_key_set;
974 	vap->iv_key_update_begin = ath_key_update_begin;
975 	vap->iv_key_update_end = ath_key_update_end;
976 
977 	/* override various methods */
978 	avp->av_recv_mgmt = vap->iv_recv_mgmt;
979 	vap->iv_recv_mgmt = ath_recv_mgmt;
980 	vap->iv_reset = ath_reset_vap;
981 	vap->iv_update_beacon = ath_beacon_update;
982 	avp->av_newstate = vap->iv_newstate;
983 	vap->iv_newstate = ath_newstate;
984 	avp->av_bmiss = vap->iv_bmiss;
985 	vap->iv_bmiss = ath_bmiss_vap;
986 
987 	avp->av_bslot = -1;
988 	if (needbeacon) {
989 		/*
990 		 * Allocate beacon state and setup the q for buffered
991 		 * multicast frames.  We know a beacon buffer is
992 		 * available because we checked above.
993 		 */
994 		avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf);
995 		STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list);
996 		if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) {
997 			/*
998 			 * Assign the vap to a beacon xmit slot.  As above
999 			 * this cannot fail to find a free one.
1000 			 */
1001 			avp->av_bslot = assign_bslot(sc);
1002 			KASSERT(sc->sc_bslot[avp->av_bslot] == NULL,
1003 			    ("beacon slot %u not empty", avp->av_bslot));
1004 			sc->sc_bslot[avp->av_bslot] = vap;
1005 			sc->sc_nbcnvaps++;
1006 		}
1007 		if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) {
1008 			/*
1009 			 * Multple vaps are to transmit beacons and we
1010 			 * have h/w support for TSF adjusting; enable
1011 			 * use of staggered beacons.
1012 			 */
1013 			sc->sc_stagbeacons = 1;
1014 		}
1015 		ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ);
1016 	}
1017 
1018 	ic->ic_opmode = ic_opmode;
1019 	if (opmode != IEEE80211_M_WDS) {
1020 		sc->sc_nvaps++;
1021 		if (opmode == IEEE80211_M_STA)
1022 			sc->sc_nstavaps++;
1023 	}
1024 	switch (ic_opmode) {
1025 	case IEEE80211_M_IBSS:
1026 		sc->sc_opmode = HAL_M_IBSS;
1027 		break;
1028 	case IEEE80211_M_STA:
1029 		sc->sc_opmode = HAL_M_STA;
1030 		break;
1031 	case IEEE80211_M_AHDEMO:
1032 #ifdef IEEE80211_SUPPORT_TDMA
1033 		if (vap->iv_caps & IEEE80211_C_TDMA) {
1034 			sc->sc_tdma = 1;
1035 			/* NB: disable tsf adjust */
1036 			sc->sc_stagbeacons = 0;
1037 		}
1038 		/*
1039 		 * NB: adhoc demo mode is a pseudo mode; to the hal it's
1040 		 * just ap mode.
1041 		 */
1042 		/* fall thru... */
1043 #endif
1044 	case IEEE80211_M_HOSTAP:
1045 		sc->sc_opmode = HAL_M_HOSTAP;
1046 		break;
1047 	case IEEE80211_M_MONITOR:
1048 		sc->sc_opmode = HAL_M_MONITOR;
1049 		break;
1050 	default:
1051 		/* XXX should not happen */
1052 		break;
1053 	}
1054 	if (sc->sc_hastsfadd) {
1055 		/*
1056 		 * Configure whether or not TSF adjust should be done.
1057 		 */
1058 		ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons);
1059 	}
1060 	if (flags & IEEE80211_CLONE_NOBEACONS) {
1061 		/*
1062 		 * Enable s/w beacon miss handling.
1063 		 */
1064 		sc->sc_swbmiss = 1;
1065 	}
1066 	ATH_UNLOCK(sc);
1067 
1068 	/* complete setup */
1069 	ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status);
1070 	return vap;
1071 bad2:
1072 	reclaim_address(sc, mac);
1073 	ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask);
1074 bad:
1075 	free(avp, M_80211_VAP);
1076 	ATH_UNLOCK(sc);
1077 	return NULL;
1078 }
1079 
/*
 * Reclaim a vap: quiesce the hardware, detach the vap from
 * net80211, release beacon and multicast state plus per-mode
 * bookkeeping, then restart the rx/tx machinery if the interface
 * is still running.
 */
static void
ath_vap_delete(struct ieee80211vap *vap)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp = ATH_VAP(vap);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Quiesce the hardware while we remove the vap.  In
		 * particular we need to reclaim all references to
		 * the vap state by any frames pending on the tx queues.
		 */
		ath_hal_intrset(ah, 0);		/* disable interrupts */
		ath_draintxq(sc);		/* stop xmit side */
		ath_stoprecv(sc);		/* stop recv side */
	}

	ieee80211_vap_detach(vap);
	ATH_LOCK(sc);
	/*
	 * Reclaim beacon state.  Note this must be done before
	 * the vap instance is reclaimed as we may have a reference
	 * to it in the buffer for the beacon frame.
	 */
	if (avp->av_bcbuf != NULL) {
		if (avp->av_bslot != -1) {
			sc->sc_bslot[avp->av_bslot] = NULL;
			sc->sc_nbcnvaps--;
		}
		ath_beacon_return(sc, avp->av_bcbuf);
		avp->av_bcbuf = NULL;
		/* last beaconing vap gone: disable beacon staggering */
		if (sc->sc_nbcnvaps == 0) {
			sc->sc_stagbeacons = 0;
			if (sc->sc_hastsfadd)
				ath_hal_settsfadjust(sc->sc_ah, 0);
		}
		/*
		 * Reclaim any pending mcast frames for the vap.
		 */
		ath_tx_draintxq(sc, &avp->av_mcastq);
		ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq);
	}
	/*
	 * Update bookkeeping.
	 */
	if (vap->iv_opmode == IEEE80211_M_STA) {
		sc->sc_nstavaps--;
		/* no sta vaps left: s/w beacon miss no longer needed */
		if (sc->sc_nstavaps == 0 && sc->sc_swbmiss)
			sc->sc_swbmiss = 0;
	} else if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
		/* give back the cloned address and refresh the h/w mask */
		reclaim_address(sc, vap->iv_myaddr);
		ath_hal_setbssidmask(ah, sc->sc_hwbssidmask);
	}
	if (vap->iv_opmode != IEEE80211_M_WDS)
		sc->sc_nvaps--;
#ifdef IEEE80211_SUPPORT_TDMA
	/* TDMA operation ceases when the last vap is destroyed */
	if (sc->sc_tdma && sc->sc_nvaps == 0) {
		sc->sc_tdma = 0;
		sc->sc_swbmiss = 0;
	}
#endif
	ATH_UNLOCK(sc);
	free(avp, M_80211_VAP);

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Restart rx+tx machines if still running (RUNNING will
		 * be reset if we just destroyed the last vap).
		 */
		if (ath_startrecv(sc) != 0)
			if_printf(ifp, "%s: unable to restart recv logic\n",
			    __func__);
		if (sc->sc_beacons)
			ath_beacon_config(sc, NULL);
		ath_hal_intrset(ah, sc->sc_imask);
	}
}
1161 
1162 void
1163 ath_suspend(struct ath_softc *sc)
1164 {
1165 	struct ifnet *ifp = sc->sc_ifp;
1166 	struct ieee80211com *ic = ifp->if_l2com;
1167 
1168 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1169 		__func__, ifp->if_flags);
1170 
1171 	sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0;
1172 	if (ic->ic_opmode == IEEE80211_M_STA)
1173 		ath_stop(ifp);
1174 	else
1175 		ieee80211_suspend_all(ic);
1176 	/*
1177 	 * NB: don't worry about putting the chip in low power
1178 	 * mode; pci will power off our socket on suspend and
1179 	 * cardbus detaches the device.
1180 	 */
1181 }
1182 
1183 /*
1184  * Reset the key cache since some parts do not reset the
1185  * contents on resume.  First we clear all entries, then
1186  * re-load keys that the 802.11 layer assumes are setup
1187  * in h/w.
1188  */
1189 static void
1190 ath_reset_keycache(struct ath_softc *sc)
1191 {
1192 	struct ifnet *ifp = sc->sc_ifp;
1193 	struct ieee80211com *ic = ifp->if_l2com;
1194 	struct ath_hal *ah = sc->sc_ah;
1195 	int i;
1196 
1197 	for (i = 0; i < sc->sc_keymax; i++)
1198 		ath_hal_keyreset(ah, i);
1199 	ieee80211_crypto_reload_keys(ic);
1200 }
1201 
1202 void
1203 ath_resume(struct ath_softc *sc)
1204 {
1205 	struct ifnet *ifp = sc->sc_ifp;
1206 	struct ieee80211com *ic = ifp->if_l2com;
1207 	struct ath_hal *ah = sc->sc_ah;
1208 	HAL_STATUS status;
1209 
1210 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1211 		__func__, ifp->if_flags);
1212 
1213 	/*
1214 	 * Must reset the chip before we reload the
1215 	 * keycache as we were powered down on suspend.
1216 	 */
1217 	ath_hal_reset(ah, sc->sc_opmode,
1218 	    sc->sc_curchan != NULL ? sc->sc_curchan : ic->ic_curchan,
1219 	    AH_FALSE, &status);
1220 	ath_reset_keycache(sc);
1221 	if (sc->sc_resume_up) {
1222 		if (ic->ic_opmode == IEEE80211_M_STA) {
1223 			ath_init(sc);
1224 			ieee80211_beacon_miss(ic);
1225 		} else
1226 			ieee80211_resume_all(ic);
1227 	}
1228 	if (sc->sc_softled) {
1229 		ath_hal_gpioCfgOutput(ah, sc->sc_ledpin,
1230 		    HAL_GPIO_MUX_MAC_NETWORK_LED);
1231 		ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon);
1232 	}
1233 }
1234 
1235 void
1236 ath_shutdown(struct ath_softc *sc)
1237 {
1238 	struct ifnet *ifp = sc->sc_ifp;
1239 
1240 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n",
1241 		__func__, ifp->if_flags);
1242 
1243 	ath_stop(ifp);
1244 	/* NB: no point powering down chip as we're about to reboot */
1245 }
1246 
1247 /*
1248  * Interrupt handler.  Most of the actual processing is deferred.
1249  */
void
ath_intr(void *arg)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__);
		return;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return;
	if ((ifp->if_flags & IFF_UP) == 0 ||
	    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		/* NB: deliberately shadows the outer 'status' */
		HAL_INT status;

		DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
			__func__, ifp->if_flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return;
	}
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to insure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ath_fatal_proc(sc, 0);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
#ifdef IEEE80211_SUPPORT_TDMA
			if (sc->sc_tdma) {
				/* send a tdma beacon every tdma_bintval SWBAs */
				if (sc->sc_tdmaswba == 0) {
					struct ieee80211com *ic = ifp->if_l2com;
					struct ieee80211vap *vap =
					    TAILQ_FIRST(&ic->ic_vaps);
					ath_tdma_beacon_send(sc, vap);
					sc->sc_tdmaswba =
					    vap->iv_tdma->tdma_bintval;
				} else
					sc->sc_tdmaswba--;
			} else
#endif
			{
				ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
				/*
				 * Schedule the rx taskq in case there's no
				 * traffic so any frames held on the staging
				 * queue are aged and potentially flushed.
				 */
				taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
			}
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
			sc->sc_rxlink = NULL;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		/* rx and tx completions are deferred to the taskqueue */
		if (status & HAL_INT_RX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
		if (status & HAL_INT_TX)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask);
		if (status & HAL_INT_BMISS) {
			sc->sc_stats.ast_bmiss++;
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 */
			ath_hal_mibevent(ah, &sc->sc_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
		if (status & HAL_INT_RXORN) {
			/* NB: hal marks HAL_INT_FATAL when RXORN is fatal */
			sc->sc_stats.ast_rxorn++;
		}
	}
}
1366 
1367 static void
1368 ath_fatal_proc(void *arg, int pending)
1369 {
1370 	struct ath_softc *sc = arg;
1371 	struct ifnet *ifp = sc->sc_ifp;
1372 	u_int32_t *state;
1373 	u_int32_t len;
1374 	void *sp;
1375 
1376 	if_printf(ifp, "hardware error; resetting\n");
1377 	/*
1378 	 * Fatal errors are unrecoverable.  Typically these
1379 	 * are caused by DMA errors.  Collect h/w state from
1380 	 * the hal so we can diagnose what's going on.
1381 	 */
1382 	if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) {
1383 		KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len));
1384 		state = sp;
1385 		if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n",
1386 		    state[0], state[1] , state[2], state[3],
1387 		    state[4], state[5]);
1388 	}
1389 	ath_reset(ifp);
1390 }
1391 
/*
 * Per-vap beacon miss handler.
 */
static void
ath_bmiss_vap(struct ieee80211vap *vap)
{
	/*
	 * Workaround phantom bmiss interrupts by sanity-checking
	 * the time of our last rx'd frame.  If it is within the
	 * beacon miss interval then ignore the interrupt.  If it's
	 * truly a bmiss we'll get another interrupt soon and that'll
	 * be dispatched up for processing.  Note this applies only
	 * for h/w beacon miss events.
	 */
	if ((vap->iv_flags_ext & IEEE80211_FEXT_SWBMISS) == 0) {
		struct ifnet *ifp = vap->iv_ic->ic_ifp;
		struct ath_softc *sc = ifp->if_softc;
		u_int64_t lastrx = sc->sc_lastrx;
		u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah);
		/* threshold in TSF units: beacons * beacon interval (TU->us) */
		u_int bmisstimeout =
			vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024;

		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n",
		    __func__, (unsigned long long) tsf,
		    (unsigned long long)(tsf - lastrx),
		    (unsigned long long) lastrx, bmisstimeout);

		/* heard something recently; treat the bmiss as phantom */
		if (tsf - lastrx <= bmisstimeout) {
			sc->sc_stats.ast_bmiss_phantom++;
			return;
		}
	}
	/* chain to the saved net80211 handler */
	ATH_VAP(vap)->av_bmiss(vap);
}
1424 
1425 static int
1426 ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask, uint32_t *hangs)
1427 {
1428 	uint32_t rsize;
1429 	void *sp;
1430 
1431 	if (!ath_hal_getdiagstate(ah, 32, &mask, sizeof(&mask), &sp, &rsize))
1432 		return 0;
1433 	KASSERT(rsize == sizeof(uint32_t), ("resultsize %u", rsize));
1434 	*hangs = *(uint32_t *)sp;
1435 	return 1;
1436 }
1437 
1438 static void
1439 ath_bmiss_proc(void *arg, int pending)
1440 {
1441 	struct ath_softc *sc = arg;
1442 	struct ifnet *ifp = sc->sc_ifp;
1443 	uint32_t hangs;
1444 
1445 	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
1446 
1447 	if (ath_hal_gethangstate(sc->sc_ah, 0xff, &hangs) && hangs != 0) {
1448 		if_printf(ifp, "bb hang detected (0x%x), reseting\n", hangs);
1449 		ath_reset(ifp);
1450 	} else
1451 		ieee80211_beacon_miss(ifp->if_l2com);
1452 }
1453 
1454 /*
1455  * Handle TKIP MIC setup to deal hardware that doesn't do MIC
1456  * calcs together with WME.  If necessary disable the crypto
1457  * hardware and mark the 802.11 state so keys will be setup
1458  * with the MIC work done in software.
1459  */
1460 static void
1461 ath_settkipmic(struct ath_softc *sc)
1462 {
1463 	struct ifnet *ifp = sc->sc_ifp;
1464 	struct ieee80211com *ic = ifp->if_l2com;
1465 
1466 	if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) {
1467 		if (ic->ic_flags & IEEE80211_F_WME) {
1468 			ath_hal_settkipmic(sc->sc_ah, AH_FALSE);
1469 			ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC;
1470 		} else {
1471 			ath_hal_settkipmic(sc->sc_ah, AH_TRUE);
1472 			ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC;
1473 		}
1474 	}
1475 }
1476 
/*
 * Bring the device up: reset the chip, program the current channel,
 * restart the receive engine, and enable interrupts.  Takes
 * ATH_LOCK internally.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n",
		__func__, ifp->if_flags);

	ATH_LOCK(sc);
	/*
	 * Stop anything previously setup.  This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop_locked(ifp);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''.  On return the hardware is known to
	 * be powered up and with interrupts disabled.  This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	ath_settkipmic(sc);
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_FALSE, &status)) {
		if_printf(ifp, "unable to reset hardware; hal status %u\n",
			status);
		ATH_UNLOCK(sc);
		return;
	}
	ath_chan_change(sc, ic->ic_curchan);

	/*
	 * Likewise this is set during reset so update
	 * state cached in the driver.
	 */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	/* restart the calibration cycle from scratch */
	sc->sc_lastlongcal = 0;
	sc->sc_resetcal = 1;
	sc->sc_lastcalreset = 0;

	/*
	 * Setup the hardware after reset: the key cache
	 * is filled as needed and the receive engine is
	 * set going.  Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		if_printf(ifp, "unable to start recv logic\n");
		ATH_UNLOCK(sc);
		return;
	}

	/*
	 * Enable interrupts.
	 */
	sc->sc_imask = HAL_INT_RX | HAL_INT_TX
		  | HAL_INT_RXEOL | HAL_INT_RXORN
		  | HAL_INT_FATAL | HAL_INT_GLOBAL;
	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA)
		sc->sc_imask |= HAL_INT_MIB;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	/* arm the watchdog timer (1 second period) */
	callout_reset(&sc->sc_wd_ch, hz, ath_watchdog, sc);
	ath_hal_intrset(ah, sc->sc_imask);

	ATH_UNLOCK(sc);

#ifdef ATH_TX99_DIAG
	if (sc->sc_tx99 != NULL)
		sc->sc_tx99->start(sc->sc_tx99);
	else
#endif
	ieee80211_start_all(ic);		/* start all vap's */
}
1560 
/*
 * Stop the device (locked variant); caller must hold ATH_LOCK.
 */
static void
ath_stop_locked(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n",
		__func__, sc->sc_invalid, ifp->if_flags);

	ATH_LOCK_ASSERT(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		/*
		 * Shutdown the hardware and driver:
		 *    reset 802.11 state machine
		 *    turn off timers
		 *    disable interrupts
		 *    turn off the radio
		 *    clear transmit machinery
		 *    clear receive machinery
		 *    drain and release tx queues
		 *    reclaim beacon resources
		 *    power down hardware
		 *
		 * Note that some of this work is not possible if the
		 * hardware is gone (invalid).
		 */
#ifdef ATH_TX99_DIAG
		if (sc->sc_tx99 != NULL)
			sc->sc_tx99->stop(sc->sc_tx99);
#endif
		callout_stop(&sc->sc_wd_ch);
		sc->sc_wd_timer = 0;
		ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		if (!sc->sc_invalid) {
			if (sc->sc_softled) {
				/* stop LED blinking and restore idle state */
				callout_stop(&sc->sc_ledtimer);
				ath_hal_gpioset(ah, sc->sc_ledpin,
					!sc->sc_ledon);
				sc->sc_blinking = 0;
			}
			ath_hal_intrset(ah, 0);
		}
		ath_draintxq(sc);
		if (!sc->sc_invalid) {
			ath_stoprecv(sc);
			ath_hal_phydisable(ah);
		} else
			sc->sc_rxlink = NULL;
		ath_beacon_free(sc);	/* XXX not needed */
	}
}
1612 
/*
 * Stop the device; unlocked wrapper around ath_stop_locked.
 */
static void
ath_stop(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;

	ATH_LOCK(sc);
	ath_stop_locked(ifp);
	ATH_UNLOCK(sc);
}
1622 
1623 /*
1624  * Reset the hardware w/o losing operational state.  This is
1625  * basically a more efficient way of doing ath_stop, ath_init,
1626  * followed by state transitions to the current 802.11
1627  * operational state.  Used to recover from various errors and
1628  * to reset or reload hardware state.
1629  */
static int
ath_reset(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/* quiesce: no interrupts, no tx, no rx while resetting */
	ath_hal_intrset(ah, 0);		/* disable interrupts */
	ath_draintxq(sc);		/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_settkipmic(sc);		/* configure TKIP MIC handling */
	/* NB: indicate channel change so we do a full reset */
	if (!ath_hal_reset(ah, sc->sc_opmode, ic->ic_curchan, AH_TRUE, &status))
		if_printf(ifp, "%s: unable to reset hardware; hal status %u\n",
			__func__, status);
	/* reset clobbers diversity setting; re-read the cached copy */
	sc->sc_diversity = ath_hal_getdiversity(ah);
	if (ath_startrecv(sc) != 0)	/* restart recv */
		if_printf(ifp, "%s: unable to start recv logic\n", __func__);
	/*
	 * We may be doing a reset in response to an ioctl
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_chan_change(sc, ic->ic_curchan);
	if (sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
		if (sc->sc_tdma)
			ath_tdma_config(sc, NULL);
		else
#endif
			ath_beacon_config(sc, NULL);	/* restart beacons */
	}
	ath_hal_intrset(ah, sc->sc_imask);

	ath_start(ifp);			/* restart xmit */
	/* NB: always reports success */
	return 0;
}
1668 
1669 static int
1670 ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
1671 {
1672 	struct ieee80211com *ic = vap->iv_ic;
1673 	struct ifnet *ifp = ic->ic_ifp;
1674 	struct ath_softc *sc = ifp->if_softc;
1675 	struct ath_hal *ah = sc->sc_ah;
1676 
1677 	switch (cmd) {
1678 	case IEEE80211_IOC_TXPOWER:
1679 		/*
1680 		 * If per-packet TPC is enabled, then we have nothing
1681 		 * to do; otherwise we need to force the global limit.
1682 		 * All this can happen directly; no need to reset.
1683 		 */
1684 		if (!ath_hal_gettpc(ah))
1685 			ath_hal_settxpowlimit(ah, ic->ic_txpowlimit);
1686 		return 0;
1687 	}
1688 	return ath_reset(ifp);
1689 }
1690 
1691 static struct ath_buf *
1692 _ath_getbuf_locked(struct ath_softc *sc)
1693 {
1694 	struct ath_buf *bf;
1695 
1696 	ATH_TXBUF_LOCK_ASSERT(sc);
1697 
1698 	bf = STAILQ_FIRST(&sc->sc_txbuf);
1699 	if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
1700 		STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list);
1701 	else
1702 		bf = NULL;
1703 	if (bf == NULL) {
1704 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
1705 		    STAILQ_FIRST(&sc->sc_txbuf) == NULL ?
1706 			"out of xmit buffers" : "xmit buffer busy");
1707 		sc->sc_stats.ast_tx_nobuf++;
1708 	}
1709 	return bf;
1710 }
1711 
1712 static struct ath_buf *
1713 ath_getbuf(struct ath_softc *sc)
1714 {
1715 	struct ath_buf *bf;
1716 
1717 	ATH_TXBUF_LOCK(sc);
1718 	bf = _ath_getbuf_locked(sc);
1719 	if (bf == NULL) {
1720 		struct ifnet *ifp = sc->sc_ifp;
1721 
1722 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: stop queue\n", __func__);
1723 		sc->sc_stats.ast_tx_qstop++;
1724 		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1725 	}
1726 	ATH_TXBUF_UNLOCK(sc);
1727 	return bf;
1728 }
1729 
1730 /*
1731  * Cleanup driver resources when we run out of buffers
1732  * while processing fragments; return the tx buffers
1733  * allocated and drop node references.
1734  */
static void
ath_txfrag_cleanup(struct ath_softc *sc,
	ath_bufhead *frags, struct ieee80211_node *ni)
{
	struct ath_buf *bf, *next;

	ATH_TXBUF_LOCK_ASSERT(sc);

	/*
	 * NB: bf is the current list head on every iteration, so
	 * removing the head removes bf itself; the saved 'next'
	 * keeps the traversal valid.
	 */
	STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) {
		/* NB: bf assumed clean */
		STAILQ_REMOVE_HEAD(frags, bf_list);
		STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
		/* drop the reference taken in ath_txfrag_setup */
		ieee80211_node_decref(ni);
	}
}
1750 
1751 /*
1752  * Setup xmit of a fragmented frame.  Allocate a buffer
1753  * for each frag and bump the node reference count to
1754  * reflect the held reference to be setup by ath_tx_start.
1755  */
1756 static int
1757 ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
1758 	struct mbuf *m0, struct ieee80211_node *ni)
1759 {
1760 	struct mbuf *m;
1761 	struct ath_buf *bf;
1762 
1763 	ATH_TXBUF_LOCK(sc);
1764 	for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
1765 		bf = _ath_getbuf_locked(sc);
1766 		if (bf == NULL) {	/* out of buffers, cleanup */
1767 			ath_txfrag_cleanup(sc, frags, ni);
1768 			break;
1769 		}
1770 		ieee80211_node_incref(ni);
1771 		STAILQ_INSERT_TAIL(frags, bf, bf_list);
1772 	}
1773 	ATH_TXBUF_UNLOCK(sc);
1774 
1775 	return !STAILQ_EMPTY(frags);
1776 }
1777 
/*
 * Transmit dispatch: drain the interface send queue, pairing each
 * frame (and each fragment) with an ath_buf and handing it to
 * ath_tx_start.  Stops when out of frames or out of tx buffers.
 */
static void
ath_start(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	struct ath_buf *bf;
	struct mbuf *m, *next;
	ath_bufhead frags;

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
		return;
	for (;;) {
		/*
		 * Grab a TX buffer and associated resources.
		 */
		bf = ath_getbuf(sc);
		if (bf == NULL)
			break;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* queue empty; return the unused buffer */
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ATH_TXBUF_UNLOCK(sc);
			break;
		}
		/* the frame carries its node reference in m_pkthdr.rcvif */
		ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
		/*
		 * Check for fragmentation.  If this frame
		 * has been broken up verify we have enough
		 * buffers to send all the fragments so all
		 * go out or none...
		 */
		STAILQ_INIT(&frags);
		if ((m->m_flags & M_FRAG) &&
		    !ath_txfrag_setup(sc, &frags, m, ni)) {
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: out of txfrag buffers\n", __func__);
			sc->sc_stats.ast_tx_nofrag++;
			ath_freetx(m);
			goto bad;
		}
		ifp->if_opackets++;
	nextfrag:
		/*
		 * Pass the frame to the h/w for transmission.
		 * Fragmented frames have each frag chained together
		 * with m_nextpkt.  We know there are sufficient ath_buf's
		 * to send all the frags because of work done by
		 * ath_txfrag_setup.  We leave m_nextpkt set while
		 * calling ath_tx_start so it can use it to extend the
		 * tx duration to cover the subsequent frag and
		 * so it can reclaim all the mbufs in case of an error;
		 * ath_tx_start clears m_nextpkt once it commits to
		 * handing the frame to the hardware.
		 */
		next = m->m_nextpkt;
		if (ath_tx_start(sc, ni, bf, m)) {
	bad:
			ifp->if_oerrors++;
	reclaim:
			/* return the buffer(s) and drop the node ref */
			bf->bf_m = NULL;
			bf->bf_node = NULL;
			ATH_TXBUF_LOCK(sc);
			STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
			ath_txfrag_cleanup(sc, &frags, ni);
			ATH_TXBUF_UNLOCK(sc);
			if (ni != NULL)
				ieee80211_free_node(ni);
			continue;
		}
		if (next != NULL) {
			/*
			 * Beware of state changing between frags.
			 * XXX check sta power-save state?
			 */
			if (ni->ni_vap->iv_state != IEEE80211_S_RUN) {
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: flush fragmented packet, state %s\n",
				    __func__,
				    ieee80211_state_name[ni->ni_vap->iv_state]);
				ath_freetx(next);
				goto reclaim;
			}
			m = next;
			bf = STAILQ_FIRST(&frags);
			KASSERT(bf != NULL, ("no buf for txfrag"));
			STAILQ_REMOVE_HEAD(&frags, bf_list);
			goto nextfrag;
		}

		/* arm the watchdog for this transmission */
		sc->sc_wd_timer = 5;
	}
}
1872 
1873 static int
1874 ath_media_change(struct ifnet *ifp)
1875 {
1876 	int error = ieee80211_media_change(ifp);
1877 	/* NB: only the fixed rate can change and that doesn't need a reset */
1878 	return (error == ENETRESET ? 0 : error);
1879 }
1880 
1881 #ifdef ATH_DEBUG
/*
 * Debug: dump a hal key cache entry (cipher, key bytes, mac,
 * and for TKIP the MIC key material).
 */
static void
ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix,
	const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	static const char *ciphers[] = {
		"WEP",
		"AES-OCB",
		"AES-CCM",
		"CKIP",
		"TKIP",
		"CLR",
	};
	int i, n;

	/* NB: assumes kv_type is a valid index into ciphers[] — TODO confirm */
	printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]);
	for (i = 0, n = hk->kv_len; i < n; i++)
		printf("%02x", hk->kv_val[i]);
	printf(" mac %s", ether_sprintf(mac));
	if (hk->kv_type == HAL_CIPHER_TKIP) {
		/* split-mic parts store rx MIC in kv_mic; otherwise both */
		printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic");
		for (i = 0; i < sizeof(hk->kv_mic); i++)
			printf("%02x", hk->kv_mic[i]);
		if (!sc->sc_splitmic) {
			printf(" txmic ");
			for (i = 0; i < sizeof(hk->kv_txmic); i++)
				printf("%02x", hk->kv_txmic[i]);
		}
	}
	printf("\n");
}
1912 #endif
1913 
1914 /*
1915  * Set a TKIP key into the hardware.  This handles the
1916  * potential distribution of key state to multiple key
1917  * cache slots for TKIP.
1918  */
1919 static int
1920 ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k,
1921 	HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN])
1922 {
1923 #define	IEEE80211_KEY_XR	(IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV)
1924 	static const u_int8_t zerobssid[IEEE80211_ADDR_LEN];
1925 	struct ath_hal *ah = sc->sc_ah;
1926 
1927 	KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP,
1928 		("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher));
1929 	if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) {
1930 		if (sc->sc_splitmic) {
1931 			/*
1932 			 * TX key goes at first index, RX key at the rx index.
1933 			 * The hal handles the MIC keys at index+64.
1934 			 */
1935 			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic));
1936 			KEYPRINTF(sc, k->wk_keyix, hk, zerobssid);
1937 			if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid))
1938 				return 0;
1939 
1940 			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1941 			KEYPRINTF(sc, k->wk_keyix+32, hk, mac);
1942 			/* XXX delete tx key on failure? */
1943 			return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac);
1944 		} else {
1945 			/*
1946 			 * Room for both TX+RX MIC keys in one key cache
1947 			 * slot, just set key at the first index; the hal
1948 			 * will handle the rest.
1949 			 */
1950 			memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1951 			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
1952 			KEYPRINTF(sc, k->wk_keyix, hk, mac);
1953 			return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1954 		}
1955 	} else if (k->wk_flags & IEEE80211_KEY_XMIT) {
1956 		if (sc->sc_splitmic) {
1957 			/*
1958 			 * NB: must pass MIC key in expected location when
1959 			 * the keycache only holds one MIC key per entry.
1960 			 */
1961 			memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic));
1962 		} else
1963 			memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic));
1964 		KEYPRINTF(sc, k->wk_keyix, hk, mac);
1965 		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1966 	} else if (k->wk_flags & IEEE80211_KEY_RECV) {
1967 		memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic));
1968 		KEYPRINTF(sc, k->wk_keyix, hk, mac);
1969 		return ath_hal_keyset(ah, k->wk_keyix, hk, mac);
1970 	}
1971 	return 0;
1972 #undef IEEE80211_KEY_XR
1973 }
1974 
1975 /*
1976  * Set a net80211 key into the hardware.  This handles the
1977  * potential distribution of key state to multiple key
1978  * cache slots for TKIP with hardware MIC support.
1979  */
1980 static int
1981 ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
1982 	struct ieee80211_node *bss)
1983 {
1984 #define	N(a)	(sizeof(a)/sizeof(a[0]))
1985 	static const u_int8_t ciphermap[] = {
1986 		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
1987 		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
1988 		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
1989 		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
1990 		(u_int8_t) -1,		/* 4 is not allocated */
1991 		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
1992 		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
1993 	};
1994 	struct ath_hal *ah = sc->sc_ah;
1995 	const struct ieee80211_cipher *cip = k->wk_cipher;
1996 	u_int8_t gmac[IEEE80211_ADDR_LEN];
1997 	const u_int8_t *mac;
1998 	HAL_KEYVAL hk;
1999 
2000 	memset(&hk, 0, sizeof(hk));
2001 	/*
2002 	 * Software crypto uses a "clear key" so non-crypto
2003 	 * state kept in the key cache are maintained and
2004 	 * so that rx frames have an entry to match.
2005 	 */
2006 	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
2007 		KASSERT(cip->ic_cipher < N(ciphermap),
2008 			("invalid cipher type %u", cip->ic_cipher));
2009 		hk.kv_type = ciphermap[cip->ic_cipher];
2010 		hk.kv_len = k->wk_keylen;
2011 		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
2012 	} else
2013 		hk.kv_type = HAL_CIPHER_CLR;
2014 
2015 	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
2016 		/*
2017 		 * Group keys on hardware that supports multicast frame
2018 		 * key search use a mac that is the sender's address with
2019 		 * the high bit set instead of the app-specified address.
2020 		 */
2021 		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
2022 		gmac[0] |= 0x80;
2023 		mac = gmac;
2024 	} else
2025 		mac = k->wk_macaddr;
2026 
2027 	if (hk.kv_type == HAL_CIPHER_TKIP &&
2028 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2029 		return ath_keyset_tkip(sc, k, &hk, mac);
2030 	} else {
2031 		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
2032 		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
2033 	}
2034 #undef N
2035 }
2036 
2037 /*
2038  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2039  * each key, one for decrypt/encrypt and the other for the MIC.
2040  */
2041 static u_int16_t
2042 key_alloc_2pair(struct ath_softc *sc,
2043 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2044 {
2045 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2046 	u_int i, keyix;
2047 
2048 	KASSERT(sc->sc_splitmic, ("key cache !split"));
2049 	/* XXX could optimize */
2050 	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2051 		u_int8_t b = sc->sc_keymap[i];
2052 		if (b != 0xff) {
2053 			/*
2054 			 * One or more slots in this byte are free.
2055 			 */
2056 			keyix = i*NBBY;
2057 			while (b & 1) {
2058 		again:
2059 				keyix++;
2060 				b >>= 1;
2061 			}
2062 			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
2063 			if (isset(sc->sc_keymap, keyix+32) ||
2064 			    isset(sc->sc_keymap, keyix+64) ||
2065 			    isset(sc->sc_keymap, keyix+32+64)) {
2066 				/* full pair unavailable */
2067 				/* XXX statistic */
2068 				if (keyix == (i+1)*NBBY) {
2069 					/* no slots were appropriate, advance */
2070 					continue;
2071 				}
2072 				goto again;
2073 			}
2074 			setbit(sc->sc_keymap, keyix);
2075 			setbit(sc->sc_keymap, keyix+64);
2076 			setbit(sc->sc_keymap, keyix+32);
2077 			setbit(sc->sc_keymap, keyix+32+64);
2078 			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2079 				"%s: key pair %u,%u %u,%u\n",
2080 				__func__, keyix, keyix+64,
2081 				keyix+32, keyix+32+64);
2082 			*txkeyix = keyix;
2083 			*rxkeyix = keyix+32;
2084 			return 1;
2085 		}
2086 	}
2087 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2088 	return 0;
2089 #undef N
2090 }
2091 
2092 /*
2093  * Allocate tx/rx key slots for TKIP.  We allocate two slots for
2094  * each key, one for decrypt/encrypt and the other for the MIC.
2095  */
2096 static u_int16_t
2097 key_alloc_pair(struct ath_softc *sc,
2098 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2099 {
2100 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2101 	u_int i, keyix;
2102 
2103 	KASSERT(!sc->sc_splitmic, ("key cache split"));
2104 	/* XXX could optimize */
2105 	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
2106 		u_int8_t b = sc->sc_keymap[i];
2107 		if (b != 0xff) {
2108 			/*
2109 			 * One or more slots in this byte are free.
2110 			 */
2111 			keyix = i*NBBY;
2112 			while (b & 1) {
2113 		again:
2114 				keyix++;
2115 				b >>= 1;
2116 			}
2117 			if (isset(sc->sc_keymap, keyix+64)) {
2118 				/* full pair unavailable */
2119 				/* XXX statistic */
2120 				if (keyix == (i+1)*NBBY) {
2121 					/* no slots were appropriate, advance */
2122 					continue;
2123 				}
2124 				goto again;
2125 			}
2126 			setbit(sc->sc_keymap, keyix);
2127 			setbit(sc->sc_keymap, keyix+64);
2128 			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2129 				"%s: key pair %u,%u\n",
2130 				__func__, keyix, keyix+64);
2131 			*txkeyix = *rxkeyix = keyix;
2132 			return 1;
2133 		}
2134 	}
2135 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
2136 	return 0;
2137 #undef N
2138 }
2139 
2140 /*
2141  * Allocate a single key cache slot.
2142  */
2143 static int
2144 key_alloc_single(struct ath_softc *sc,
2145 	ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix)
2146 {
2147 #define	N(a)	(sizeof(a)/sizeof(a[0]))
2148 	u_int i, keyix;
2149 
2150 	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
2151 	for (i = 0; i < N(sc->sc_keymap); i++) {
2152 		u_int8_t b = sc->sc_keymap[i];
2153 		if (b != 0xff) {
2154 			/*
2155 			 * One or more slots are free.
2156 			 */
2157 			keyix = i*NBBY;
2158 			while (b & 1)
2159 				keyix++, b >>= 1;
2160 			setbit(sc->sc_keymap, keyix);
2161 			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
2162 				__func__, keyix);
2163 			*txkeyix = *rxkeyix = keyix;
2164 			return 1;
2165 		}
2166 	}
2167 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
2168 	return 0;
2169 #undef N
2170 }
2171 
2172 /*
2173  * Allocate one or more key cache slots for a uniacst key.  The
2174  * key itself is needed only to identify the cipher.  For hardware
2175  * TKIP with split cipher+MIC keys we allocate two key cache slot
2176  * pairs so that we can setup separate TX and RX MIC keys.  Note
2177  * that the MIC key for a TKIP key at slot i is assumed by the
2178  * hardware to be at slot i+64.  This limits TKIP keys to the first
2179  * 64 entries.
2180  */
2181 static int
2182 ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
2183 	ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
2184 {
2185 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2186 
2187 	/*
2188 	 * Group key allocation must be handled specially for
2189 	 * parts that do not support multicast key cache search
2190 	 * functionality.  For those parts the key id must match
2191 	 * the h/w key index so lookups find the right key.  On
2192 	 * parts w/ the key search facility we install the sender's
2193 	 * mac address (with the high bit set) and let the hardware
2194 	 * find the key w/o using the key id.  This is preferred as
2195 	 * it permits us to support multiple users for adhoc and/or
2196 	 * multi-station operation.
2197 	 */
2198 	if (k->wk_keyix != IEEE80211_KEYIX_NONE ||	/* global key */
2199 	    ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey)) {
2200 		if (!(&vap->iv_nw_keys[0] <= k &&
2201 		      k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
2202 			/* should not happen */
2203 			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
2204 				"%s: bogus group key\n", __func__);
2205 			return 0;
2206 		}
2207 		/*
2208 		 * XXX we pre-allocate the global keys so
2209 		 * have no way to check if they've already been allocated.
2210 		 */
2211 		*keyix = *rxkeyix = k - vap->iv_nw_keys;
2212 		return 1;
2213 	}
2214 
2215 	/*
2216 	 * We allocate two pair for TKIP when using the h/w to do
2217 	 * the MIC.  For everything else, including software crypto,
2218 	 * we allocate a single entry.  Note that s/w crypto requires
2219 	 * a pass-through slot on the 5211 and 5212.  The 5210 does
2220 	 * not support pass-through cache entries and we map all
2221 	 * those requests to slot 0.
2222 	 */
2223 	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
2224 		return key_alloc_single(sc, keyix, rxkeyix);
2225 	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
2226 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2227 		if (sc->sc_splitmic)
2228 			return key_alloc_2pair(sc, keyix, rxkeyix);
2229 		else
2230 			return key_alloc_pair(sc, keyix, rxkeyix);
2231 	} else {
2232 		return key_alloc_single(sc, keyix, rxkeyix);
2233 	}
2234 }
2235 
2236 /*
2237  * Delete an entry in the key cache allocated by ath_key_alloc.
2238  */
2239 static int
2240 ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
2241 {
2242 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2243 	struct ath_hal *ah = sc->sc_ah;
2244 	const struct ieee80211_cipher *cip = k->wk_cipher;
2245 	u_int keyix = k->wk_keyix;
2246 
2247 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);
2248 
2249 	ath_hal_keyreset(ah, keyix);
2250 	/*
2251 	 * Handle split tx/rx keying required for TKIP with h/w MIC.
2252 	 */
2253 	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2254 	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic)
2255 		ath_hal_keyreset(ah, keyix+32);		/* RX key */
2256 	if (keyix >= IEEE80211_WEP_NKID) {
2257 		/*
2258 		 * Don't touch keymap entries for global keys so
2259 		 * they are never considered for dynamic allocation.
2260 		 */
2261 		clrbit(sc->sc_keymap, keyix);
2262 		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
2263 		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) {
2264 			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
2265 			if (sc->sc_splitmic) {
2266 				/* +32 for RX key, +32+64 for RX key MIC */
2267 				clrbit(sc->sc_keymap, keyix+32);
2268 				clrbit(sc->sc_keymap, keyix+32+64);
2269 			}
2270 		}
2271 	}
2272 	return 1;
2273 }
2274 
2275 /*
2276  * Set the key cache contents for the specified key.  Key cache
2277  * slot(s) must already have been allocated by ath_key_alloc.
2278  */
2279 static int
2280 ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k,
2281 	const u_int8_t mac[IEEE80211_ADDR_LEN])
2282 {
2283 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
2284 
2285 	return ath_keyset(sc, k, vap->iv_bss);
2286 }
2287 
2288 /*
2289  * Block/unblock tx+rx processing while a key change is done.
2290  * We assume the caller serializes key management operations
2291  * so we only need to worry about synchronization with other
2292  * uses that originate in the driver.
2293  */
2294 static void
2295 ath_key_update_begin(struct ieee80211vap *vap)
2296 {
2297 	struct ifnet *ifp = vap->iv_ic->ic_ifp;
2298 	struct ath_softc *sc = ifp->if_softc;
2299 
2300 	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
2301 	taskqueue_block(sc->sc_tq);
2302 	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
2303 }
2304 
/*
 * Re-enable tx+rx processing after a key change; reverses the
 * order of operations done in ath_key_update_begin.
 */
static void
ath_key_update_end(struct ieee80211vap *vap)
{
	struct ifnet *ifp = vap->iv_ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
	taskqueue_unblock(sc->sc_tq);
}
2315 
2316 /*
2317  * Calculate the receive filter according to the
2318  * operating mode and state:
2319  *
2320  * o always accept unicast, broadcast, and multicast traffic
2321  * o accept PHY error frames when hardware doesn't have MIB support
2322  *   to count and we need them for ANI (sta mode only until recently)
2323  *   and we are not scanning (ANI is disabled)
2324  *   NB: older hal's add rx filter bits out of sight and we need to
2325  *	 blindly preserve them
2326  * o probe request frames are accepted only when operating in
2327  *   hostap, adhoc, or monitor modes
2328  * o enable promiscuous mode
2329  *   - when in monitor mode
2330  *   - if interface marked PROMISC (assumes bridge setting is filtered)
2331  * o accept beacons:
2332  *   - when operating in station mode for collecting rssi data when
2333  *     the station is otherwise quiet, or
2334  *   - when operating in adhoc mode so the 802.11 layer creates
2335  *     node table entries for peers,
2336  *   - when scanning
2337  *   - when doing s/w beacon miss (e.g. for ap+sta)
2338  *   - when operating in ap mode in 11g to detect overlapping bss that
2339  *     require protection
2340  * o accept control frames:
2341  *   - when in monitor mode
2342  * XXX BAR frames for 11n
2343  * XXX HT protection for 11n
2344  */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	u_int32_t rfilt;

	/* see the policy description in the block comment above */
	rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	/* phy errors wanted for ANI only when no MIB support and not scanning */
	if (!sc->sc_needmib && !sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_PHYERR;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	/* XXX ic->ic_monvaps != 0? */
	if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    sc->sc_swbmiss || sc->sc_scanning)
		rfilt |= HAL_RX_FILTER_BEACON;
	/*
	 * NB: We don't recalculate the rx filter when
	 * ic_protmode changes; otherwise we could do
	 * this only when ic_protmode != NONE.
	 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
	    IEEE80211_IS_CHAN_ANYG(ic->ic_curchan))
		rfilt |= HAL_RX_FILTER_BEACON;
	if (ic->ic_opmode == IEEE80211_M_MONITOR)
		rfilt |= HAL_RX_FILTER_CONTROL;
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n",
	    __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags);
	return rfilt;
}
2378 
2379 static void
2380 ath_update_promisc(struct ifnet *ifp)
2381 {
2382 	struct ath_softc *sc = ifp->if_softc;
2383 	u_int32_t rfilt;
2384 
2385 	/* configure rx filter */
2386 	rfilt = ath_calcrxfilter(sc);
2387 	ath_hal_setrxfilter(sc->sc_ah, rfilt);
2388 
2389 	DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt);
2390 }
2391 
/*
 * Callback from net80211 when the multicast address list changes.
 * Builds a 64-bit hardware hash filter from the configured
 * multicast addresses and installs it, or opens the filter
 * completely when IFF_ALLMULTI is set.
 */
static void
ath_update_mcast(struct ifnet *ifp)
{
	struct ath_softc *sc = ifp->if_softc;
	u_int32_t mfilt[2];

	/* calculate and install multicast filter */
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		struct ifmultiaddr *ifma;
		/*
		 * Merge multicast addresses to form the hardware filter.
		 */
		mfilt[0] = mfilt[1] = 0;
		IF_ADDR_LOCK(ifp);	/* XXX need some fiddling to remove? */
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			caddr_t dl;
			u_int32_t val;
			u_int8_t pos;

			/* calculate XOR of eight 6bit values */
			/* NB: the two 32-bit little-endian reads overlap
			 * at byte 3 of the link-level address */
			dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr);
			val = LE_READ_4(dl + 0);
			pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			val = LE_READ_4(dl + 3);
			pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
			/* hash selects one of 64 filter bits */
			pos &= 0x3f;
			mfilt[pos / 32] |= (1 << (pos % 32));
		}
		IF_ADDR_UNLOCK(ifp);
	} else
		mfilt[0] = mfilt[1] = ~0;
	ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]);
	DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n",
		__func__, mfilt[0], mfilt[1]);
}
2427 
2428 static void
2429 ath_mode_init(struct ath_softc *sc)
2430 {
2431 	struct ifnet *ifp = sc->sc_ifp;
2432 	struct ath_hal *ah = sc->sc_ah;
2433 	u_int32_t rfilt;
2434 
2435 	/* configure rx filter */
2436 	rfilt = ath_calcrxfilter(sc);
2437 	ath_hal_setrxfilter(ah, rfilt);
2438 
2439 	/* configure operational mode */
2440 	ath_hal_setopmode(ah);
2441 
2442 	/* handle any link-level address change */
2443 	ath_hal_setmac(ah, IF_LLADDR(ifp));
2444 
2445 	/* calculate and install multicast filter */
2446 	ath_update_mcast(ifp);
2447 }
2448 
2449 /*
2450  * Set the slot time based on the current setting.
2451  */
2452 static void
2453 ath_setslottime(struct ath_softc *sc)
2454 {
2455 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2456 	struct ath_hal *ah = sc->sc_ah;
2457 	u_int usec;
2458 
2459 	if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan))
2460 		usec = 13;
2461 	else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan))
2462 		usec = 21;
2463 	else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) {
2464 		/* honor short/long slot time only in 11g */
2465 		/* XXX shouldn't honor on pure g or turbo g channel */
2466 		if (ic->ic_flags & IEEE80211_F_SHSLOT)
2467 			usec = HAL_SLOT_TIME_9;
2468 		else
2469 			usec = HAL_SLOT_TIME_20;
2470 	} else
2471 		usec = HAL_SLOT_TIME_9;
2472 
2473 	DPRINTF(sc, ATH_DEBUG_RESET,
2474 	    "%s: chan %u MHz flags 0x%x %s slot, %u usec\n",
2475 	    __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags,
2476 	    ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec);
2477 
2478 	ath_hal_setslottime(ah, usec);
2479 	sc->sc_updateslot = OK;
2480 }
2481 
2482 /*
2483  * Callback from the 802.11 layer to update the
2484  * slot time based on the current setting.
2485  */
2486 static void
2487 ath_updateslot(struct ifnet *ifp)
2488 {
2489 	struct ath_softc *sc = ifp->if_softc;
2490 	struct ieee80211com *ic = ifp->if_l2com;
2491 
2492 	/*
2493 	 * When not coordinating the BSS, change the hardware
2494 	 * immediately.  For other operation we defer the change
2495 	 * until beacon updates have propagated to the stations.
2496 	 */
2497 	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
2498 		sc->sc_updateslot = UPDATE;
2499 	else
2500 		ath_setslottime(sc);
2501 }
2502 
2503 /*
2504  * Setup a h/w transmit queue for beacons.
2505  */
2506 static int
2507 ath_beaconq_setup(struct ath_hal *ah)
2508 {
2509 	HAL_TXQ_INFO qi;
2510 
2511 	memset(&qi, 0, sizeof(qi));
2512 	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
2513 	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
2514 	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
2515 	/* NB: for dynamic turbo, don't enable any other interrupts */
2516 	qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE;
2517 	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
2518 }
2519 
2520 /*
2521  * Setup the transmit queue parameters for the beacon queue.
2522  */
2523 static int
2524 ath_beaconq_config(struct ath_softc *sc)
2525 {
2526 #define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
2527 	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2528 	struct ath_hal *ah = sc->sc_ah;
2529 	HAL_TXQ_INFO qi;
2530 
2531 	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
2532 	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
2533 		/*
2534 		 * Always burst out beacon and CAB traffic.
2535 		 */
2536 		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
2537 		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
2538 		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
2539 	} else {
2540 		struct wmeParams *wmep =
2541 			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
2542 		/*
2543 		 * Adhoc mode; important thing is to use 2x cwmin.
2544 		 */
2545 		qi.tqi_aifs = wmep->wmep_aifsn;
2546 		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
2547 		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
2548 	}
2549 
2550 	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
2551 		device_printf(sc->sc_dev, "unable to update parameters for "
2552 			"beacon hardware queue!\n");
2553 		return 0;
2554 	} else {
2555 		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
2556 		return 1;
2557 	}
2558 #undef ATH_EXPONENT_TO_VALUE
2559 }
2560 
2561 /*
2562  * Allocate and setup an initial beacon frame.
2563  */
2564 static int
2565 ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni)
2566 {
2567 	struct ieee80211vap *vap = ni->ni_vap;
2568 	struct ath_vap *avp = ATH_VAP(vap);
2569 	struct ath_buf *bf;
2570 	struct mbuf *m;
2571 	int error;
2572 
2573 	bf = avp->av_bcbuf;
2574 	if (bf->bf_m != NULL) {
2575 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
2576 		m_freem(bf->bf_m);
2577 		bf->bf_m = NULL;
2578 	}
2579 	if (bf->bf_node != NULL) {
2580 		ieee80211_free_node(bf->bf_node);
2581 		bf->bf_node = NULL;
2582 	}
2583 
2584 	/*
2585 	 * NB: the beacon data buffer must be 32-bit aligned;
2586 	 * we assume the mbuf routines will return us something
2587 	 * with this alignment (perhaps should assert).
2588 	 */
2589 	m = ieee80211_beacon_alloc(ni, &avp->av_boff);
2590 	if (m == NULL) {
2591 		device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__);
2592 		sc->sc_stats.ast_be_nombuf++;
2593 		return ENOMEM;
2594 	}
2595 	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
2596 				     bf->bf_segs, &bf->bf_nseg,
2597 				     BUS_DMA_NOWAIT);
2598 	if (error != 0) {
2599 		device_printf(sc->sc_dev,
2600 		    "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n",
2601 		    __func__, error);
2602 		m_freem(m);
2603 		return error;
2604 	}
2605 
2606 	/*
2607 	 * Calculate a TSF adjustment factor required for staggered
2608 	 * beacons.  Note that we assume the format of the beacon
2609 	 * frame leaves the tstamp field immediately following the
2610 	 * header.
2611 	 */
2612 	if (sc->sc_stagbeacons && avp->av_bslot > 0) {
2613 		uint64_t tsfadjust;
2614 		struct ieee80211_frame *wh;
2615 
2616 		/*
2617 		 * The beacon interval is in TU's; the TSF is in usecs.
2618 		 * We figure out how many TU's to add to align the timestamp
2619 		 * then convert to TSF units and handle byte swapping before
2620 		 * inserting it in the frame.  The hardware will then add this
2621 		 * each time a beacon frame is sent.  Note that we align vap's
2622 		 * 1..N and leave vap 0 untouched.  This means vap 0 has a
2623 		 * timestamp in one beacon interval while the others get a
2624 		 * timstamp aligned to the next interval.
2625 		 */
2626 		tsfadjust = ni->ni_intval *
2627 		    (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF;
2628 		tsfadjust = htole64(tsfadjust << 10);	/* TU -> TSF */
2629 
2630 		DPRINTF(sc, ATH_DEBUG_BEACON,
2631 		    "%s: %s beacons bslot %d intval %u tsfadjust %llu\n",
2632 		    __func__, sc->sc_stagbeacons ? "stagger" : "burst",
2633 		    avp->av_bslot, ni->ni_intval,
2634 		    (long long unsigned) le64toh(tsfadjust));
2635 
2636 		wh = mtod(m, struct ieee80211_frame *);
2637 		memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust));
2638 	}
2639 	bf->bf_m = m;
2640 	bf->bf_node = ieee80211_ref_node(ni);
2641 
2642 	return 0;
2643 }
2644 
2645 /*
2646  * Setup the beacon frame for transmit.
2647  */
2648 static void
2649 ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf)
2650 {
2651 #define	USE_SHPREAMBLE(_ic) \
2652 	(((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\
2653 		== IEEE80211_F_SHPREAMBLE)
2654 	struct ieee80211_node *ni = bf->bf_node;
2655 	struct ieee80211com *ic = ni->ni_ic;
2656 	struct mbuf *m = bf->bf_m;
2657 	struct ath_hal *ah = sc->sc_ah;
2658 	struct ath_desc *ds;
2659 	int flags, antenna;
2660 	const HAL_RATE_TABLE *rt;
2661 	u_int8_t rix, rate;
2662 
2663 	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n",
2664 		__func__, m, m->m_len);
2665 
2666 	/* setup descriptors */
2667 	ds = bf->bf_desc;
2668 
2669 	flags = HAL_TXDESC_NOACK;
2670 	if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) {
2671 		ds->ds_link = bf->bf_daddr;	/* self-linked */
2672 		flags |= HAL_TXDESC_VEOL;
2673 		/*
2674 		 * Let hardware handle antenna switching.
2675 		 */
2676 		antenna = sc->sc_txantenna;
2677 	} else {
2678 		ds->ds_link = 0;
2679 		/*
2680 		 * Switch antenna every 4 beacons.
2681 		 * XXX assumes two antenna
2682 		 */
2683 		if (sc->sc_txantenna != 0)
2684 			antenna = sc->sc_txantenna;
2685 		else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0)
2686 			antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1);
2687 		else
2688 			antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1);
2689 	}
2690 
2691 	KASSERT(bf->bf_nseg == 1,
2692 		("multi-segment beacon frame; nseg %u", bf->bf_nseg));
2693 	ds->ds_data = bf->bf_segs[0].ds_addr;
2694 	/*
2695 	 * Calculate rate code.
2696 	 * XXX everything at min xmit rate
2697 	 */
2698 	rix = 0;
2699 	rt = sc->sc_currates;
2700 	rate = rt->info[rix].rateCode;
2701 	if (USE_SHPREAMBLE(ic))
2702 		rate |= rt->info[rix].shortPreamble;
2703 	ath_hal_setuptxdesc(ah, ds
2704 		, m->m_len + IEEE80211_CRC_LEN	/* frame length */
2705 		, sizeof(struct ieee80211_frame)/* header length */
2706 		, HAL_PKT_TYPE_BEACON		/* Atheros packet type */
2707 		, ni->ni_txpower		/* txpower XXX */
2708 		, rate, 1			/* series 0 rate/tries */
2709 		, HAL_TXKEYIX_INVALID		/* no encryption */
2710 		, antenna			/* antenna mode */
2711 		, flags				/* no ack, veol for beacons */
2712 		, 0				/* rts/cts rate */
2713 		, 0				/* rts/cts duration */
2714 	);
2715 	/* NB: beacon's BufLen must be a multiple of 4 bytes */
2716 	ath_hal_filltxdesc(ah, ds
2717 		, roundup(m->m_len, 4)		/* buffer length */
2718 		, AH_TRUE			/* first segment */
2719 		, AH_TRUE			/* last segment */
2720 		, ds				/* first descriptor */
2721 	);
2722 #if 0
2723 	ath_desc_swap(ds);
2724 #endif
2725 #undef USE_SHPREAMBLE
2726 }
2727 
2728 static void
2729 ath_beacon_update(struct ieee80211vap *vap, int item)
2730 {
2731 	struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff;
2732 
2733 	setbit(bo->bo_flags, item);
2734 }
2735 
2736 /*
2737  * Append the contents of src to dst; both queues
2738  * are assumed to be locked.
2739  */
2740 static void
2741 ath_txqmove(struct ath_txq *dst, struct ath_txq *src)
2742 {
2743 	STAILQ_CONCAT(&dst->axq_q, &src->axq_q);
2744 	dst->axq_link = src->axq_link;
2745 	src->axq_link = NULL;
2746 	dst->axq_depth += src->axq_depth;
2747 	src->axq_depth = 0;
2748 }
2749 
2750 /*
2751  * Transmit a beacon frame at SWBA.  Dynamic updates to the
2752  * frame contents are done as needed and the slot time is
2753  * also adjusted based on current state.
2754  */
2755 static void
2756 ath_beacon_proc(void *arg, int pending)
2757 {
2758 	struct ath_softc *sc = arg;
2759 	struct ath_hal *ah = sc->sc_ah;
2760 	struct ieee80211vap *vap;
2761 	struct ath_buf *bf;
2762 	int slot, otherant;
2763 	uint32_t bfaddr;
2764 
2765 	DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n",
2766 		__func__, pending);
2767 	/*
2768 	 * Check if the previous beacon has gone out.  If
2769 	 * not don't try to post another, skip this period
2770 	 * and wait for the next.  Missed beacons indicate
2771 	 * a problem and should not occur.  If we miss too
2772 	 * many consecutive beacons reset the device.
2773 	 */
2774 	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
2775 		sc->sc_bmisscount++;
2776 		DPRINTF(sc, ATH_DEBUG_BEACON,
2777 			"%s: missed %u consecutive beacons\n",
2778 			__func__, sc->sc_bmisscount);
2779 		if (sc->sc_bmisscount >= ath_bstuck_threshold)
2780 			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
2781 		return;
2782 	}
2783 	if (sc->sc_bmisscount != 0) {
2784 		DPRINTF(sc, ATH_DEBUG_BEACON,
2785 			"%s: resume beacon xmit after %u misses\n",
2786 			__func__, sc->sc_bmisscount);
2787 		sc->sc_bmisscount = 0;
2788 	}
2789 
2790 	if (sc->sc_stagbeacons) {			/* staggered beacons */
2791 		struct ieee80211com *ic = sc->sc_ifp->if_l2com;
2792 		uint32_t tsftu;
2793 
2794 		tsftu = ath_hal_gettsf32(ah) >> 10;
2795 		/* XXX lintval */
2796 		slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval;
2797 		vap = sc->sc_bslot[(slot+1) % ATH_BCBUF];
2798 		bfaddr = 0;
2799 		if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
2800 			bf = ath_beacon_generate(sc, vap);
2801 			if (bf != NULL)
2802 				bfaddr = bf->bf_daddr;
2803 		}
2804 	} else {					/* burst'd beacons */
2805 		uint32_t *bflink = &bfaddr;
2806 
2807 		for (slot = 0; slot < ATH_BCBUF; slot++) {
2808 			vap = sc->sc_bslot[slot];
2809 			if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) {
2810 				bf = ath_beacon_generate(sc, vap);
2811 				if (bf != NULL) {
2812 					*bflink = bf->bf_daddr;
2813 					bflink = &bf->bf_desc->ds_link;
2814 				}
2815 			}
2816 		}
2817 		*bflink = 0;				/* terminate list */
2818 	}
2819 
2820 	/*
2821 	 * Handle slot time change when a non-ERP station joins/leaves
2822 	 * an 11g network.  The 802.11 layer notifies us via callback,
2823 	 * we mark updateslot, then wait one beacon before effecting
2824 	 * the change.  This gives associated stations at least one
2825 	 * beacon interval to note the state change.
2826 	 */
2827 	/* XXX locking */
2828 	if (sc->sc_updateslot == UPDATE) {
2829 		sc->sc_updateslot = COMMIT;	/* commit next beacon */
2830 		sc->sc_slotupdate = slot;
2831 	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot)
2832 		ath_setslottime(sc);		/* commit change to h/w */
2833 
2834 	/*
2835 	 * Check recent per-antenna transmit statistics and flip
2836 	 * the default antenna if noticeably more frames went out
2837 	 * on the non-default antenna.
2838 	 * XXX assumes 2 anntenae
2839 	 */
2840 	if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) {
2841 		otherant = sc->sc_defant & 1 ? 2 : 1;
2842 		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
2843 			ath_setdefantenna(sc, otherant);
2844 		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
2845 	}
2846 
2847 	if (bfaddr != 0) {
2848 		/*
2849 		 * Stop any current dma and put the new frame on the queue.
2850 		 * This should never fail since we check above that no frames
2851 		 * are still pending on the queue.
2852 		 */
2853 		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
2854 			DPRINTF(sc, ATH_DEBUG_ANY,
2855 				"%s: beacon queue %u did not stop?\n",
2856 				__func__, sc->sc_bhalq);
2857 		}
2858 		/* NB: cabq traffic should already be queued and primed */
2859 		ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr);
2860 		ath_hal_txstart(ah, sc->sc_bhalq);
2861 
2862 		sc->sc_stats.ast_be_xmit++;
2863 	}
2864 }
2865 
/*
 * Construct the beacon frame for a vap and stage any pending
 * multicast (CAB) traffic behind it.  Returns the ath_buf holding
 * the ready-to-transmit beacon, or NULL if the frame changed size
 * and could not be remapped for DMA.
 */
static struct ath_buf *
ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_txq *cabq = sc->sc_cabq;
	struct ath_buf *bf;
	struct mbuf *m;
	int nmcastq, error;

	KASSERT(vap->iv_state == IEEE80211_S_RUN,
	    ("not running, state %d", vap->iv_state));
	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	/* NB: snapshot the s/w mcast queue depth before any moves below */
	nmcastq = avp->av_mcastq.axq_depth;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			/* NB: no beacon goes out this interval */
			return NULL;
		}
	}
	/* bo_tim[4] & 1: mcast-pending indication gating CAB traffic */
	if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
		    "%s: cabq did not drain, mcastq %u cabq %u\n",
		    __func__, nmcastq, cabq->axq_depth);
		sc->sc_stats.ast_cabq_busy++;
		if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) {
			/*
			 * CABQ traffic from a previous vap is still pending.
			 * We must drain the q before this beacon frame goes
			 * out as otherwise this vap's stations will get cab
			 * frames from a different vap.
			 * XXX could be slow causing us to miss DBA
			 */
			ath_tx_draintxq(sc, cabq);
		}
	}
	/* (re)write the tx descriptor and flush the frame for DMA */
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/*
	 * Enable the CAB queue before the beacon queue to
	 * insure cab frames are triggered by this beacon.
	 */
	if (avp->av_boff.bo_tim[4] & 1) {
		struct ath_hal *ah = sc->sc_ah;

		/* NB: only at DTIM */
		ATH_TXQ_LOCK(cabq);
		ATH_TXQ_LOCK(&avp->av_mcastq);
		if (nmcastq) {
			struct ath_buf *bfm;

			/*
			 * Move frames from the s/w mcast q to the h/w cab q.
			 * XXX MORE_DATA bit
			 */
			bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q);
			/* append to the live h/w chain or prime an idle queue */
			if (cabq->axq_link != NULL) {
				*cabq->axq_link = bfm->bf_daddr;
			} else
				ath_hal_puttxbuf(ah, cabq->axq_qnum,
					bfm->bf_daddr);
			ath_txqmove(cabq, &avp->av_mcastq);

			sc->sc_stats.ast_cabq_xmit += nmcastq;
		}
		/* NB: gated by beacon so safe to start here */
		ath_hal_txstart(ah, cabq->axq_qnum);
		ATH_TXQ_UNLOCK(cabq);
		ATH_TXQ_UNLOCK(&avp->av_mcastq);
	}
	return bf;
}
2954 
/*
 * Update and (re)load the beacon frame for an adhoc vap onto the
 * hardware beacon queue and start transmission.  Called from
 * ath_beacon_config when IBSS beacons are handled via a
 * self-linked (VEOL) descriptor that is loaded once.
 */
static void
ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct mbuf *m;
	int error;

	KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer"));

	/*
	 * Update dynamic beacon contents.  If this returns
	 * non-zero then we need to remap the memory because
	 * the beacon frame changed size (probably because
	 * of the TIM bitmap).
	 */
	bf = avp->av_bcbuf;
	m = bf->bf_m;
	if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) {
		/* XXX too conservative? */
		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			if_printf(vap->iv_ifp,
			    "%s: bus_dmamap_load_mbuf_sg failed, error %u\n",
			    __func__, error);
			/* NB: beacon is simply not loaded on failure */
			return;
		}
	}
	/* write the tx descriptor and flush the frame contents for DMA */
	ath_beacon_setup(sc, bf);
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);

	/* NB: caller is known to have already stopped tx dma */
	ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
	ath_hal_txstart(ah, sc->sc_bhalq);
}
2994 
2995 /*
2996  * Reset the hardware after detecting beacons have stopped.
2997  */
2998 static void
2999 ath_bstuck_proc(void *arg, int pending)
3000 {
3001 	struct ath_softc *sc = arg;
3002 	struct ifnet *ifp = sc->sc_ifp;
3003 
3004 	if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n",
3005 		sc->sc_bmisscount);
3006 	sc->sc_stats.ast_bstuck++;
3007 	ath_reset(ifp);
3008 }
3009 
3010 /*
3011  * Reclaim beacon resources and return buffer to the pool.
3012  */
3013 static void
3014 ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf)
3015 {
3016 
3017 	if (bf->bf_m != NULL) {
3018 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3019 		m_freem(bf->bf_m);
3020 		bf->bf_m = NULL;
3021 	}
3022 	if (bf->bf_node != NULL) {
3023 		ieee80211_free_node(bf->bf_node);
3024 		bf->bf_node = NULL;
3025 	}
3026 	STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list);
3027 }
3028 
3029 /*
3030  * Reclaim beacon resources.
3031  */
3032 static void
3033 ath_beacon_free(struct ath_softc *sc)
3034 {
3035 	struct ath_buf *bf;
3036 
3037 	STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) {
3038 		if (bf->bf_m != NULL) {
3039 			bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3040 			m_freem(bf->bf_m);
3041 			bf->bf_m = NULL;
3042 		}
3043 		if (bf->bf_node != NULL) {
3044 			ieee80211_free_node(bf->bf_node);
3045 			bf->bf_node = NULL;
3046 		}
3047 	}
3048 }
3049 
3050 /*
3051  * Configure the beacon and sleep timers.
3052  *
3053  * When operating as an AP this resets the TSF and sets
3054  * up the hardware to notify us when we need to issue beacons.
3055  *
3056  * When operating in station mode this sets up the beacon
3057  * timers according to the timestamp of the last received
3058  * beacon and the current TSF, configures PCF and DTIM
3059  * handling, programs the sleep registers so the hardware
3060  * will wakeup in time to receive beacons, and configures
3061  * the beacon miss handling so we'll receive a BMISS
3062  * interrupt when we stop seeing beacons from the AP
3063  * we've associated with.
3064  */
static void
ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
/* TU slack added to the current TSF when pulling nexttbtt forward */
#define	FUDGE	2
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211com *ic = sc->sc_ifp->if_l2com;
	struct ieee80211_node *ni;
	u_int32_t nexttbtt, intval, tsftu;
	u_int64_t tsf;

	/* NB: with no vap specified, configure from the first vap */
	if (vap == NULL)
		vap = TAILQ_FIRST(&ic->ic_vaps);	/* XXX */
	ni = vap->iv_bss;

	/* extract tstamp from last beacon and convert to TU */
	nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4),
			     LE_READ_4(ni->ni_tstamp.data));
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * For multi-bss ap support beacons are either staggered
		 * evenly over N slots or burst together.  For the former
		 * arrange for the SWBA to be delivered for each slot.
		 * Slots that are not occupied will generate nothing.
		 */
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
		if (sc->sc_stagbeacons)
			intval /= ATH_BCBUF;
	} else {
		/* NB: the beacon interval is kept internally in TU's */
		intval = ni->ni_intval & HAL_BEACON_PERIOD;
	}
	if (nexttbtt == 0)		/* e.g. for ap mode */
		nexttbtt = intval;
	else if (intval)		/* NB: can be 0 for monitor mode */
		nexttbtt = roundup(nexttbtt, intval);
	DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n",
		__func__, nexttbtt, intval, ni->ni_intval);
	/* station mode with h/w beacon miss detection */
	if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) {
		HAL_BEACON_STATE bs;
		int dtimperiod, dtimcount;
		int cfpperiod, cfpcount;

		/*
		 * Setup dtim and cfp parameters according to
		 * last beacon we received (which may be none).
		 */
		dtimperiod = ni->ni_dtim_period;
		if (dtimperiod <= 0)		/* NB: 0 if not known */
			dtimperiod = 1;
		dtimcount = ni->ni_dtim_count;
		if (dtimcount >= dtimperiod)	/* NB: sanity check */
			dtimcount = 0;		/* XXX? */
		cfpperiod = 1;			/* NB: no PCF support yet */
		cfpcount = 0;
		/*
		 * Pull nexttbtt forward to reflect the current
		 * TSF and calculate dtim+cfp state for the result.
		 */
		tsf = ath_hal_gettsf64(ah);
		tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
		do {
			nexttbtt += intval;
			if (--dtimcount < 0) {
				dtimcount = dtimperiod - 1;
				if (--cfpcount < 0)
					cfpcount = cfpperiod - 1;
			}
		} while (nexttbtt < tsftu);
		memset(&bs, 0, sizeof(bs));
		bs.bs_intval = intval;
		bs.bs_nexttbtt = nexttbtt;
		bs.bs_dtimperiod = dtimperiod*intval;
		bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
		bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
		bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
		bs.bs_cfpmaxduration = 0;
#if 0
		/*
		 * The 802.11 layer records the offset to the DTIM
		 * bitmap while receiving beacons; use it here to
		 * enable h/w detection of our AID being marked in
		 * the bitmap vector (to indicate frames for us are
		 * pending at the AP).
		 * XXX do DTIM handling in s/w to WAR old h/w bugs
		 * XXX enable based on h/w rev for newer chips
		 */
		bs.bs_timoffset = ni->ni_timoff;
#endif
		/*
		 * Calculate the number of consecutive beacons to miss
		 * before taking a BMISS interrupt.
		 * Note that we clamp the result to at most 10 beacons.
		 */
		bs.bs_bmissthreshold = vap->iv_bmissthreshold;
		if (bs.bs_bmissthreshold > 10)
			bs.bs_bmissthreshold = 10;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;

		/*
		 * Calculate sleep duration.  The configuration is
		 * given in ms.  We insure a multiple of the beacon
		 * period is used.  Also, if the sleep duration is
		 * greater than the DTIM period then it makes senses
		 * to make it a multiple of that.
		 *
		 * XXX fixed at 100ms
		 */
		bs.bs_sleepduration =
			roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval);
		if (bs.bs_sleepduration > bs.bs_dtimperiod)
			bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod);

		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n"
			, __func__
			, tsf, tsftu
			, bs.bs_intval
			, bs.bs_nexttbtt
			, bs.bs_dtimperiod
			, bs.bs_nextdtim
			, bs.bs_bmissthreshold
			, bs.bs_sleepduration
			, bs.bs_cfpperiod
			, bs.bs_cfpmaxduration
			, bs.bs_cfpnext
			, bs.bs_timoffset
		);
		/* NB: interrupts masked while reprogramming beacon timers */
		ath_hal_intrset(ah, 0);
		ath_hal_beacontimers(ah, &bs);
		sc->sc_imask |= HAL_INT_BMISS;
		ath_hal_intrset(ah, sc->sc_imask);
	} else {
		ath_hal_intrset(ah, 0);
		/* NB: first beacon at nexttbtt==intval implies a fresh TSF */
		if (nexttbtt == intval)
			intval |= HAL_BEACON_RESET_TSF;
		if (ic->ic_opmode == IEEE80211_M_IBSS) {
			/*
			 * In IBSS mode enable the beacon timers but only
			 * enable SWBA interrupts if we need to manually
			 * prepare beacon frames.  Otherwise we use a
			 * self-linked tx descriptor and let the hardware
			 * deal with things.
			 */
			intval |= HAL_BEACON_ENA;
			if (!sc->sc_hasveol)
				sc->sc_imask |= HAL_INT_SWBA;
			if ((intval & HAL_BEACON_RESET_TSF) == 0) {
				/*
				 * Pull nexttbtt forward to reflect
				 * the current TSF.
				 */
				tsf = ath_hal_gettsf64(ah);
				tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
				do {
					nexttbtt += intval;
				} while (nexttbtt < tsftu);
			}
			ath_beaconq_config(sc);
		} else if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
			/*
			 * In AP mode we enable the beacon timers and
			 * SWBA interrupts to prepare beacon frames.
			 */
			intval |= HAL_BEACON_ENA;
			sc->sc_imask |= HAL_INT_SWBA;	/* beacon prepare */
			ath_beaconq_config(sc);
		}
		ath_hal_beaconinit(ah, nexttbtt, intval);
		sc->sc_bmisscount = 0;
		ath_hal_intrset(ah, sc->sc_imask);
		/*
		 * When using a self-linked beacon descriptor in
		 * ibss mode load it once here.
		 */
		if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol)
			ath_beacon_start_adhoc(sc, vap);
	}
	/* timers now track the bss; stop resyncing on rx'd beacons */
	sc->sc_syncbeacon = 0;
#undef FUDGE
#undef TSF_TO_TU
}
3250 
3251 static void
3252 ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
3253 {
3254 	bus_addr_t *paddr = (bus_addr_t*) arg;
3255 	KASSERT(error == 0, ("error %u on bus_dma callback", error));
3256 	*paddr = segs->ds_addr;
3257 }
3258 
3259 static int
3260 ath_descdma_setup(struct ath_softc *sc,
3261 	struct ath_descdma *dd, ath_bufhead *head,
3262 	const char *name, int nbuf, int ndesc)
3263 {
3264 #define	DS2PHYS(_dd, _ds) \
3265 	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
3266 	struct ifnet *ifp = sc->sc_ifp;
3267 	struct ath_desc *ds;
3268 	struct ath_buf *bf;
3269 	int i, bsize, error;
3270 
3271 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n",
3272 	    __func__, name, nbuf, ndesc);
3273 
3274 	dd->dd_name = name;
3275 	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
3276 
3277 	/*
3278 	 * Setup DMA descriptor area.
3279 	 */
3280 	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
3281 		       PAGE_SIZE, 0,		/* alignment, bounds */
3282 		       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
3283 		       BUS_SPACE_MAXADDR,	/* highaddr */
3284 		       NULL, NULL,		/* filter, filterarg */
3285 		       dd->dd_desc_len,		/* maxsize */
3286 		       1,			/* nsegments */
3287 		       dd->dd_desc_len,		/* maxsegsize */
3288 		       BUS_DMA_ALLOCNOW,	/* flags */
3289 		       NULL,			/* lockfunc */
3290 		       NULL,			/* lockarg */
3291 		       &dd->dd_dmat);
3292 	if (error != 0) {
3293 		if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name);
3294 		return error;
3295 	}
3296 
3297 	/* allocate descriptors */
3298 	error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap);
3299 	if (error != 0) {
3300 		if_printf(ifp, "unable to create dmamap for %s descriptors, "
3301 			"error %u\n", dd->dd_name, error);
3302 		goto fail0;
3303 	}
3304 
3305 	error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc,
3306 				 BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
3307 				 &dd->dd_dmamap);
3308 	if (error != 0) {
3309 		if_printf(ifp, "unable to alloc memory for %u %s descriptors, "
3310 			"error %u\n", nbuf * ndesc, dd->dd_name, error);
3311 		goto fail1;
3312 	}
3313 
3314 	error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap,
3315 				dd->dd_desc, dd->dd_desc_len,
3316 				ath_load_cb, &dd->dd_desc_paddr,
3317 				BUS_DMA_NOWAIT);
3318 	if (error != 0) {
3319 		if_printf(ifp, "unable to map %s descriptors, error %u\n",
3320 			dd->dd_name, error);
3321 		goto fail2;
3322 	}
3323 
3324 	ds = dd->dd_desc;
3325 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n",
3326 	    __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len,
3327 	    (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len);
3328 
3329 	/* allocate rx buffers */
3330 	bsize = sizeof(struct ath_buf) * nbuf;
3331 	bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO);
3332 	if (bf == NULL) {
3333 		if_printf(ifp, "malloc of %s buffers failed, size %u\n",
3334 			dd->dd_name, bsize);
3335 		goto fail3;
3336 	}
3337 	dd->dd_bufptr = bf;
3338 
3339 	STAILQ_INIT(head);
3340 	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
3341 		bf->bf_desc = ds;
3342 		bf->bf_daddr = DS2PHYS(dd, ds);
3343 		error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
3344 				&bf->bf_dmamap);
3345 		if (error != 0) {
3346 			if_printf(ifp, "unable to create dmamap for %s "
3347 				"buffer %u, error %u\n", dd->dd_name, i, error);
3348 			ath_descdma_cleanup(sc, dd, head);
3349 			return error;
3350 		}
3351 		STAILQ_INSERT_TAIL(head, bf, bf_list);
3352 	}
3353 	return 0;
3354 fail3:
3355 	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
3356 fail2:
3357 	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
3358 fail1:
3359 	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
3360 fail0:
3361 	bus_dma_tag_destroy(dd->dd_dmat);
3362 	memset(dd, 0, sizeof(*dd));
3363 	return error;
3364 #undef DS2PHYS
3365 }
3366 
/*
 * Tear down a DMA descriptor area created by ath_descdma_setup:
 * unmap and free the descriptor memory, release each buffer's
 * mbuf, dmamap and node reference, then free the ath_buf array
 * and clear the descdma state so the area reads as unallocated.
 */
static void
ath_descdma_cleanup(struct ath_softc *sc,
	struct ath_descdma *dd, ath_bufhead *head)
{
	struct ath_buf *bf;
	struct ieee80211_node *ni;

	/* release the descriptor area (reverse order of setup) */
	bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap);
	bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap);
	bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap);
	bus_dma_tag_destroy(dd->dd_dmat);

	STAILQ_FOREACH(bf, head, bf_list) {
		if (bf->bf_m) {
			m_freem(bf->bf_m);
			bf->bf_m = NULL;
		}
		if (bf->bf_dmamap != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap);
			bf->bf_dmamap = NULL;
		}
		ni = bf->bf_node;
		bf->bf_node = NULL;
		if (ni != NULL) {
			/*
			 * Reclaim node reference.
			 */
			ieee80211_free_node(ni);
		}
	}

	/* the ath_buf entries are freed en masse below */
	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}
3402 
3403 static int
3404 ath_desc_alloc(struct ath_softc *sc)
3405 {
3406 	int error;
3407 
3408 	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
3409 			"rx", ath_rxbuf, 1);
3410 	if (error != 0)
3411 		return error;
3412 
3413 	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
3414 			"tx", ath_txbuf, ATH_TXDESC);
3415 	if (error != 0) {
3416 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3417 		return error;
3418 	}
3419 
3420 	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
3421 			"beacon", ATH_BCBUF, 1);
3422 	if (error != 0) {
3423 		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3424 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3425 		return error;
3426 	}
3427 	return 0;
3428 }
3429 
3430 static void
3431 ath_desc_free(struct ath_softc *sc)
3432 {
3433 
3434 	if (sc->sc_bdma.dd_desc_len != 0)
3435 		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
3436 	if (sc->sc_txdma.dd_desc_len != 0)
3437 		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
3438 	if (sc->sc_rxdma.dd_desc_len != 0)
3439 		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
3440 }
3441 
3442 static struct ieee80211_node *
3443 ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
3444 {
3445 	struct ieee80211com *ic = vap->iv_ic;
3446 	struct ath_softc *sc = ic->ic_ifp->if_softc;
3447 	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
3448 	struct ath_node *an;
3449 
3450 	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
3451 	if (an == NULL) {
3452 		/* XXX stat+msg */
3453 		return NULL;
3454 	}
3455 	ath_rate_node_init(sc, an);
3456 
3457 	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
3458 	return &an->an_node;
3459 }
3460 
3461 static void
3462 ath_node_free(struct ieee80211_node *ni)
3463 {
3464 	struct ieee80211com *ic = ni->ni_ic;
3465         struct ath_softc *sc = ic->ic_ifp->if_softc;
3466 
3467 	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
3468 
3469 	ath_rate_node_cleanup(sc, ATH_NODE(ni));
3470 	sc->sc_node_free(ni);
3471 }
3472 
3473 static void
3474 ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise)
3475 {
3476 	struct ieee80211com *ic = ni->ni_ic;
3477 	struct ath_softc *sc = ic->ic_ifp->if_softc;
3478 	struct ath_hal *ah = sc->sc_ah;
3479 
3480 	*rssi = ic->ic_node_getrssi(ni);
3481 	if (ni->ni_chan != IEEE80211_CHAN_ANYC)
3482 		*noise = ath_hal_getchannoise(ah, ni->ni_chan);
3483 	else
3484 		*noise = -95;		/* nominally correct */
3485 }
3486 
/*
 * (Re)initialize an rx buffer: attach a cluster mbuf if the buffer
 * has none, map it for DMA, write the rx descriptor, and append it
 * to the self-linked hardware rx chain.  Returns 0 or an errno on
 * mbuf/DMA failure.
 */
static int
ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	int error;
	struct mbuf *m;
	struct ath_desc *ds;

	m = bf->bf_m;
	if (m == NULL) {
		/*
		 * NB: by assigning a page to the rx dma buffer we
		 * implicitly satisfy the Atheros requirement that
		 * this buffer be cache-line-aligned and sized to be
		 * multiple of the cache line size.  Not doing this
		 * causes weird stuff to happen (for the 5210 at least).
		 */
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: no mbuf/cluster\n", __func__);
			sc->sc_stats.ast_rx_nombuf++;
			return ENOMEM;
		}
		/* use the whole cluster for receive */
		m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;

		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat,
					     bf->bf_dmamap, m,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			DPRINTF(sc, ATH_DEBUG_ANY,
			    "%s: bus_dmamap_load_mbuf_sg failed; error %d\n",
			    __func__, error);
			sc->sc_stats.ast_rx_busdma++;
			m_freem(m);
			return error;
		}
		KASSERT(bf->bf_nseg == 1,
			("multi-segment packet; nseg %u", bf->bf_nseg));
		bf->bf_m = m;
	}
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD);

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To insure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* hook onto the tail of the current rx chain */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}
3559 
3560 /*
3561  * Extend 15-bit time stamp from rx descriptor to
3562  * a full 64-bit TSF using the specified TSF.
3563  */
3564 static __inline u_int64_t
3565 ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf)
3566 {
3567 	if ((tsf & 0x7fff) < rstamp)
3568 		tsf -= 0x8000;
3569 	return ((tsf &~ 0x7fff) | rstamp);
3570 }
3571 
3572 /*
3573  * Intercept management frames to collect beacon rssi data
3574  * and to do ibss merges.
3575  */
3576 static void
3577 ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
3578 	int subtype, int rssi, int nf)
3579 {
3580 	struct ieee80211vap *vap = ni->ni_vap;
3581 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
3582 
3583 	/*
3584 	 * Call up first so subsequent work can use information
3585 	 * potentially stored in the node (e.g. for ibss merge).
3586 	 */
3587 	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, nf);
3588 	switch (subtype) {
3589 	case IEEE80211_FC0_SUBTYPE_BEACON:
3590 		/* update rssi statistics for use by the hal */
3591 		ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);
3592 		if (sc->sc_syncbeacon &&
3593 		    ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) {
3594 			/*
3595 			 * Resync beacon timers using the tsf of the beacon
3596 			 * frame we just received.
3597 			 */
3598 			ath_beacon_config(sc, vap);
3599 		}
3600 		/* fall thru... */
3601 	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
3602 		if (vap->iv_opmode == IEEE80211_M_IBSS &&
3603 		    vap->iv_state == IEEE80211_S_RUN) {
3604 			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
3605 			u_int64_t tsf = ath_extend_tsf(rstamp,
3606 				ath_hal_gettsf64(sc->sc_ah));
3607 			/*
3608 			 * Handle ibss merge as needed; check the tsf on the
3609 			 * frame before attempting the merge.  The 802.11 spec
3610 			 * says the station should change it's bssid to match
3611 			 * the oldest station with the same ssid, where oldest
3612 			 * is determined by the tsf.  Note that hardware
3613 			 * reconfiguration happens through callback to
3614 			 * ath_newstate as the state machine will go from
3615 			 * RUN -> RUN when this happens.
3616 			 */
3617 			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
3618 				DPRINTF(sc, ATH_DEBUG_STATE,
3619 				    "ibss merge, rstamp %u tsf %ju "
3620 				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
3621 				    (uintmax_t)ni->ni_tstamp.tsf);
3622 				(void) ieee80211_ibss_merge(ni);
3623 			}
3624 		}
3625 		break;
3626 	}
3627 }
3628 
3629 /*
3630  * Set the default antenna.
3631  */
3632 static void
3633 ath_setdefantenna(struct ath_softc *sc, u_int antenna)
3634 {
3635 	struct ath_hal *ah = sc->sc_ah;
3636 
3637 	/* XXX block beacon interrupts */
3638 	ath_hal_setdefantenna(ah, antenna);
3639 	if (sc->sc_defant != antenna)
3640 		sc->sc_stats.ast_ant_defswitch++;
3641 	sc->sc_defant = antenna;
3642 	sc->sc_rxotherant = 0;
3643 }
3644 
/*
 * Fill in the radiotap receive header (sc_rx_th) for a received
 * frame: rate, HT channel flags, extended 64-bit TSF, FCS status,
 * noise floor, signal strength and antenna.
 */
static void
ath_rx_tap(struct ifnet *ifp, struct mbuf *m,
	const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf)
{
#define	CHAN_HT20	htole32(IEEE80211_CHAN_HT20)
#define	CHAN_HT40U	htole32(IEEE80211_CHAN_HT40U)
#define	CHAN_HT40D	htole32(IEEE80211_CHAN_HT40D)
#define	CHAN_HT		(CHAN_HT20|CHAN_HT40U|CHAN_HT40D)
	struct ath_softc *sc = ifp->if_softc;
	const HAL_RATE_TABLE *rt;
	uint8_t rix;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	/* map the h/w rate code to driver rate/flag tables */
	rix = rt->rateCodeToIndex[rs->rs_rate];
	sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate;
	sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags;
#ifdef AH_SUPPORT_AR5416
	sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT;
	if (sc->sc_rx_th.wr_rate & IEEE80211_RATE_MCS) {	/* HT rate */
		struct ieee80211com *ic = ifp->if_l2com;

		/* derive HT20/HT40U/HT40D from rx status + current channel */
		if ((rs->rs_flags & HAL_RX_2040) == 0)
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT20;
		else if (IEEE80211_IS_CHAN_HT40U(ic->ic_curchan))
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U;
		else
			sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D;
		/*
		 * NOTE(review): SHORTGI is set when HAL_RX_GI is *clear*;
		 * verify the hal's flag sense — this looks inverted.
		 */
		if ((rs->rs_flags & HAL_RX_GI) == 0)
			sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI;
	}
#endif
	sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf));
	if (rs->rs_status & HAL_RXERR_CRC)
		sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS;
	/* XXX propagate other error flags from descriptor */
	sc->sc_rx_th.wr_antnoise = nf;
	sc->sc_rx_th.wr_antsignal = nf + rs->rs_rssi;
	sc->sc_rx_th.wr_antenna = rs->rs_antenna;
#undef CHAN_HT
#undef CHAN_HT20
#undef CHAN_HT40U
#undef CHAN_HT40D
}
3689 
3690 static void
3691 ath_handle_micerror(struct ieee80211com *ic,
3692 	struct ieee80211_frame *wh, int keyix)
3693 {
3694 	struct ieee80211_node *ni;
3695 
3696 	/* XXX recheck MIC to deal w/ chips that lie */
3697 	/* XXX discard MIC errors on !data frames */
3698 	ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh);
3699 	if (ni != NULL) {
3700 		ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix);
3701 		ieee80211_free_node(ni);
3702 	}
3703 }
3704 
3705 static void
3706 ath_rx_proc(void *arg, int npending)
3707 {
3708 #define	PA2DESC(_sc, _pa) \
3709 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
3710 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
3711 	struct ath_softc *sc = arg;
3712 	struct ath_buf *bf;
3713 	struct ifnet *ifp = sc->sc_ifp;
3714 	struct ieee80211com *ic = ifp->if_l2com;
3715 	struct ath_hal *ah = sc->sc_ah;
3716 	struct ath_desc *ds;
3717 	struct ath_rx_status *rs;
3718 	struct mbuf *m;
3719 	struct ieee80211_node *ni;
3720 	int len, type, ngood;
3721 	u_int phyerr;
3722 	HAL_STATUS status;
3723 	int16_t nf;
3724 	u_int64_t tsf;
3725 
3726 	DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending);
3727 	ngood = 0;
3728 	nf = ath_hal_getchannoise(ah, sc->sc_curchan);
3729 	sc->sc_stats.ast_rx_noise = nf;
3730 	tsf = ath_hal_gettsf64(ah);
3731 	do {
3732 		bf = STAILQ_FIRST(&sc->sc_rxbuf);
3733 		if (bf == NULL) {		/* NB: shouldn't happen */
3734 			if_printf(ifp, "%s: no buffer!\n", __func__);
3735 			break;
3736 		}
3737 		m = bf->bf_m;
3738 		if (m == NULL) {		/* NB: shouldn't happen */
3739 			/*
3740 			 * If mbuf allocation failed previously there
3741 			 * will be no mbuf; try again to re-populate it.
3742 			 */
3743 			/* XXX make debug msg */
3744 			if_printf(ifp, "%s: no mbuf!\n", __func__);
3745 			STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
3746 			goto rx_next;
3747 		}
3748 		ds = bf->bf_desc;
3749 		if (ds->ds_link == bf->bf_daddr) {
3750 			/* NB: never process the self-linked entry at the end */
3751 			break;
3752 		}
3753 		/* XXX sync descriptor memory */
3754 		/*
3755 		 * Must provide the virtual address of the current
3756 		 * descriptor, the physical address, and the virtual
3757 		 * address of the next descriptor in the h/w chain.
3758 		 * This allows the HAL to look ahead to see if the
3759 		 * hardware is done with a descriptor by checking the
3760 		 * done bit in the following descriptor and the address
3761 		 * of the current descriptor the DMA engine is working
3762 		 * on.  All this is necessary because of our use of
3763 		 * a self-linked list to avoid rx overruns.
3764 		 */
3765 		rs = &bf->bf_status.ds_rxstat;
3766 		status = ath_hal_rxprocdesc(ah, ds,
3767 				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
3768 #ifdef ATH_DEBUG
3769 		if (sc->sc_debug & ATH_DEBUG_RECV_DESC)
3770 			ath_printrxbuf(sc, bf, 0, status == HAL_OK);
3771 #endif
3772 		if (status == HAL_EINPROGRESS)
3773 			break;
3774 		STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list);
3775 		if (rs->rs_status != 0) {
3776 			if (rs->rs_status & HAL_RXERR_CRC)
3777 				sc->sc_stats.ast_rx_crcerr++;
3778 			if (rs->rs_status & HAL_RXERR_FIFO)
3779 				sc->sc_stats.ast_rx_fifoerr++;
3780 			if (rs->rs_status & HAL_RXERR_PHY) {
3781 				sc->sc_stats.ast_rx_phyerr++;
3782 				phyerr = rs->rs_phyerr & 0x1f;
3783 				sc->sc_stats.ast_rx_phy[phyerr]++;
3784 				goto rx_error;	/* NB: don't count in ierrors */
3785 			}
3786 			if (rs->rs_status & HAL_RXERR_DECRYPT) {
3787 				/*
3788 				 * Decrypt error.  If the error occurred
3789 				 * because there was no hardware key, then
3790 				 * let the frame through so the upper layers
3791 				 * can process it.  This is necessary for 5210
3792 				 * parts which have no way to setup a ``clear''
3793 				 * key cache entry.
3794 				 *
3795 				 * XXX do key cache faulting
3796 				 */
3797 				if (rs->rs_keyix == HAL_RXKEYIX_INVALID)
3798 					goto rx_accept;
3799 				sc->sc_stats.ast_rx_badcrypt++;
3800 			}
3801 			if (rs->rs_status & HAL_RXERR_MIC) {
3802 				sc->sc_stats.ast_rx_badmic++;
3803 				/*
3804 				 * Do minimal work required to hand off
3805 				 * the 802.11 header for notification.
3806 				 */
3807 				/* XXX frag's and qos frames */
3808 				len = rs->rs_datalen;
3809 				if (len >= sizeof (struct ieee80211_frame)) {
3810 					bus_dmamap_sync(sc->sc_dmat,
3811 					    bf->bf_dmamap,
3812 					    BUS_DMASYNC_POSTREAD);
3813 					ath_handle_micerror(ic,
3814 					    mtod(m, struct ieee80211_frame *),
3815 					    sc->sc_splitmic ?
3816 						rs->rs_keyix-32 : rs->rs_keyix);
3817 				}
3818 			}
3819 			ifp->if_ierrors++;
3820 rx_error:
3821 			/*
3822 			 * Cleanup any pending partial frame.
3823 			 */
3824 			if (sc->sc_rxpending != NULL) {
3825 				m_freem(sc->sc_rxpending);
3826 				sc->sc_rxpending = NULL;
3827 			}
3828 			/*
3829 			 * When a tap is present pass error frames
3830 			 * that have been requested.  By default we
3831 			 * pass decrypt+mic errors but others may be
3832 			 * interesting (e.g. crc).
3833 			 */
3834 			if (ieee80211_radiotap_active(ic) &&
3835 			    (rs->rs_status & sc->sc_monpass)) {
3836 				bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3837 				    BUS_DMASYNC_POSTREAD);
3838 				/* NB: bpf needs the mbuf length setup */
3839 				len = rs->rs_datalen;
3840 				m->m_pkthdr.len = m->m_len = len;
3841 				ath_rx_tap(ifp, m, rs, tsf, nf);
3842 				ieee80211_radiotap_rx_all(ic, m);
3843 			}
3844 			/* XXX pass MIC errors up for s/w reclaculation */
3845 			goto rx_next;
3846 		}
3847 rx_accept:
3848 		/*
3849 		 * Sync and unmap the frame.  At this point we're
3850 		 * committed to passing the mbuf somewhere so clear
3851 		 * bf_m; this means a new mbuf must be allocated
3852 		 * when the rx descriptor is setup again to receive
3853 		 * another frame.
3854 		 */
3855 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
3856 		    BUS_DMASYNC_POSTREAD);
3857 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
3858 		bf->bf_m = NULL;
3859 
3860 		len = rs->rs_datalen;
3861 		m->m_len = len;
3862 
3863 		if (rs->rs_more) {
3864 			/*
3865 			 * Frame spans multiple descriptors; save
3866 			 * it for the next completed descriptor, it
3867 			 * will be used to construct a jumbogram.
3868 			 */
3869 			if (sc->sc_rxpending != NULL) {
3870 				/* NB: max frame size is currently 2 clusters */
3871 				sc->sc_stats.ast_rx_toobig++;
3872 				m_freem(sc->sc_rxpending);
3873 			}
3874 			m->m_pkthdr.rcvif = ifp;
3875 			m->m_pkthdr.len = len;
3876 			sc->sc_rxpending = m;
3877 			goto rx_next;
3878 		} else if (sc->sc_rxpending != NULL) {
3879 			/*
3880 			 * This is the second part of a jumbogram,
3881 			 * chain it to the first mbuf, adjust the
3882 			 * frame length, and clear the rxpending state.
3883 			 */
3884 			sc->sc_rxpending->m_next = m;
3885 			sc->sc_rxpending->m_pkthdr.len += len;
3886 			m = sc->sc_rxpending;
3887 			sc->sc_rxpending = NULL;
3888 		} else {
3889 			/*
3890 			 * Normal single-descriptor receive; setup
3891 			 * the rcvif and packet length.
3892 			 */
3893 			m->m_pkthdr.rcvif = ifp;
3894 			m->m_pkthdr.len = len;
3895 		}
3896 
3897 		ifp->if_ipackets++;
3898 		sc->sc_stats.ast_ant_rx[rs->rs_antenna]++;
3899 
3900 		/*
3901 		 * Populate the rx status block.  When there are bpf
3902 		 * listeners we do the additional work to provide
3903 		 * complete status.  Otherwise we fill in only the
3904 		 * material required by ieee80211_input.  Note that
3905 		 * noise setting is filled in above.
3906 		 */
3907 		if (ieee80211_radiotap_active(ic))
3908 			ath_rx_tap(ifp, m, rs, tsf, nf);
3909 
3910 		/*
3911 		 * From this point on we assume the frame is at least
3912 		 * as large as ieee80211_frame_min; verify that.
3913 		 */
3914 		if (len < IEEE80211_MIN_LEN) {
3915 			if (!ieee80211_radiotap_active(ic)) {
3916 				DPRINTF(sc, ATH_DEBUG_RECV,
3917 				    "%s: short packet %d\n", __func__, len);
3918 				sc->sc_stats.ast_rx_tooshort++;
3919 			} else {
3920 				/* NB: in particular this captures ack's */
3921 				ieee80211_radiotap_rx_all(ic, m);
3922 			}
3923 			m_freem(m);
3924 			goto rx_next;
3925 		}
3926 
3927 		if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) {
3928 			const HAL_RATE_TABLE *rt = sc->sc_currates;
3929 			uint8_t rix = rt->rateCodeToIndex[rs->rs_rate];
3930 
3931 			ieee80211_dump_pkt(ic, mtod(m, caddr_t), len,
3932 			    sc->sc_hwmap[rix].ieeerate, rs->rs_rssi);
3933 		}
3934 
3935 		m_adj(m, -IEEE80211_CRC_LEN);
3936 
3937 		/*
3938 		 * Locate the node for sender, track state, and then
3939 		 * pass the (referenced) node up to the 802.11 layer
3940 		 * for its use.
3941 		 */
3942 		ni = ieee80211_find_rxnode_withkey(ic,
3943 			mtod(m, const struct ieee80211_frame_min *),
3944 			rs->rs_keyix == HAL_RXKEYIX_INVALID ?
3945 				IEEE80211_KEYIX_NONE : rs->rs_keyix);
3946 		if (ni != NULL) {
3947 			/*
3948 			 * Sending station is known, dispatch directly.
3949 			 */
3950 			sc->sc_lastrs = rs;
3951 			type = ieee80211_input(ni, m, rs->rs_rssi, nf);
3952 			ieee80211_free_node(ni);
3953 			/*
3954 			 * Arrange to update the last rx timestamp only for
3955 			 * frames from our ap when operating in station mode.
3956 			 * This assumes the rx key is always setup when
3957 			 * associated.
3958 			 */
3959 			if (ic->ic_opmode == IEEE80211_M_STA &&
3960 			    rs->rs_keyix != HAL_RXKEYIX_INVALID)
3961 				ngood++;
3962 		} else {
3963 			type = ieee80211_input_all(ic, m, rs->rs_rssi, nf);
3964 		}
3965 		/*
3966 		 * Track rx rssi and do any rx antenna management.
3967 		 */
3968 		ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi);
3969 		if (sc->sc_diversity) {
3970 			/*
3971 			 * When using fast diversity, change the default rx
3972 			 * antenna if diversity chooses the other antenna 3
3973 			 * times in a row.
3974 			 */
3975 			if (sc->sc_defant != rs->rs_antenna) {
3976 				if (++sc->sc_rxotherant >= 3)
3977 					ath_setdefantenna(sc, rs->rs_antenna);
3978 			} else
3979 				sc->sc_rxotherant = 0;
3980 		}
3981 		if (sc->sc_softled) {
3982 			/*
3983 			 * Blink for any data frame.  Otherwise do a
3984 			 * heartbeat-style blink when idle.  The latter
3985 			 * is mainly for station mode where we depend on
3986 			 * periodic beacon frames to trigger the poll event.
3987 			 */
3988 			if (type == IEEE80211_FC0_TYPE_DATA) {
3989 				const HAL_RATE_TABLE *rt = sc->sc_currates;
3990 				ath_led_event(sc,
3991 				    rt->rateCodeToIndex[rs->rs_rate]);
3992 			} else if (ticks - sc->sc_ledevent >= sc->sc_ledidle)
3993 				ath_led_event(sc, 0);
3994 		}
3995 rx_next:
3996 		STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list);
3997 	} while (ath_rxbuf_init(sc, bf) == 0);
3998 
3999 	/* rx signal state monitoring */
4000 	ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan);
4001 	if (ngood)
4002 		sc->sc_lastrx = tsf;
4003 
4004 	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
4005 #ifdef IEEE80211_SUPPORT_SUPERG
4006 		ieee80211_ff_age_all(ic, 100);
4007 #endif
4008 		if (!IFQ_IS_EMPTY(&ifp->if_snd))
4009 			ath_start(ifp);
4010 	}
4011 #undef PA2DESC
4012 }
4013 
4014 static void
4015 ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
4016 {
4017 	txq->axq_qnum = qnum;
4018 	txq->axq_ac = 0;
4019 	txq->axq_depth = 0;
4020 	txq->axq_intrcnt = 0;
4021 	txq->axq_link = NULL;
4022 	STAILQ_INIT(&txq->axq_q);
4023 	ATH_TXQ_LOCK_INIT(sc, txq);
4024 }
4025 
4026 /*
4027  * Setup a h/w transmit queue.
4028  */
4029 static struct ath_txq *
4030 ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
4031 {
4032 #define	N(a)	(sizeof(a)/sizeof(a[0]))
4033 	struct ath_hal *ah = sc->sc_ah;
4034 	HAL_TXQ_INFO qi;
4035 	int qnum;
4036 
4037 	memset(&qi, 0, sizeof(qi));
4038 	qi.tqi_subtype = subtype;
4039 	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
4040 	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
4041 	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
4042 	/*
4043 	 * Enable interrupts only for EOL and DESC conditions.
4044 	 * We mark tx descriptors to receive a DESC interrupt
4045 	 * when a tx queue gets deep; otherwise waiting for the
4046 	 * EOL to reap descriptors.  Note that this is done to
4047 	 * reduce interrupt load and this only defers reaping
4048 	 * descriptors, never transmitting frames.  Aside from
4049 	 * reducing interrupts this also permits more concurrency.
4050 	 * The only potential downside is if the tx queue backs
4051 	 * up in which case the top half of the kernel may backup
4052 	 * due to a lack of tx descriptors.
4053 	 */
4054 	qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE;
4055 	qnum = ath_hal_setuptxqueue(ah, qtype, &qi);
4056 	if (qnum == -1) {
4057 		/*
4058 		 * NB: don't print a message, this happens
4059 		 * normally on parts with too few tx queues
4060 		 */
4061 		return NULL;
4062 	}
4063 	if (qnum >= N(sc->sc_txq)) {
4064 		device_printf(sc->sc_dev,
4065 			"hal qnum %u out of range, max %zu!\n",
4066 			qnum, N(sc->sc_txq));
4067 		ath_hal_releasetxqueue(ah, qnum);
4068 		return NULL;
4069 	}
4070 	if (!ATH_TXQ_SETUP(sc, qnum)) {
4071 		ath_txq_init(sc, &sc->sc_txq[qnum], qnum);
4072 		sc->sc_txqsetup |= 1<<qnum;
4073 	}
4074 	return &sc->sc_txq[qnum];
4075 #undef N
4076 }
4077 
4078 /*
4079  * Setup a hardware data transmit queue for the specified
4080  * access control.  The hal may not support all requested
4081  * queues in which case it will return a reference to a
4082  * previously setup queue.  We record the mapping from ac's
4083  * to h/w queues for use by ath_tx_start and also track
4084  * the set of h/w queues being used to optimize work in the
4085  * transmit interrupt handler and related routines.
4086  */
4087 static int
4088 ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
4089 {
4090 #define	N(a)	(sizeof(a)/sizeof(a[0]))
4091 	struct ath_txq *txq;
4092 
4093 	if (ac >= N(sc->sc_ac2q)) {
4094 		device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n",
4095 			ac, N(sc->sc_ac2q));
4096 		return 0;
4097 	}
4098 	txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
4099 	if (txq != NULL) {
4100 		txq->axq_ac = ac;
4101 		sc->sc_ac2q[ac] = txq;
4102 		return 1;
4103 	} else
4104 		return 0;
4105 #undef N
4106 }
4107 
4108 /*
4109  * Update WME parameters for a transmit queue.
4110  */
static int
ath_txq_update(struct ath_softc *sc, int ac)
{
/* Convert a 802.11 CW exponent to the actual contention window value. */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<v)-1)
/* Convert a WME txop limit (32us units) to microseconds. */
#define	ATH_TXOP_TO_US(v)		(v<<5)
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_txq *txq = sc->sc_ac2q[ac];
	struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	/* Read the current settings so fields we don't set are preserved. */
	ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi);
#ifdef IEEE80211_SUPPORT_TDMA
	if (sc->sc_tdma) {
		/*
		 * AIFS is zero so there's no pre-transmit wait.  The
		 * burst time defines the slot duration and is configured
		 * via sysctl.  The QCU is setup to not do post-xmit
		 * back off, lockout all lower-priority QCU's, and fire
		 * off the DMA beacon alert timer which is setup based
		 * on the slot configuration.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      | HAL_TXQ_TXEOLINT_ENABLE
			      | HAL_TXQ_DBA_GATED
			      | HAL_TXQ_BACKOFF_DISABLE
			      | HAL_TXQ_ARB_LOCKOUT_GLOBAL
			      ;
		qi.tqi_aifs = 0;
		/* XXX +dbaprep? */
		qi.tqi_readyTime = sc->sc_tdmaslotlen;
		qi.tqi_burstTime = qi.tqi_readyTime;
	} else {
#endif
		/*
		 * Normal WME operation: translate the negotiated WME
		 * channel parameters (AIFS, CW min/max, txop limit)
		 * for this AC into HAL queue properties.
		 */
		qi.tqi_qflags = HAL_TXQ_TXOKINT_ENABLE
			      | HAL_TXQ_TXERRINT_ENABLE
			      | HAL_TXQ_TXDESCINT_ENABLE
			      | HAL_TXQ_TXURNINT_ENABLE
			      ;
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
		qi.tqi_readyTime = 0;
		qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit);
#ifdef IEEE80211_SUPPORT_TDMA
	}
#endif

	DPRINTF(sc, ATH_DEBUG_RESET,
	    "%s: Q%u qflags 0x%x aifs %u cwmin %u cwmax %u burstTime %u\n",
	    __func__, txq->axq_qnum, qi.tqi_qflags,
	    qi.tqi_aifs, qi.tqi_cwmin, qi.tqi_cwmax, qi.tqi_burstTime);

	/* Returns 1 on success, 0 on failure (NB: not an errno). */
	if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) {
		if_printf(ifp, "unable to update hardware queue "
			"parameters for %s traffic!\n",
			ieee80211_wme_acnames[ac]);
		return 0;
	} else {
		ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */
		return 1;
	}
#undef ATH_TXOP_TO_US
#undef ATH_EXPONENT_TO_VALUE
}
4179 
4180 /*
4181  * Callback from the 802.11 layer to update WME parameters.
4182  */
4183 static int
4184 ath_wme_update(struct ieee80211com *ic)
4185 {
4186 	struct ath_softc *sc = ic->ic_ifp->if_softc;
4187 
4188 	return !ath_txq_update(sc, WME_AC_BE) ||
4189 	    !ath_txq_update(sc, WME_AC_BK) ||
4190 	    !ath_txq_update(sc, WME_AC_VI) ||
4191 	    !ath_txq_update(sc, WME_AC_VO) ? EIO : 0;
4192 }
4193 
4194 /*
4195  * Reclaim resources for a setup queue.
4196  */
4197 static void
4198 ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
4199 {
4200 
4201 	ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum);
4202 	ATH_TXQ_LOCK_DESTROY(txq);
4203 	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
4204 }
4205 
4206 /*
4207  * Reclaim all tx queue resources.
4208  */
4209 static void
4210 ath_tx_cleanup(struct ath_softc *sc)
4211 {
4212 	int i;
4213 
4214 	ATH_TXBUF_LOCK_DESTROY(sc);
4215 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
4216 		if (ATH_TXQ_SETUP(sc, i))
4217 			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
4218 }
4219 
4220 /*
4221  * Return h/w rate index for an IEEE rate (w/o basic rate bit)
4222  * using the current rates in sc_rixmap.
4223  */
4224 static __inline int
4225 ath_tx_findrix(const struct ath_softc *sc, uint8_t rate)
4226 {
4227 	int rix = sc->sc_rixmap[rate];
4228 	/* NB: return lowest rix for invalid rate */
4229 	return (rix == 0xff ? 0 : rix);
4230 }
4231 
4232 /*
4233  * Reclaim mbuf resources.  For fragmented frames we
4234  * need to claim each frag chained with m_nextpkt.
4235  */
4236 static void
4237 ath_freetx(struct mbuf *m)
4238 {
4239 	struct mbuf *next;
4240 
4241 	do {
4242 		next = m->m_nextpkt;
4243 		m->m_nextpkt = NULL;
4244 		m_freem(m);
4245 	} while ((m = next) != NULL);
4246 }
4247 
/*
 * Map a frame for DMA, linearizing it with m_collapse if it needs
 * more than ATH_TXDESC segments.  On success the (possibly new)
 * mbuf chain is recorded in bf->bf_m and 0 is returned; on error
 * the chain is freed and an errno is returned.
 */
static int
ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
{
	struct mbuf *m;
	int error;

	/*
	 * Load the DMA map so any coalescing is done.  This
	 * also calculates the number of descriptors we need.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
				     bf->bf_segs, &bf->bf_nseg,
				     BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/* XXX packet requires too many descriptors */
		bf->bf_nseg = ATH_TXDESC+1;	/* force the linearize path below */
	} else if (error != 0) {
		sc->sc_stats.ast_tx_busdma++;
		ath_freetx(m0);
		return error;
	}
	/*
	 * Discard null packets and check for packets that
	 * require too many TX descriptors.  We try to convert
	 * the latter to a cluster.
	 */
	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
		sc->sc_stats.ast_tx_linear++;
		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
		if (m == NULL) {
			ath_freetx(m0);
			sc->sc_stats.ast_tx_nombuf++;
			return ENOMEM;
		}
		/* Retry the mapping with the collapsed chain. */
		m0 = m;
		error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0,
					     bf->bf_segs, &bf->bf_nseg,
					     BUS_DMA_NOWAIT);
		if (error != 0) {
			sc->sc_stats.ast_tx_busdma++;
			ath_freetx(m0);
			return error;
		}
		KASSERT(bf->bf_nseg <= ATH_TXDESC,
		    ("too many segments after defrag; nseg %u", bf->bf_nseg));
	} else if (bf->bf_nseg == 0) {		/* null packet, discard */
		sc->sc_stats.ast_tx_nodata++;
		ath_freetx(m0);
		return EIO;
	}
	DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n",
		__func__, m0, m0->m_pkthdr.len);
	/* Make CPU writes to the frame visible before the h/w reads it. */
	bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE);
	bf->bf_m = m0;

	return 0;
}
4305 
/*
 * Complete the h/w descriptor chain for a mapped frame and hand it
 * off for transmission.  For hardware queues the frame is linked
 * onto the queue and the DMA engine kicked; for the s/w mcast queue
 * (ATH_TXQ_SWQ) the frame is only linked in -- the SWBA handler is
 * responsible for moving it to the CAB queue.
 */
static void
ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_desc *ds, *ds0;
	int i;

	/*
	 * Fillin the remainder of the descriptor info.
	 * One descriptor per DMA segment; intermediate descriptors
	 * are linked by physical address, the last terminates the
	 * chain with a zero link.
	 */
	ds0 = ds = bf->bf_desc;
	for (i = 0; i < bf->bf_nseg; i++, ds++) {
		ds->ds_data = bf->bf_segs[i].ds_addr;
		if (i == bf->bf_nseg - 1)
			ds->ds_link = 0;
		else
			ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1);
		ath_hal_filltxdesc(ah, ds
			, bf->bf_segs[i].ds_len	/* segment length */
			, i == 0		/* first segment */
			, i == bf->bf_nseg - 1	/* last segment */
			, ds0			/* first descriptor */
		);
		DPRINTF(sc, ATH_DEBUG_XMIT,
			"%s: %d: %08x %08x %08x %08x %08x %08x\n",
			__func__, i, ds->ds_link, ds->ds_data,
			ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]);
	}
	/*
	 * Insert the frame on the outbound list and pass it on
	 * to the hardware.  Multicast frames buffered for power
	 * save stations and transmit from the CAB queue are stored
	 * on a s/w only queue and loaded on to the CAB queue in
	 * the SWBA handler since frames only go out on DTIM and
	 * to avoid possible races.
	 */
	ATH_TXQ_LOCK(txq);
	KASSERT((bf->bf_flags & ATH_BUF_BUSY) == 0,
	     ("busy status 0x%x", bf->bf_flags));
	if (txq->axq_qnum != ATH_TXQ_SWQ) {
#ifdef IEEE80211_SUPPORT_TDMA
		int qbusy;

		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		/* Sample queue state before deciding how to start it. */
		qbusy = ath_hal_txqenabled(ah, txq->axq_qnum);
		if (txq->axq_link == NULL) {
			/*
			 * Be careful writing the address to TXDP.  If
			 * the tx q is enabled then this write will be
			 * ignored.  Normally this is not an issue but
			 * when tdma is in use and the q is beacon gated
			 * this race can occur.  If the q is busy then
			 * defer the work to later--either when another
			 * packet comes along or when we prepare a beacon
			 * frame at SWBA.
			 */
			if (!qbusy) {
				ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_XMIT,
				    "%s: TXDP[%u] = %p (%p) depth %d\n",
				    __func__, txq->axq_qnum,
				    (caddr_t)bf->bf_daddr, bf->bf_desc,
				    txq->axq_depth);
			} else {
				/* Remember the deferred TXDP write. */
				txq->axq_flags |= ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u busy, defer enable\n", __func__,
				    txq->axq_qnum);
			}
		} else {
			/* Chain onto the previous frame's link field. */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
			if ((txq->axq_flags & ATH_TXQ_PUTPENDING) && !qbusy) {
				/*
				 * The q was busy when we previously tried
				 * to write the address of the first buffer
				 * in the chain.  Since it's not busy now
				 * handle this chore.  We are certain the
				 * buffer at the front is the right one since
				 * axq_link is NULL only when the buffer list
				 * is/was empty.
				 */
				ath_hal_puttxbuf(ah, txq->axq_qnum,
					STAILQ_FIRST(&txq->axq_q)->bf_daddr);
				txq->axq_flags &= ~ATH_TXQ_PUTPENDING;
				DPRINTF(sc, ATH_DEBUG_TDMA | ATH_DEBUG_XMIT,
				    "%s: Q%u restarted\n", __func__,
				    txq->axq_qnum);
			}
		}
#else
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		if (txq->axq_link == NULL) {
			/* Empty queue: point TXDP at this frame. */
			ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: TXDP[%u] = %p (%p) depth %d\n",
			    __func__, txq->axq_qnum,
			    (caddr_t)bf->bf_daddr, bf->bf_desc,
			    txq->axq_depth);
		} else {
			/* Chain onto the previous frame's link field. */
			*txq->axq_link = bf->bf_daddr;
			DPRINTF(sc, ATH_DEBUG_XMIT,
			    "%s: link[%u](%p)=%p (%p) depth %d\n", __func__,
			    txq->axq_qnum, txq->axq_link,
			    (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth);
		}
#endif /* IEEE80211_SUPPORT_TDMA */
		/* Remember where to chain the next frame, then kick DMA. */
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
		ath_hal_txstart(ah, txq->axq_qnum);
	} else {
		if (txq->axq_link != NULL) {
			struct ath_buf *last = ATH_TXQ_LAST(txq);
			struct ieee80211_frame *wh;

			/* mark previous frame */
			wh = mtod(last->bf_m, struct ieee80211_frame *);
			wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			/* Flush the 802.11 header change to memory. */
			bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap,
			    BUS_DMASYNC_PREWRITE);

			/* link descriptor */
			*txq->axq_link = bf->bf_daddr;
		}
		ATH_TXQ_INSERT_TAIL(txq, bf, bf_list);
		txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link;
	}
	ATH_TXQ_UNLOCK(txq);
}
4438 
4439 static int
4440 ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf,
4441     struct mbuf *m0)
4442 {
4443 	struct ieee80211vap *vap = ni->ni_vap;
4444 	struct ath_vap *avp = ATH_VAP(vap);
4445 	struct ath_hal *ah = sc->sc_ah;
4446 	struct ifnet *ifp = sc->sc_ifp;
4447 	struct ieee80211com *ic = ifp->if_l2com;
4448 	const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams;
4449 	int error, iswep, ismcast, isfrag, ismrr;
4450 	int keyix, hdrlen, pktlen, try0;
4451 	u_int8_t rix, txrate, ctsrate;
4452 	u_int8_t cix = 0xff;		/* NB: silence compiler */
4453 	struct ath_desc *ds;
4454 	struct ath_txq *txq;
4455 	struct ieee80211_frame *wh;
4456 	u_int subtype, flags, ctsduration;
4457 	HAL_PKT_TYPE atype;
4458 	const HAL_RATE_TABLE *rt;
4459 	HAL_BOOL shortPreamble;
4460 	struct ath_node *an;
4461 	u_int pri;
4462 
4463 	wh = mtod(m0, struct ieee80211_frame *);
4464 	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
4465 	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
4466 	isfrag = m0->m_flags & M_FRAG;
4467 	hdrlen = ieee80211_anyhdrsize(wh);
4468 	/*
4469 	 * Packet length must not include any
4470 	 * pad bytes; deduct them here.
4471 	 */
4472 	pktlen = m0->m_pkthdr.len - (hdrlen & 3);
4473 
4474 	if (iswep) {
4475 		const struct ieee80211_cipher *cip;
4476 		struct ieee80211_key *k;
4477 
4478 		/*
4479 		 * Construct the 802.11 header+trailer for an encrypted
4480 		 * frame. The only reason this can fail is because of an
4481 		 * unknown or unsupported cipher/key type.
4482 		 */
4483 		k = ieee80211_crypto_encap(ni, m0);
4484 		if (k == NULL) {
4485 			/*
4486 			 * This can happen when the key is yanked after the
4487 			 * frame was queued.  Just discard the frame; the
4488 			 * 802.11 layer counts failures and provides
4489 			 * debugging/diagnostics.
4490 			 */
4491 			ath_freetx(m0);
4492 			return EIO;
4493 		}
4494 		/*
4495 		 * Adjust the packet + header lengths for the crypto
4496 		 * additions and calculate the h/w key index.  When
4497 		 * a s/w mic is done the frame will have had any mic
4498 		 * added to it prior to entry so m0->m_pkthdr.len will
4499 		 * account for it. Otherwise we need to add it to the
4500 		 * packet length.
4501 		 */
4502 		cip = k->wk_cipher;
4503 		hdrlen += cip->ic_header;
4504 		pktlen += cip->ic_header + cip->ic_trailer;
4505 		/* NB: frags always have any TKIP MIC done in s/w */
4506 		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag)
4507 			pktlen += cip->ic_miclen;
4508 		keyix = k->wk_keyix;
4509 
4510 		/* packet header may have moved, reset our local pointer */
4511 		wh = mtod(m0, struct ieee80211_frame *);
4512 	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
4513 		/*
4514 		 * Use station key cache slot, if assigned.
4515 		 */
4516 		keyix = ni->ni_ucastkey.wk_keyix;
4517 		if (keyix == IEEE80211_KEYIX_NONE)
4518 			keyix = HAL_TXKEYIX_INVALID;
4519 	} else
4520 		keyix = HAL_TXKEYIX_INVALID;
4521 
4522 	pktlen += IEEE80211_CRC_LEN;
4523 
4524 	/*
4525 	 * Load the DMA map so any coalescing is done.  This
4526 	 * also calculates the number of descriptors we need.
4527 	 */
4528 	error = ath_tx_dmasetup(sc, bf, m0);
4529 	if (error != 0)
4530 		return error;
4531 	bf->bf_node = ni;			/* NB: held reference */
4532 	m0 = bf->bf_m;				/* NB: may have changed */
4533 	wh = mtod(m0, struct ieee80211_frame *);
4534 
4535 	/* setup descriptors */
4536 	ds = bf->bf_desc;
4537 	rt = sc->sc_currates;
4538 	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
4539 
4540 	/*
4541 	 * NB: the 802.11 layer marks whether or not we should
4542 	 * use short preamble based on the current mode and
4543 	 * negotiated parameters.
4544 	 */
4545 	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
4546 	    (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) {
4547 		shortPreamble = AH_TRUE;
4548 		sc->sc_stats.ast_tx_shortpre++;
4549 	} else {
4550 		shortPreamble = AH_FALSE;
4551 	}
4552 
4553 	an = ATH_NODE(ni);
4554 	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
4555 	ismrr = 0;				/* default no multi-rate retry*/
4556 	pri = M_WME_GETAC(m0);			/* honor classification */
4557 	/* XXX use txparams instead of fixed values */
4558 	/*
4559 	 * Calculate Atheros packet type from IEEE80211 packet header,
4560 	 * setup for rate calculations, and select h/w transmit queue.
4561 	 */
4562 	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
4563 	case IEEE80211_FC0_TYPE_MGT:
4564 		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4565 		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
4566 			atype = HAL_PKT_TYPE_BEACON;
4567 		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
4568 			atype = HAL_PKT_TYPE_PROBE_RESP;
4569 		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
4570 			atype = HAL_PKT_TYPE_ATIM;
4571 		else
4572 			atype = HAL_PKT_TYPE_NORMAL;	/* XXX */
4573 		rix = an->an_mgmtrix;
4574 		txrate = rt->info[rix].rateCode;
4575 		if (shortPreamble)
4576 			txrate |= rt->info[rix].shortPreamble;
4577 		try0 = ATH_TXMGTTRY;
4578 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4579 		break;
4580 	case IEEE80211_FC0_TYPE_CTL:
4581 		atype = HAL_PKT_TYPE_PSPOLL;	/* stop setting of duration */
4582 		rix = an->an_mgmtrix;
4583 		txrate = rt->info[rix].rateCode;
4584 		if (shortPreamble)
4585 			txrate |= rt->info[rix].shortPreamble;
4586 		try0 = ATH_TXMGTTRY;
4587 		flags |= HAL_TXDESC_INTREQ;	/* force interrupt */
4588 		break;
4589 	case IEEE80211_FC0_TYPE_DATA:
4590 		atype = HAL_PKT_TYPE_NORMAL;		/* default */
4591 		/*
4592 		 * Data frames: multicast frames go out at a fixed rate,
4593 		 * EAPOL frames use the mgmt frame rate; otherwise consult
4594 		 * the rate control module for the rate to use.
4595 		 */
4596 		if (ismcast) {
4597 			rix = an->an_mcastrix;
4598 			txrate = rt->info[rix].rateCode;
4599 			if (shortPreamble)
4600 				txrate |= rt->info[rix].shortPreamble;
4601 			try0 = 1;
4602 		} else if (m0->m_flags & M_EAPOL) {
4603 			/* XXX? maybe always use long preamble? */
4604 			rix = an->an_mgmtrix;
4605 			txrate = rt->info[rix].rateCode;
4606 			if (shortPreamble)
4607 				txrate |= rt->info[rix].shortPreamble;
4608 			try0 = ATH_TXMAXTRY;	/* XXX?too many? */
4609 		} else {
4610 			ath_rate_findrate(sc, an, shortPreamble, pktlen,
4611 				&rix, &try0, &txrate);
4612 			sc->sc_txrix = rix;		/* for LED blinking */
4613 			sc->sc_lastdatarix = rix;	/* for fast frames */
4614 			if (try0 != ATH_TXMAXTRY)
4615 				ismrr = 1;
4616 		}
4617 		if (cap->cap_wmeParams[pri].wmep_noackPolicy)
4618 			flags |= HAL_TXDESC_NOACK;
4619 		break;
4620 	default:
4621 		if_printf(ifp, "bogus frame type 0x%x (%s)\n",
4622 			wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__);
4623 		/* XXX statistic */
4624 		ath_freetx(m0);
4625 		return EIO;
4626 	}
4627 	txq = sc->sc_ac2q[pri];
4628 
4629 	/*
4630 	 * When servicing one or more stations in power-save mode
4631 	 * (or) if there is some mcast data waiting on the mcast
4632 	 * queue (to prevent out of order delivery) multicast
4633 	 * frames must be buffered until after the beacon.
4634 	 */
4635 	if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth))
4636 		txq = &avp->av_mcastq;
4637 
4638 	/*
4639 	 * Calculate miscellaneous flags.
4640 	 */
4641 	if (ismcast) {
4642 		flags |= HAL_TXDESC_NOACK;	/* no ack on broad/multicast */
4643 	} else if (pktlen > vap->iv_rtsthreshold &&
4644 	    (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) {
4645 		flags |= HAL_TXDESC_RTSENA;	/* RTS based on frame length */
4646 		cix = rt->info[rix].controlRate;
4647 		sc->sc_stats.ast_tx_rts++;
4648 	}
4649 	if (flags & HAL_TXDESC_NOACK)		/* NB: avoid double counting */
4650 		sc->sc_stats.ast_tx_noack++;
4651 #ifdef IEEE80211_SUPPORT_TDMA
4652 	if (sc->sc_tdma && (flags & HAL_TXDESC_NOACK) == 0) {
4653 		DPRINTF(sc, ATH_DEBUG_TDMA,
4654 		    "%s: discard frame, ACK required w/ TDMA\n", __func__);
4655 		sc->sc_stats.ast_tdma_ack++;
4656 		ath_freetx(m0);
4657 		return EIO;
4658 	}
4659 #endif
4660 
4661 	/*
4662 	 * If 802.11g protection is enabled, determine whether
4663 	 * to use RTS/CTS or just CTS.  Note that this is only
4664 	 * done for OFDM unicast frames.
4665 	 */
4666 	if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
4667 	    rt->info[rix].phy == IEEE80211_T_OFDM &&
4668 	    (flags & HAL_TXDESC_NOACK) == 0) {
4669 		/* XXX fragments must use CCK rates w/ protection */
4670 		if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
4671 			flags |= HAL_TXDESC_RTSENA;
4672 		else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
4673 			flags |= HAL_TXDESC_CTSENA;
4674 		if (isfrag) {
4675 			/*
4676 			 * For frags it would be desirable to use the
4677 			 * highest CCK rate for RTS/CTS.  But stations
4678 			 * farther away may detect it at a lower CCK rate
4679 			 * so use the configured protection rate instead
4680 			 * (for now).
4681 			 */
4682 			cix = rt->info[sc->sc_protrix].controlRate;
4683 		} else
4684 			cix = rt->info[sc->sc_protrix].controlRate;
4685 		sc->sc_stats.ast_tx_protect++;
4686 	}
4687 
4688 	/*
4689 	 * Calculate duration.  This logically belongs in the 802.11
4690 	 * layer but it lacks sufficient information to calculate it.
4691 	 */
4692 	if ((flags & HAL_TXDESC_NOACK) == 0 &&
4693 	    (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
4694 		u_int16_t dur;
4695 		if (shortPreamble)
4696 			dur = rt->info[rix].spAckDuration;
4697 		else
4698 			dur = rt->info[rix].lpAckDuration;
4699 		if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) {
4700 			dur += dur;		/* additional SIFS+ACK */
4701 			KASSERT(m0->m_nextpkt != NULL, ("no fragment"));
4702 			/*
4703 			 * Include the size of next fragment so NAV is
4704 			 * updated properly.  The last fragment uses only
4705 			 * the ACK duration
4706 			 */
4707 			dur += ath_hal_computetxtime(ah, rt,
4708 					m0->m_nextpkt->m_pkthdr.len,
4709 					rix, shortPreamble);
4710 		}
4711 		if (isfrag) {
4712 			/*
4713 			 * Force hardware to use computed duration for next
4714 			 * fragment by disabling multi-rate retry which updates
4715 			 * duration based on the multi-rate duration table.
4716 			 */
4717 			ismrr = 0;
4718 			try0 = ATH_TXMGTTRY;	/* XXX? */
4719 		}
4720 		*(u_int16_t *)wh->i_dur = htole16(dur);
4721 	}
4722 
4723 	/*
4724 	 * Calculate RTS/CTS rate and duration if needed.
4725 	 */
4726 	ctsduration = 0;
4727 	if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) {
4728 		/*
4729 		 * CTS transmit rate is derived from the transmit rate
4730 		 * by looking in the h/w rate table.  We must also factor
4731 		 * in whether or not a short preamble is to be used.
4732 		 */
4733 		/* NB: cix is set above where RTS/CTS is enabled */
4734 		KASSERT(cix != 0xff, ("cix not setup"));
4735 		ctsrate = rt->info[cix].rateCode;
4736 		/*
4737 		 * Compute the transmit duration based on the frame
4738 		 * size and the size of an ACK frame.  We call into the
4739 		 * HAL to do the computation since it depends on the
4740 		 * characteristics of the actual PHY being used.
4741 		 *
4742 		 * NB: CTS is assumed the same size as an ACK so we can
4743 		 *     use the precalculated ACK durations.
4744 		 */
4745 		if (shortPreamble) {
4746 			ctsrate |= rt->info[cix].shortPreamble;
4747 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
4748 				ctsduration += rt->info[cix].spAckDuration;
4749 			ctsduration += ath_hal_computetxtime(ah,
4750 				rt, pktlen, rix, AH_TRUE);
4751 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
4752 				ctsduration += rt->info[rix].spAckDuration;
4753 		} else {
4754 			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
4755 				ctsduration += rt->info[cix].lpAckDuration;
4756 			ctsduration += ath_hal_computetxtime(ah,
4757 				rt, pktlen, rix, AH_FALSE);
4758 			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
4759 				ctsduration += rt->info[rix].lpAckDuration;
4760 		}
4761 		/*
4762 		 * Must disable multi-rate retry when using RTS/CTS.
4763 		 */
4764 		ismrr = 0;
4765 		try0 = ATH_TXMGTTRY;		/* XXX */
4766 	} else
4767 		ctsrate = 0;
4768 
4769 	/*
4770 	 * At this point we are committed to sending the frame
4771 	 * and we don't need to look at m_nextpkt; clear it in
4772 	 * case this frame is part of frag chain.
4773 	 */
4774 	m0->m_nextpkt = NULL;
4775 
4776 	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
4777 		ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len,
4778 		    sc->sc_hwmap[rix].ieeerate, -1);
4779 
4780 	if (ieee80211_radiotap_active_vap(vap)) {
4781 		u_int64_t tsf = ath_hal_gettsf64(ah);
4782 
4783 		sc->sc_tx_th.wt_tsf = htole64(tsf);
4784 		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
4785 		if (iswep)
4786 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4787 		if (isfrag)
4788 			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
4789 		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
4790 		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
4791 		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;
4792 
4793 		ieee80211_radiotap_tx(vap, m0);
4794 	}
4795 
4796 	/*
4797 	 * Determine if a tx interrupt should be generated for
4798 	 * this descriptor.  We take a tx interrupt to reap
4799 	 * descriptors when the h/w hits an EOL condition or
4800 	 * when the descriptor is specifically marked to generate
4801 	 * an interrupt.  We periodically mark descriptors in this
	 * way to ensure timely replenishing of the supply needed
	 * for sending frames.  Deferring interrupts reduces system
	 * load and potentially allows more concurrent work to be
	 * done but if done too aggressively can cause senders to
	 * back up.
4807 	 *
4808 	 * NB: use >= to deal with sc_txintrperiod changing
4809 	 *     dynamically through sysctl.
4810 	 */
4811 	if (flags & HAL_TXDESC_INTREQ) {
4812 		txq->axq_intrcnt = 0;
4813 	} else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) {
4814 		flags |= HAL_TXDESC_INTREQ;
4815 		txq->axq_intrcnt = 0;
4816 	}
4817 
4818 	/*
4819 	 * Formulate first tx descriptor with tx controls.
4820 	 */
4821 	/* XXX check return value? */
4822 	ath_hal_setuptxdesc(ah, ds
4823 		, pktlen		/* packet length */
4824 		, hdrlen		/* header length */
4825 		, atype			/* Atheros packet type */
4826 		, ni->ni_txpower	/* txpower */
4827 		, txrate, try0		/* series 0 rate/tries */
4828 		, keyix			/* key cache index */
4829 		, sc->sc_txantenna	/* antenna mode */
4830 		, flags			/* flags */
4831 		, ctsrate		/* rts/cts rate */
4832 		, ctsduration		/* rts/cts duration */
4833 	);
4834 	bf->bf_txflags = flags;
4835 	/*
4836 	 * Setup the multi-rate retry state only when we're
4837 	 * going to use it.  This assumes ath_hal_setuptxdesc
4838 	 * initializes the descriptors (so we don't have to)
4839 	 * when the hardware supports multi-rate retry and
4840 	 * we don't use it.
4841 	 */
4842 	if (ismrr)
4843 		ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix);
4844 
4845 	ath_tx_handoff(sc, txq, bf);
4846 	return 0;
4847 }
4848 
4849 /*
4850  * Process completed xmit descriptors from the specified queue.
4851  */
4852 static int
4853 ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
4854 {
4855 	struct ath_hal *ah = sc->sc_ah;
4856 	struct ifnet *ifp = sc->sc_ifp;
4857 	struct ieee80211com *ic = ifp->if_l2com;
4858 	struct ath_buf *bf, *last;
4859 	struct ath_desc *ds, *ds0;
4860 	struct ath_tx_status *ts;
4861 	struct ieee80211_node *ni;
4862 	struct ath_node *an;
4863 	int sr, lr, pri, nacked;
4864 	HAL_STATUS status;
4865 
4866 	DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
4867 		__func__, txq->axq_qnum,
4868 		(caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
4869 		txq->axq_link);
4870 	nacked = 0;
4871 	for (;;) {
4872 		ATH_TXQ_LOCK(txq);
4873 		txq->axq_intrcnt = 0;	/* reset periodic desc intr count */
4874 		bf = STAILQ_FIRST(&txq->axq_q);
4875 		if (bf == NULL) {
4876 			ATH_TXQ_UNLOCK(txq);
4877 			break;
4878 		}
4879 		ds0 = &bf->bf_desc[0];
4880 		ds = &bf->bf_desc[bf->bf_nseg - 1];
4881 		ts = &bf->bf_status.ds_txstat;
4882 		status = ath_hal_txprocdesc(ah, ds, ts);
4883 #ifdef ATH_DEBUG
4884 		if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
4885 			ath_printtxbuf(sc, bf, txq->axq_qnum, 0,
4886 			    status == HAL_OK);
4887 #endif
4888 		if (status == HAL_EINPROGRESS) {
4889 			ATH_TXQ_UNLOCK(txq);
4890 			break;
4891 		}
4892 		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
4893 #ifdef IEEE80211_SUPPORT_TDMA
4894 		if (txq->axq_depth > 0) {
4895 			/*
4896 			 * More frames follow.  Mark the buffer busy
4897 			 * so it's not re-used while the hardware may
4898 			 * still re-read the link field in the descriptor.
4899 			 */
4900 			bf->bf_flags |= ATH_BUF_BUSY;
4901 		} else
4902 #else
4903 		if (txq->axq_depth == 0)
4904 #endif
4905 			txq->axq_link = NULL;
4906 		ATH_TXQ_UNLOCK(txq);
4907 
4908 		ni = bf->bf_node;
4909 		if (ni != NULL) {
4910 			an = ATH_NODE(ni);
4911 			if (ts->ts_status == 0) {
4912 				u_int8_t txant = ts->ts_antenna;
4913 				sc->sc_stats.ast_ant_tx[txant]++;
4914 				sc->sc_ant_tx[txant]++;
4915 				if (ts->ts_rate & HAL_TXSTAT_ALTRATE)
4916 					sc->sc_stats.ast_tx_altrate++;
4917 				pri = M_WME_GETAC(bf->bf_m);
4918 				if (pri >= WME_AC_VO)
4919 					ic->ic_wme.wme_hipri_traffic++;
4920 				if ((bf->bf_txflags & HAL_TXDESC_NOACK) == 0)
4921 					ni->ni_inact = ni->ni_inact_reload;
4922 			} else {
4923 				if (ts->ts_status & HAL_TXERR_XRETRY)
4924 					sc->sc_stats.ast_tx_xretries++;
4925 				if (ts->ts_status & HAL_TXERR_FIFO)
4926 					sc->sc_stats.ast_tx_fifoerr++;
4927 				if (ts->ts_status & HAL_TXERR_FILT)
4928 					sc->sc_stats.ast_tx_filtered++;
4929 				if (bf->bf_m->m_flags & M_FF)
4930 					sc->sc_stats.ast_ff_txerr++;
4931 			}
4932 			sr = ts->ts_shortretry;
4933 			lr = ts->ts_longretry;
4934 			sc->sc_stats.ast_tx_shortretry += sr;
4935 			sc->sc_stats.ast_tx_longretry += lr;
4936 			/*
4937 			 * Hand the descriptor to the rate control algorithm.
4938 			 */
4939 			if ((ts->ts_status & HAL_TXERR_FILT) == 0 &&
4940 			    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0) {
4941 				/*
4942 				 * If frame was ack'd update statistics,
4943 				 * including the last rx time used to
4944 				 * workaround phantom bmiss interrupts.
4945 				 */
4946 				if (ts->ts_status == 0) {
4947 					nacked++;
4948 					sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
4949 					ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi,
4950 						ts->ts_rssi);
4951 				}
4952 				ath_rate_tx_complete(sc, an, bf);
4953 			}
4954 			/*
4955 			 * Do any tx complete callback.  Note this must
4956 			 * be done before releasing the node reference.
4957 			 */
4958 			if (bf->bf_m->m_flags & M_TXCB)
4959 				ieee80211_process_callback(ni, bf->bf_m,
4960 				    (bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
4961 				        ts->ts_status : HAL_TXERR_XRETRY);
4962 			ieee80211_free_node(ni);
4963 		}
4964 		bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
4965 		    BUS_DMASYNC_POSTWRITE);
4966 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
4967 
4968 		m_freem(bf->bf_m);
4969 		bf->bf_m = NULL;
4970 		bf->bf_node = NULL;
4971 
4972 		ATH_TXBUF_LOCK(sc);
4973 		last = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
4974 		if (last != NULL)
4975 			last->bf_flags &= ~ATH_BUF_BUSY;
4976 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
4977 		ATH_TXBUF_UNLOCK(sc);
4978 	}
4979 #ifdef IEEE80211_SUPPORT_SUPERG
4980 	/*
4981 	 * Flush fast-frame staging queue when traffic slows.
4982 	 */
4983 	if (txq->axq_depth <= 1)
4984 		ieee80211_ff_flush(ic, txq->axq_ac);
4985 #endif
4986 	return nacked;
4987 }
4988 
4989 static __inline int
4990 txqactive(struct ath_hal *ah, int qnum)
4991 {
4992 	u_int32_t txqs = 1<<qnum;
4993 	ath_hal_gettxintrtxqs(ah, &txqs);
4994 	return (txqs & (1<<qnum));
4995 }
4996 
4997 /*
4998  * Deferred processing of transmit interrupt; special-cased
4999  * for a single hardware transmit queue (e.g. 5210 and 5211).
5000  */
5001 static void
5002 ath_tx_proc_q0(void *arg, int npending)
5003 {
5004 	struct ath_softc *sc = arg;
5005 	struct ifnet *ifp = sc->sc_ifp;
5006 
5007 	if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0]))
5008 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5009 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5010 		ath_tx_processq(sc, sc->sc_cabq);
5011 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5012 	sc->sc_wd_timer = 0;
5013 
5014 	if (sc->sc_softled)
5015 		ath_led_event(sc, sc->sc_txrix);
5016 
5017 	ath_start(ifp);
5018 }
5019 
5020 /*
5021  * Deferred processing of transmit interrupt; special-cased
5022  * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
5023  */
5024 static void
5025 ath_tx_proc_q0123(void *arg, int npending)
5026 {
5027 	struct ath_softc *sc = arg;
5028 	struct ifnet *ifp = sc->sc_ifp;
5029 	int nacked;
5030 
5031 	/*
5032 	 * Process each active queue.
5033 	 */
5034 	nacked = 0;
5035 	if (txqactive(sc->sc_ah, 0))
5036 		nacked += ath_tx_processq(sc, &sc->sc_txq[0]);
5037 	if (txqactive(sc->sc_ah, 1))
5038 		nacked += ath_tx_processq(sc, &sc->sc_txq[1]);
5039 	if (txqactive(sc->sc_ah, 2))
5040 		nacked += ath_tx_processq(sc, &sc->sc_txq[2]);
5041 	if (txqactive(sc->sc_ah, 3))
5042 		nacked += ath_tx_processq(sc, &sc->sc_txq[3]);
5043 	if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum))
5044 		ath_tx_processq(sc, sc->sc_cabq);
5045 	if (nacked)
5046 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5047 
5048 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5049 	sc->sc_wd_timer = 0;
5050 
5051 	if (sc->sc_softled)
5052 		ath_led_event(sc, sc->sc_txrix);
5053 
5054 	ath_start(ifp);
5055 }
5056 
5057 /*
5058  * Deferred processing of transmit interrupt.
5059  */
5060 static void
5061 ath_tx_proc(void *arg, int npending)
5062 {
5063 	struct ath_softc *sc = arg;
5064 	struct ifnet *ifp = sc->sc_ifp;
5065 	int i, nacked;
5066 
5067 	/*
5068 	 * Process each active queue.
5069 	 */
5070 	nacked = 0;
5071 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5072 		if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i))
5073 			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
5074 	if (nacked)
5075 		sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah);
5076 
5077 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5078 	sc->sc_wd_timer = 0;
5079 
5080 	if (sc->sc_softled)
5081 		ath_led_event(sc, sc->sc_txrix);
5082 
5083 	ath_start(ifp);
5084 }
5085 
5086 static void
5087 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
5088 {
5089 #ifdef ATH_DEBUG
5090 	struct ath_hal *ah = sc->sc_ah;
5091 #endif
5092 	struct ieee80211_node *ni;
5093 	struct ath_buf *bf;
5094 	u_int ix;
5095 
5096 	/*
5097 	 * NB: this assumes output has been stopped and
5098 	 *     we do not need to block ath_tx_proc
5099 	 */
5100 	ATH_TXBUF_LOCK(sc);
5101 	bf = STAILQ_LAST(&sc->sc_txbuf, ath_buf, bf_list);
5102 	if (bf != NULL)
5103 		bf->bf_flags &= ~ATH_BUF_BUSY;
5104 	ATH_TXBUF_UNLOCK(sc);
5105 	for (ix = 0;; ix++) {
5106 		ATH_TXQ_LOCK(txq);
5107 		bf = STAILQ_FIRST(&txq->axq_q);
5108 		if (bf == NULL) {
5109 			txq->axq_link = NULL;
5110 			ATH_TXQ_UNLOCK(txq);
5111 			break;
5112 		}
5113 		ATH_TXQ_REMOVE_HEAD(txq, bf_list);
5114 		ATH_TXQ_UNLOCK(txq);
5115 #ifdef ATH_DEBUG
5116 		if (sc->sc_debug & ATH_DEBUG_RESET) {
5117 			struct ieee80211com *ic = sc->sc_ifp->if_l2com;
5118 
5119 			ath_printtxbuf(sc, bf, txq->axq_qnum, ix,
5120 				ath_hal_txprocdesc(ah, bf->bf_desc,
5121 				    &bf->bf_status.ds_txstat) == HAL_OK);
5122 			ieee80211_dump_pkt(ic, mtod(bf->bf_m, const uint8_t *),
5123 			    bf->bf_m->m_len, 0, -1);
5124 		}
5125 #endif /* ATH_DEBUG */
5126 		bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
5127 		ni = bf->bf_node;
5128 		bf->bf_node = NULL;
5129 		if (ni != NULL) {
5130 			/*
5131 			 * Do any callback and reclaim the node reference.
5132 			 */
5133 			if (bf->bf_m->m_flags & M_TXCB)
5134 				ieee80211_process_callback(ni, bf->bf_m, -1);
5135 			ieee80211_free_node(ni);
5136 		}
5137 		m_freem(bf->bf_m);
5138 		bf->bf_m = NULL;
5139 		bf->bf_flags &= ~ATH_BUF_BUSY;
5140 
5141 		ATH_TXBUF_LOCK(sc);
5142 		STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
5143 		ATH_TXBUF_UNLOCK(sc);
5144 	}
5145 }
5146 
5147 static void
5148 ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
5149 {
5150 	struct ath_hal *ah = sc->sc_ah;
5151 
5152 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5153 	    __func__, txq->axq_qnum,
5154 	    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
5155 	    txq->axq_link);
5156 	(void) ath_hal_stoptxdma(ah, txq->axq_qnum);
5157 }
5158 
5159 /*
5160  * Drain the transmit queues and reclaim resources.
5161  */
5162 static void
5163 ath_draintxq(struct ath_softc *sc)
5164 {
5165 	struct ath_hal *ah = sc->sc_ah;
5166 	struct ifnet *ifp = sc->sc_ifp;
5167 	int i;
5168 
5169 	/* XXX return value */
5170 	if (!sc->sc_invalid) {
5171 		/* don't touch the hardware if marked invalid */
5172 		DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
5173 		    __func__, sc->sc_bhalq,
5174 		    (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq),
5175 		    NULL);
5176 		(void) ath_hal_stoptxdma(ah, sc->sc_bhalq);
5177 		for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5178 			if (ATH_TXQ_SETUP(sc, i))
5179 				ath_tx_stopdma(sc, &sc->sc_txq[i]);
5180 	}
5181 	for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
5182 		if (ATH_TXQ_SETUP(sc, i))
5183 			ath_tx_draintxq(sc, &sc->sc_txq[i]);
5184 #ifdef ATH_DEBUG
5185 	if (sc->sc_debug & ATH_DEBUG_RESET) {
5186 		struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf);
5187 		if (bf != NULL && bf->bf_m != NULL) {
5188 			ath_printtxbuf(sc, bf, sc->sc_bhalq, 0,
5189 				ath_hal_txprocdesc(ah, bf->bf_desc,
5190 				    &bf->bf_status.ds_txstat) == HAL_OK);
5191 			ieee80211_dump_pkt(ifp->if_l2com,
5192 			    mtod(bf->bf_m, const uint8_t *), bf->bf_m->m_len,
5193 			    0, -1);
5194 		}
5195 	}
5196 #endif /* ATH_DEBUG */
5197 	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
5198 	sc->sc_wd_timer = 0;
5199 }
5200 
5201 /*
5202  * Disable the receive h/w in preparation for a reset.
5203  */
5204 static void
5205 ath_stoprecv(struct ath_softc *sc)
5206 {
5207 #define	PA2DESC(_sc, _pa) \
5208 	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
5209 		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
5210 	struct ath_hal *ah = sc->sc_ah;
5211 
5212 	ath_hal_stoppcurecv(ah);	/* disable PCU */
5213 	ath_hal_setrxfilter(ah, 0);	/* clear recv filter */
5214 	ath_hal_stopdmarecv(ah);	/* disable DMA engine */
5215 	DELAY(3000);			/* 3ms is long enough for 1 frame */
5216 #ifdef ATH_DEBUG
5217 	if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) {
5218 		struct ath_buf *bf;
5219 		u_int ix;
5220 
5221 		printf("%s: rx queue %p, link %p\n", __func__,
5222 			(caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink);
5223 		ix = 0;
5224 		STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5225 			struct ath_desc *ds = bf->bf_desc;
5226 			struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
5227 			HAL_STATUS status = ath_hal_rxprocdesc(ah, ds,
5228 				bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs);
5229 			if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL))
5230 				ath_printrxbuf(sc, bf, ix, status == HAL_OK);
5231 			ix++;
5232 		}
5233 	}
5234 #endif
5235 	if (sc->sc_rxpending != NULL) {
5236 		m_freem(sc->sc_rxpending);
5237 		sc->sc_rxpending = NULL;
5238 	}
5239 	sc->sc_rxlink = NULL;		/* just in case */
5240 #undef PA2DESC
5241 }
5242 
5243 /*
5244  * Enable the receive h/w following a reset.
5245  */
5246 static int
5247 ath_startrecv(struct ath_softc *sc)
5248 {
5249 	struct ath_hal *ah = sc->sc_ah;
5250 	struct ath_buf *bf;
5251 
5252 	sc->sc_rxlink = NULL;
5253 	sc->sc_rxpending = NULL;
5254 	STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
5255 		int error = ath_rxbuf_init(sc, bf);
5256 		if (error != 0) {
5257 			DPRINTF(sc, ATH_DEBUG_RECV,
5258 				"%s: ath_rxbuf_init failed %d\n",
5259 				__func__, error);
5260 			return error;
5261 		}
5262 	}
5263 
5264 	bf = STAILQ_FIRST(&sc->sc_rxbuf);
5265 	ath_hal_putrxbuf(ah, bf->bf_daddr);
5266 	ath_hal_rxena(ah);		/* enable recv descriptors */
5267 	ath_mode_init(sc);		/* set filters, etc. */
5268 	ath_hal_startpcurecv(ah);	/* re-enable PCU/DMA engine */
5269 	return 0;
5270 }
5271 
5272 /*
5273  * Update internal state after a channel change.
5274  */
5275 static void
5276 ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
5277 {
5278 	enum ieee80211_phymode mode;
5279 
5280 	/*
5281 	 * Change channels and update the h/w rate map
5282 	 * if we're switching; e.g. 11a to 11b/g.
5283 	 */
5284 	mode = ieee80211_chan2mode(chan);
5285 	if (mode != sc->sc_curmode)
5286 		ath_setcurmode(sc, mode);
5287 	sc->sc_curchan = chan;
5288 }
5289 
5290 /*
5291  * Set/change channels.  If the channel is really being changed,
5292  * it's done by reseting the chip.  To accomplish this we must
5293  * first cleanup any pending DMA, then restart stuff after a la
5294  * ath_init.
5295  */
5296 static int
5297 ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
5298 {
5299 	struct ifnet *ifp = sc->sc_ifp;
5300 	struct ieee80211com *ic = ifp->if_l2com;
5301 	struct ath_hal *ah = sc->sc_ah;
5302 
5303 	DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, flags 0x%x)\n",
5304 	    __func__, ieee80211_chan2ieee(ic, chan),
5305 	    chan->ic_freq, chan->ic_flags);
5306 	if (chan != sc->sc_curchan) {
5307 		HAL_STATUS status;
5308 		/*
5309 		 * To switch channels clear any pending DMA operations;
5310 		 * wait long enough for the RX fifo to drain, reset the
5311 		 * hardware at the new frequency, and then re-enable
5312 		 * the relevant bits of the h/w.
5313 		 */
5314 		ath_hal_intrset(ah, 0);		/* disable interrupts */
5315 		ath_draintxq(sc);		/* clear pending tx frames */
5316 		ath_stoprecv(sc);		/* turn off frame recv */
5317 		if (!ath_hal_reset(ah, sc->sc_opmode, chan, AH_TRUE, &status)) {
5318 			if_printf(ifp, "%s: unable to reset "
5319 			    "channel %u (%u Mhz, flags 0x%x), hal status %u\n",
5320 			    __func__, ieee80211_chan2ieee(ic, chan),
5321 			    chan->ic_freq, chan->ic_flags, status);
5322 			return EIO;
5323 		}
5324 		sc->sc_diversity = ath_hal_getdiversity(ah);
5325 
5326 		/*
5327 		 * Re-enable rx framework.
5328 		 */
5329 		if (ath_startrecv(sc) != 0) {
5330 			if_printf(ifp, "%s: unable to restart recv logic\n",
5331 			    __func__);
5332 			return EIO;
5333 		}
5334 
5335 		/*
5336 		 * Change channels and update the h/w rate map
5337 		 * if we're switching; e.g. 11a to 11b/g.
5338 		 */
5339 		ath_chan_change(sc, chan);
5340 
5341 		/*
5342 		 * Re-enable interrupts.
5343 		 */
5344 		ath_hal_intrset(ah, sc->sc_imask);
5345 	}
5346 	return 0;
5347 }
5348 
5349 /*
5350  * Periodically recalibrate the PHY to account
5351  * for temperature/environment changes.
5352  */
5353 static void
5354 ath_calibrate(void *arg)
5355 {
5356 	struct ath_softc *sc = arg;
5357 	struct ath_hal *ah = sc->sc_ah;
5358 	struct ifnet *ifp = sc->sc_ifp;
5359 	struct ieee80211com *ic = ifp->if_l2com;
5360 	HAL_BOOL longCal, isCalDone;
5361 	int nextcal;
5362 
5363 	if (ic->ic_flags & IEEE80211_F_SCAN)	/* defer, off channel */
5364 		goto restart;
5365 	longCal = (ticks - sc->sc_lastlongcal >= ath_longcalinterval*hz);
5366 	if (longCal) {
5367 		sc->sc_stats.ast_per_cal++;
5368 		if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
5369 			/*
5370 			 * Rfgain is out of bounds, reset the chip
5371 			 * to load new gain values.
5372 			 */
5373 			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
5374 				"%s: rfgain change\n", __func__);
5375 			sc->sc_stats.ast_per_rfgain++;
5376 			ath_reset(ifp);
5377 		}
5378 		/*
5379 		 * If this long cal is after an idle period, then
5380 		 * reset the data collection state so we start fresh.
5381 		 */
5382 		if (sc->sc_resetcal) {
5383 			(void) ath_hal_calreset(ah, sc->sc_curchan);
5384 			sc->sc_lastcalreset = ticks;
5385 			sc->sc_resetcal = 0;
5386 		}
5387 	}
5388 	if (ath_hal_calibrateN(ah, sc->sc_curchan, longCal, &isCalDone)) {
5389 		if (longCal) {
5390 			/*
5391 			 * Calibrate noise floor data again in case of change.
5392 			 */
5393 			ath_hal_process_noisefloor(ah);
5394 		}
5395 	} else {
5396 		DPRINTF(sc, ATH_DEBUG_ANY,
5397 			"%s: calibration of channel %u failed\n",
5398 			__func__, sc->sc_curchan->ic_freq);
5399 		sc->sc_stats.ast_per_calfail++;
5400 	}
5401 	if (!isCalDone) {
5402 restart:
5403 		/*
5404 		 * Use a shorter interval to potentially collect multiple
5405 		 * data samples required to complete calibration.  Once
5406 		 * we're told the work is done we drop back to a longer
5407 		 * interval between requests.  We're more aggressive doing
5408 		 * work when operating as an AP to improve operation right
5409 		 * after startup.
5410 		 */
5411 		nextcal = (1000*ath_shortcalinterval)/hz;
5412 		if (sc->sc_opmode != HAL_M_HOSTAP)
5413 			nextcal *= 10;
5414 	} else {
5415 		nextcal = ath_longcalinterval*hz;
5416 		sc->sc_lastlongcal = ticks;
5417 		if (sc->sc_lastcalreset == 0)
5418 			sc->sc_lastcalreset = sc->sc_lastlongcal;
5419 		else if (ticks - sc->sc_lastcalreset >= ath_resetcalinterval*hz)
5420 			sc->sc_resetcal = 1;	/* setup reset next trip */
5421 	}
5422 
5423 	if (nextcal != 0) {
5424 		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%sisCalDone)\n",
5425 		    __func__, nextcal, isCalDone ? "" : "!");
5426 		callout_reset(&sc->sc_cal_ch, nextcal, ath_calibrate, sc);
5427 	} else {
5428 		DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: calibration disabled\n",
5429 		    __func__);
5430 		/* NB: don't rearm timer */
5431 	}
5432 }
5433 
5434 static void
5435 ath_scan_start(struct ieee80211com *ic)
5436 {
5437 	struct ifnet *ifp = ic->ic_ifp;
5438 	struct ath_softc *sc = ifp->if_softc;
5439 	struct ath_hal *ah = sc->sc_ah;
5440 	u_int32_t rfilt;
5441 
5442 	/* XXX calibration timer? */
5443 
5444 	sc->sc_scanning = 1;
5445 	sc->sc_syncbeacon = 0;
5446 	rfilt = ath_calcrxfilter(sc);
5447 	ath_hal_setrxfilter(ah, rfilt);
5448 	ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0);
5449 
5450 	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n",
5451 		 __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr));
5452 }
5453 
5454 static void
5455 ath_scan_end(struct ieee80211com *ic)
5456 {
5457 	struct ifnet *ifp = ic->ic_ifp;
5458 	struct ath_softc *sc = ifp->if_softc;
5459 	struct ath_hal *ah = sc->sc_ah;
5460 	u_int32_t rfilt;
5461 
5462 	sc->sc_scanning = 0;
5463 	rfilt = ath_calcrxfilter(sc);
5464 	ath_hal_setrxfilter(ah, rfilt);
5465 	ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
5466 
5467 	ath_hal_process_noisefloor(ah);
5468 
5469 	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
5470 		 __func__, rfilt, ether_sprintf(sc->sc_curbssid),
5471 		 sc->sc_curaid);
5472 }
5473 
5474 static void
5475 ath_set_channel(struct ieee80211com *ic)
5476 {
5477 	struct ifnet *ifp = ic->ic_ifp;
5478 	struct ath_softc *sc = ifp->if_softc;
5479 
5480 	(void) ath_chan_set(sc, ic->ic_curchan);
5481 	/*
5482 	 * If we are returning to our bss channel then mark state
5483 	 * so the next recv'd beacon's tsf will be used to sync the
5484 	 * beacon timers.  Note that since we only hear beacons in
5485 	 * sta/ibss mode this has no effect in other operating modes.
5486 	 */
5487 	if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan)
5488 		sc->sc_syncbeacon = 1;
5489 }
5490 
5491 /*
5492  * Walk the vap list and check if there any vap's in RUN state.
5493  */
5494 static int
5495 ath_isanyrunningvaps(struct ieee80211vap *this)
5496 {
5497 	struct ieee80211com *ic = this->iv_ic;
5498 	struct ieee80211vap *vap;
5499 
5500 	IEEE80211_LOCK_ASSERT(ic);
5501 
5502 	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
5503 		if (vap != this && vap->iv_state == IEEE80211_S_RUN)
5504 			return 1;
5505 	}
5506 	return 0;
5507 }
5508 
/*
 * 802.11 state-machine hook.  Handle the driver-specific work
 * around a state transition -- LED state, rx filter/bssid, key
 * cache MACs, beacon setup/teardown and the calibration timer --
 * then invoke the net80211 parent method to do the generic work.
 * Returns 0 or the error from the parent method/beacon setup.
 */
static int
ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
{
	struct ieee80211com *ic = vap->iv_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_vap *avp = ATH_VAP(vap);
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *ni = NULL;
	int i, error, stamode;
	u_int32_t rfilt;
	/* LED blink pattern for each net80211 state, indexed by nstate */
	static const HAL_LED_STATE leds[] = {
	    HAL_LED_INIT,	/* IEEE80211_S_INIT */
	    HAL_LED_SCAN,	/* IEEE80211_S_SCAN */
	    HAL_LED_AUTH,	/* IEEE80211_S_AUTH */
	    HAL_LED_ASSOC, 	/* IEEE80211_S_ASSOC */
	    HAL_LED_RUN, 	/* IEEE80211_S_CAC */
	    HAL_LED_RUN, 	/* IEEE80211_S_RUN */
	    HAL_LED_RUN, 	/* IEEE80211_S_CSA */
	    HAL_LED_RUN, 	/* IEEE80211_S_SLEEP */
	};

	DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
		ieee80211_state_name[vap->iv_state],
		ieee80211_state_name[nstate]);

	/* stop periodic calibration while we reconfigure */
	callout_drain(&sc->sc_cal_ch);
	ath_hal_setledstate(ah, leds[nstate]);	/* set LED */

	if (nstate == IEEE80211_S_SCAN) {
		/*
		 * Scanning: turn off beacon miss and don't beacon.
		 * Mark beacon state so when we reach RUN state we'll
		 * [re]setup beacons.  Unblock the task q thread so
		 * deferred interrupt processing is done.
		 */
		ath_hal_intrset(ah,
		    sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
		sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
		sc->sc_beacons = 0;
		taskqueue_unblock(sc->sc_tq);
	}

	ni = vap->iv_bss;
	rfilt = ath_calcrxfilter(sc);
	/* modes that associate to a bss and need bssid/aid programmed */
	stamode = (vap->iv_opmode == IEEE80211_M_STA ||
		   vap->iv_opmode == IEEE80211_M_AHDEMO ||
		   vap->iv_opmode == IEEE80211_M_IBSS);
	if (stamode && nstate == IEEE80211_S_RUN) {
		sc->sc_curaid = ni->ni_associd;
		IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid);
		ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid);
	}
	DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n",
	   __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid);
	ath_hal_setrxfilter(ah, rfilt);

	/* XXX is this to restore keycache on resume? */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    (vap->iv_flags & IEEE80211_F_PRIVACY)) {
		for (i = 0; i < IEEE80211_WEP_NKID; i++)
			if (ath_hal_keyisvalid(ah, i))
				ath_hal_keysetmac(ah, i, ni->ni_bssid);
	}

	/*
	 * Invoke the parent method to do net80211 work.
	 */
	error = avp->av_newstate(vap, nstate, arg);
	if (error != 0)
		goto bad;

	if (nstate == IEEE80211_S_RUN) {
		/* NB: collect bss node again, it may have changed */
		ni = vap->iv_bss;

		DPRINTF(sc, ATH_DEBUG_STATE,
		    "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s "
		    "capinfo 0x%04x chan %d\n", __func__,
		    vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid),
		    ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan));

		switch (vap->iv_opmode) {
#ifdef IEEE80211_SUPPORT_TDMA
		case IEEE80211_M_AHDEMO:
			if ((vap->iv_caps & IEEE80211_C_TDMA) == 0)
				break;
			/* fall thru... */
#endif
		case IEEE80211_M_HOSTAP:
		case IEEE80211_M_IBSS:
			/*
			 * Allocate and setup the beacon frame.
			 *
			 * Stop any previous beacon DMA.  This may be
			 * necessary, for example, when an ibss merge
			 * causes reconfiguration; there will be a state
			 * transition from RUN->RUN that means we may
			 * be called with beacon transmission active.
			 */
			ath_hal_stoptxdma(ah, sc->sc_bhalq);

			error = ath_beacon_alloc(sc, ni);
			if (error != 0)
				goto bad;
			/*
			 * If joining an adhoc network defer beacon timer
			 * configuration to the next beacon frame so we
			 * have a current TSF to use.  Otherwise we're
			 * starting an ibss/bss so there's no need to delay;
			 * if this is the first vap moving to RUN state, then
			 * beacon state needs to be [re]configured.
			 */
			if (vap->iv_opmode == IEEE80211_M_IBSS &&
			    ni->ni_tstamp.tsf != 0) {
				sc->sc_syncbeacon = 1;
			} else if (!sc->sc_beacons) {
#ifdef IEEE80211_SUPPORT_TDMA
				if (vap->iv_caps & IEEE80211_C_TDMA)
					ath_tdma_config(sc, vap);
				else
#endif
					ath_beacon_config(sc, vap);
				sc->sc_beacons = 1;
			}
			break;
		case IEEE80211_M_STA:
			/*
			 * Defer beacon timer configuration to the next
			 * beacon frame so we have a current TSF to use
			 * (any TSF collected when scanning is likely old).
			 */
			sc->sc_syncbeacon = 1;
			break;
		case IEEE80211_M_MONITOR:
			/*
			 * Monitor mode vaps have only INIT->RUN and RUN->RUN
			 * transitions so we must re-enable interrupts here to
			 * handle the case of a single monitor mode vap.
			 */
			ath_hal_intrset(ah, sc->sc_imask);
			break;
		case IEEE80211_M_WDS:
			break;
		default:
			break;
		}
		/*
		 * Let the hal process statistics collected during a
		 * scan so it can provide calibrated noise floor data.
		 */
		ath_hal_process_noisefloor(ah);
		/*
		 * Reset rssi stats; maybe not the best place...
		 */
		sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
		sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
		/*
		 * Finally, start any timers and the task q thread
		 * (in case we didn't go through SCAN state).
		 */
		if (ath_longcalinterval != 0) {
			/* start periodic recalibration timer */
			callout_reset(&sc->sc_cal_ch, 1, ath_calibrate, sc);
		} else {
			DPRINTF(sc, ATH_DEBUG_CALIBRATE,
			    "%s: calibration disabled\n", __func__);
		}
		taskqueue_unblock(sc->sc_tq);
	} else if (nstate == IEEE80211_S_INIT) {
		/*
		 * If there are no vaps left in RUN state then
		 * shutdown host/driver operation:
		 * o disable interrupts
		 * o disable the task queue thread
		 * o mark beacon processing as stopped
		 */
		if (!ath_isanyrunningvaps(vap)) {
			sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
			/* disable interrupts  */
			ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
			taskqueue_block(sc->sc_tq);
			sc->sc_beacons = 0;
		}
#ifdef IEEE80211_SUPPORT_TDMA
		ath_hal_setcca(ah, AH_TRUE);
#endif
	}
bad:
	/* NB: also reached on success with error == 0 */
	return error;
}
5700 
5701 /*
5702  * Allocate a key cache slot to the station so we can
5703  * setup a mapping from key index to node. The key cache
5704  * slot is needed for managing antenna state and for
5705  * compression when stations do not use crypto.  We do
5706  * it uniliaterally here; if crypto is employed this slot
5707  * will be reassigned.
5708  */
5709 static void
5710 ath_setup_stationkey(struct ieee80211_node *ni)
5711 {
5712 	struct ieee80211vap *vap = ni->ni_vap;
5713 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5714 	ieee80211_keyix keyix, rxkeyix;
5715 
5716 	if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) {
5717 		/*
5718 		 * Key cache is full; we'll fall back to doing
5719 		 * the more expensive lookup in software.  Note
5720 		 * this also means no h/w compression.
5721 		 */
5722 		/* XXX msg+statistic */
5723 	} else {
5724 		/* XXX locking? */
5725 		ni->ni_ucastkey.wk_keyix = keyix;
5726 		ni->ni_ucastkey.wk_rxkeyix = rxkeyix;
5727 		/* NB: must mark device key to get called back on delete */
5728 		ni->ni_ucastkey.wk_flags |= IEEE80211_KEY_DEVKEY;
5729 		IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr);
5730 		/* NB: this will create a pass-thru key entry */
5731 		ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss);
5732 	}
5733 }
5734 
5735 /*
5736  * Setup driver-specific state for a newly associated node.
5737  * Note that we're called also on a re-associate, the isnew
5738  * param tells us if this is the first time or not.
5739  */
5740 static void
5741 ath_newassoc(struct ieee80211_node *ni, int isnew)
5742 {
5743 	struct ath_node *an = ATH_NODE(ni);
5744 	struct ieee80211vap *vap = ni->ni_vap;
5745 	struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc;
5746 	const struct ieee80211_txparam *tp = ni->ni_txparms;
5747 
5748 	an->an_mcastrix = ath_tx_findrix(sc, tp->mcastrate);
5749 	an->an_mgmtrix = ath_tx_findrix(sc, tp->mgmtrate);
5750 
5751 	ath_rate_newassoc(sc, an, isnew);
5752 	if (isnew &&
5753 	    (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey &&
5754 	    ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
5755 		ath_setup_stationkey(ni);
5756 }
5757 
5758 static int
5759 ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *reg,
5760 	int nchans, struct ieee80211_channel chans[])
5761 {
5762 	struct ath_softc *sc = ic->ic_ifp->if_softc;
5763 	struct ath_hal *ah = sc->sc_ah;
5764 	HAL_STATUS status;
5765 
5766 	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
5767 	    "%s: rd %u cc %u location %c%s\n",
5768 	    __func__, reg->regdomain, reg->country, reg->location,
5769 	    reg->ecm ? " ecm" : "");
5770 
5771 	status = ath_hal_set_channels(ah, chans, nchans,
5772 	    reg->country, reg->regdomain);
5773 	if (status != HAL_OK) {
5774 		DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: failed, status %u\n",
5775 		    __func__, status);
5776 		return EINVAL;		/* XXX */
5777 	}
5778 	return 0;
5779 }
5780 
5781 static void
5782 ath_getradiocaps(struct ieee80211com *ic,
5783 	int maxchans, int *nchans, struct ieee80211_channel chans[])
5784 {
5785 	struct ath_softc *sc = ic->ic_ifp->if_softc;
5786 	struct ath_hal *ah = sc->sc_ah;
5787 
5788 	DPRINTF(sc, ATH_DEBUG_REGDOMAIN, "%s: use rd %u cc %d\n",
5789 	    __func__, SKU_DEBUG, CTRY_DEFAULT);
5790 
5791 	/* XXX check return */
5792 	(void) ath_hal_getchannels(ah, chans, maxchans, nchans,
5793 	    HAL_MODE_ALL, CTRY_DEFAULT, SKU_DEBUG, AH_TRUE);
5794 
5795 }
5796 
/*
 * Build the initial channel list from EEPROM contents and capture
 * the EEPROM regulatory/country codes into both the softc and the
 * net80211 regdomain state.  Returns 0, or EINVAL on HAL failure.
 */
static int
ath_getchannels(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	/*
	 * Collect channel set based on EEPROM contents.
	 */
	status = ath_hal_init_channels(ah, ic->ic_channels, IEEE80211_CHAN_MAX,
	    &ic->ic_nchans, HAL_MODE_ALL, CTRY_DEFAULT, SKU_NONE, AH_TRUE);
	if (status != HAL_OK) {
		if_printf(ifp, "%s: unable to collect channel list from hal, "
		    "status %d\n", __func__, status);
		return EINVAL;
	}
	/* stash the raw EEPROM regulatory state for the sysctl tree */
	(void) ath_hal_getregdomain(ah, &sc->sc_eerd);
	ath_hal_getcountrycode(ah, &sc->sc_eecc);	/* NB: cannot fail */
	/* XXX map Atheros sku's to net80211 SKU's */
	/* XXX net80211 types too small */
	ic->ic_regdomain.regdomain = (uint16_t) sc->sc_eerd;
	ic->ic_regdomain.country = (uint16_t) sc->sc_eecc;
	ic->ic_regdomain.isocc[0] = ' ';	/* XXX don't know */
	ic->ic_regdomain.isocc[1] = ' ';

	/* NOTE(review): 'I' presumably means indoor location -- confirm
	 * against net80211 regdomain conventions */
	ic->ic_regdomain.ecm = 1;
	ic->ic_regdomain.location = 'I';

	DPRINTF(sc, ATH_DEBUG_REGDOMAIN,
	    "%s: eeprom rd %u cc %u (mapped rd %u cc %u) location %c%s\n",
	    __func__, sc->sc_eerd, sc->sc_eecc,
	    ic->ic_regdomain.regdomain, ic->ic_regdomain.country,
	    ic->ic_regdomain.location, ic->ic_regdomain.ecm ? " ecm" : "");
	return 0;
}
5834 
5835 static void
5836 ath_led_done(void *arg)
5837 {
5838 	struct ath_softc *sc = arg;
5839 
5840 	sc->sc_blinking = 0;
5841 }
5842 
5843 /*
5844  * Turn the LED off: flip the pin and then set a timer so no
5845  * update will happen for the specified duration.
5846  */
5847 static void
5848 ath_led_off(void *arg)
5849 {
5850 	struct ath_softc *sc = arg;
5851 
5852 	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon);
5853 	callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc);
5854 }
5855 
5856 /*
5857  * Blink the LED according to the specified on/off times.
5858  */
5859 static void
5860 ath_led_blink(struct ath_softc *sc, int on, int off)
5861 {
5862 	DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off);
5863 	ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon);
5864 	sc->sc_blinking = 1;
5865 	sc->sc_ledoff = off;
5866 	callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc);
5867 }
5868 
5869 static void
5870 ath_led_event(struct ath_softc *sc, int rix)
5871 {
5872 	sc->sc_ledevent = ticks;	/* time of last event */
5873 	if (sc->sc_blinking)		/* don't interrupt active blink */
5874 		return;
5875 	ath_led_blink(sc, sc->sc_hwmap[rix].ledon, sc->sc_hwmap[rix].ledoff);
5876 }
5877 
5878 static int
5879 ath_rate_setup(struct ath_softc *sc, u_int mode)
5880 {
5881 	struct ath_hal *ah = sc->sc_ah;
5882 	const HAL_RATE_TABLE *rt;
5883 
5884 	switch (mode) {
5885 	case IEEE80211_MODE_11A:
5886 		rt = ath_hal_getratetable(ah, HAL_MODE_11A);
5887 		break;
5888 	case IEEE80211_MODE_HALF:
5889 		rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE);
5890 		break;
5891 	case IEEE80211_MODE_QUARTER:
5892 		rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE);
5893 		break;
5894 	case IEEE80211_MODE_11B:
5895 		rt = ath_hal_getratetable(ah, HAL_MODE_11B);
5896 		break;
5897 	case IEEE80211_MODE_11G:
5898 		rt = ath_hal_getratetable(ah, HAL_MODE_11G);
5899 		break;
5900 	case IEEE80211_MODE_TURBO_A:
5901 		rt = ath_hal_getratetable(ah, HAL_MODE_108A);
5902 		break;
5903 	case IEEE80211_MODE_TURBO_G:
5904 		rt = ath_hal_getratetable(ah, HAL_MODE_108G);
5905 		break;
5906 	case IEEE80211_MODE_STURBO_A:
5907 		rt = ath_hal_getratetable(ah, HAL_MODE_TURBO);
5908 		break;
5909 	case IEEE80211_MODE_11NA:
5910 		rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20);
5911 		break;
5912 	case IEEE80211_MODE_11NG:
5913 		rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20);
5914 		break;
5915 	default:
5916 		DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n",
5917 			__func__, mode);
5918 		return 0;
5919 	}
5920 	sc->sc_rates[mode] = rt;
5921 	return (rt != NULL);
5922 }
5923 
/*
 * Switch the driver's notion of the current phy mode: rebuild the
 * 802.11-rate -> h/w-rate-index map, the per-rate radiotap flag and
 * LED blink tables, and cache the protection frame rate index.
 * The rate table for 'mode' must already be loaded (ath_rate_setup).
 */
static void
ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* NB: on/off times from the Atheros NDIS driver, w/ permission */
	static const struct {
		u_int		rate;		/* tx/rx 802.11 rate */
		u_int16_t	timeOn;		/* LED on time (ms) */
		u_int16_t	timeOff;	/* LED off time (ms) */
	} blinkrates[] = {
		{ 108,  40,  10 },
		{  96,  44,  11 },
		{  72,  50,  13 },
		{  48,  57,  14 },
		{  36,  67,  16 },
		{  24,  80,  20 },
		{  22, 100,  25 },
		{  18, 133,  34 },
		{  12, 160,  40 },
		{  10, 200,  50 },
		{   6, 240,  58 },
		{   4, 267,  66 },
		{   2, 400, 100 },
		{   0, 500, 130 },
		/* XXX half/quarter rates */
	};
	const HAL_RATE_TABLE *rt;
	int i, j;

	/* 0xff marks "no h/w rate" entries in the map */
	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = sc->sc_rates[mode];
	KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode));
	/* build rate (or rate|MCS for HT) -> h/w rate index map */
	for (i = 0; i < rt->rateCount; i++) {
		uint8_t ieeerate = rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy != IEEE80211_T_HT)
			sc->sc_rixmap[ieeerate] = i;
		else
			sc->sc_rixmap[ieeerate | IEEE80211_RATE_MCS] = i;
	}
	memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap));
	for (i = 0; i < N(sc->sc_hwmap); i++) {
		/* entries beyond the rate table get default blink times */
		if (i >= rt->rateCount) {
			sc->sc_hwmap[i].ledon = (500 * hz) / 1000;
			sc->sc_hwmap[i].ledoff = (130 * hz) / 1000;
			continue;
		}
		sc->sc_hwmap[i].ieeerate =
			rt->info[i].dot11Rate & IEEE80211_RATE_VAL;
		if (rt->info[i].phy == IEEE80211_T_HT)
			sc->sc_hwmap[i].ieeerate |= IEEE80211_RATE_MCS;
		sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
		if (rt->info[i].shortPreamble ||
		    rt->info[i].phy == IEEE80211_T_OFDM)
			sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags;
		/* find the blink cadence for this rate */
		for (j = 0; j < N(blinkrates)-1; j++)
			if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate)
				break;
		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overlow */
		sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000;
		sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000;
	}
	sc->sc_currates = rt;
	sc->sc_curmode = mode;
	/*
	 * All protection frames are transmited at 2Mb/s for
	 * 11g, otherwise at 1Mb/s.
	 */
	if (mode == IEEE80211_MODE_11G)
		sc->sc_protrix = ath_tx_findrix(sc, 2*2);
	else
		sc->sc_protrix = ath_tx_findrix(sc, 2*1);
	/* NB: caller is responsible for reseting rate control state */
#undef N
}
6000 
6001 #ifdef ATH_DEBUG
/*
 * Dump an rx descriptor chain for debugging.  'done' indicates the
 * descriptor has been processed; its status is then annotated with
 * " *" (ok) or " !" (error).
 */
static void
ath_printrxbuf(struct ath_softc *sc, const struct ath_buf *bf,
	u_int ix, int done)
{
	const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat;
	struct ath_hal *ah = sc->sc_ah;
	const struct ath_desc *ds;
	int i;

	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
		printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n"
		       "      %08x %08x %08x %08x\n",
		    ix, ds, (const struct ath_desc *)bf->bf_daddr + i,
		    ds->ds_link, ds->ds_data,
		    !done ? "" : (rs->rs_status == 0) ? " *" : " !",
		    ds->ds_ctl0, ds->ds_ctl1,
		    ds->ds_hw[0], ds->ds_hw[1]);
		/* extra status words; magic presumably identifies the
		 * AR5416 descriptor layout -- confirm against the HAL */
		if (ah->ah_magic == 0x20065416) {
			printf("        %08x %08x %08x %08x %08x %08x %08x\n",
			    ds->ds_hw[2], ds->ds_hw[3], ds->ds_hw[4],
			    ds->ds_hw[5], ds->ds_hw[6], ds->ds_hw[7],
			    ds->ds_hw[8]);
		}
	}
}
6027 
6028 static void
6029 ath_printtxbuf(struct ath_softc *sc, const struct ath_buf *bf,
6030 	u_int qnum, u_int ix, int done)
6031 {
6032 	const struct ath_tx_status *ts = &bf->bf_status.ds_txstat;
6033 	struct ath_hal *ah = sc->sc_ah;
6034 	const struct ath_desc *ds;
6035 	int i;
6036 
6037 	printf("Q%u[%3u]", qnum, ix);
6038 	for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) {
6039 		printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n"
6040 		       "        %08x %08x %08x %08x %08x %08x\n",
6041 		    ds, (const struct ath_desc *)bf->bf_daddr + i,
6042 		    ds->ds_link, ds->ds_data, bf->bf_txflags,
6043 		    !done ? "" : (ts->ts_status == 0) ? " *" : " !",
6044 		    ds->ds_ctl0, ds->ds_ctl1,
6045 		    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]);
6046 		if (ah->ah_magic == 0x20065416) {
6047 			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6048 			    ds->ds_hw[4], ds->ds_hw[5], ds->ds_hw[6],
6049 			    ds->ds_hw[7], ds->ds_hw[8], ds->ds_hw[9],
6050 			    ds->ds_hw[10],ds->ds_hw[11]);
6051 			printf("        %08x %08x %08x %08x %08x %08x %08x %08x\n",
6052 			    ds->ds_hw[12],ds->ds_hw[13],ds->ds_hw[14],
6053 			    ds->ds_hw[15],ds->ds_hw[16],ds->ds_hw[17],
6054 			    ds->ds_hw[18], ds->ds_hw[19]);
6055 		}
6056 	}
6057 }
6058 #endif /* ATH_DEBUG */
6059 
/*
 * Watchdog callout, re-armed once per second.  When sc_wd_timer
 * (set elsewhere in the driver -- presumably armed by the tx path;
 * confirm) counts down to zero, query the HAL for a known bb/mac
 * hang signature, log what was found and reset the chip.
 */
static void
ath_watchdog(void *arg)
{
	struct ath_softc *sc = arg;

	if (sc->sc_wd_timer != 0 && --sc->sc_wd_timer == 0) {
		struct ifnet *ifp = sc->sc_ifp;
		uint32_t hangs;

		if (ath_hal_gethangstate(sc->sc_ah, 0xffff, &hangs) &&
		    hangs != 0) {
			/* low byte set indicates a baseband hang */
			if_printf(ifp, "%s hang detected (0x%x)\n",
			    hangs & 0xff ? "bb" : "mac", hangs);
		} else
			if_printf(ifp, "device timeout\n");
		ath_reset(ifp);
		ifp->if_oerrors++;
		sc->sc_stats.ast_watchdog++;
	}
	callout_schedule(&sc->sc_wd_ch, hz);	/* run again in one second */
}
6081 
6082 #ifdef ATH_DIAGAPI
6083 /*
6084  * Diagnostic interface to the HAL.  This is used by various
6085  * tools to do things like retrieve register contents for
6086  * debugging.  The mechanism is intentionally opaque so that
6087  * it can change frequently w/o concern for compatiblity.
6088  */
6089 static int
6090 ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad)
6091 {
6092 	struct ath_hal *ah = sc->sc_ah;
6093 	u_int id = ad->ad_id & ATH_DIAG_ID;
6094 	void *indata = NULL;
6095 	void *outdata = NULL;
6096 	u_int32_t insize = ad->ad_in_size;
6097 	u_int32_t outsize = ad->ad_out_size;
6098 	int error = 0;
6099 
6100 	if (ad->ad_id & ATH_DIAG_IN) {
6101 		/*
6102 		 * Copy in data.
6103 		 */
6104 		indata = malloc(insize, M_TEMP, M_NOWAIT);
6105 		if (indata == NULL) {
6106 			error = ENOMEM;
6107 			goto bad;
6108 		}
6109 		error = copyin(ad->ad_in_data, indata, insize);
6110 		if (error)
6111 			goto bad;
6112 	}
6113 	if (ad->ad_id & ATH_DIAG_DYN) {
6114 		/*
6115 		 * Allocate a buffer for the results (otherwise the HAL
6116 		 * returns a pointer to a buffer where we can read the
6117 		 * results).  Note that we depend on the HAL leaving this
6118 		 * pointer for us to use below in reclaiming the buffer;
6119 		 * may want to be more defensive.
6120 		 */
6121 		outdata = malloc(outsize, M_TEMP, M_NOWAIT);
6122 		if (outdata == NULL) {
6123 			error = ENOMEM;
6124 			goto bad;
6125 		}
6126 	}
6127 	if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) {
6128 		if (outsize < ad->ad_out_size)
6129 			ad->ad_out_size = outsize;
6130 		if (outdata != NULL)
6131 			error = copyout(outdata, ad->ad_out_data,
6132 					ad->ad_out_size);
6133 	} else {
6134 		error = EINVAL;
6135 	}
6136 bad:
6137 	if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL)
6138 		free(indata, M_TEMP);
6139 	if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL)
6140 		free(outdata, M_TEMP);
6141 	return error;
6142 }
6143 #endif /* ATH_DIAGAPI */
6144 
/*
 * Driver ioctl entry point: interface flag changes, media ioctls,
 * driver statistics export/reset, the optional diagnostic API and
 * ethernet address queries.  Anything else returns EINVAL.
 */
static int
ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
#define	IS_RUNNING(ifp) \
	((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))
	struct ath_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ifreq *ifr = (struct ifreq *)data;
	const HAL_RATE_TABLE *rt;
	int error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS:
		ATH_LOCK(sc);
		if (IS_RUNNING(ifp)) {
			/*
			 * To avoid rescanning another access point,
			 * do not call ath_init() here.  Instead,
			 * only reflect promisc mode settings.
			 */
			ath_mode_init(sc);
		} else if (ifp->if_flags & IFF_UP) {
			/*
			 * Beware of being called during attach/detach
			 * to reset promiscuous mode.  In that case we
			 * will still be marked UP but not RUNNING.
			 * However trying to re-init the interface
			 * is the wrong thing to do as we've already
			 * torn down much of our state.  There's
			 * probably a better way to deal with this.
			 */
			if (!sc->sc_invalid)
				ath_init(sc);	/* XXX lose error */
		} else {
			/* interface marked down: stop the hardware */
			ath_stop_locked(ifp);
#ifdef notyet
			/* XXX must wakeup in places like ath_vap_delete */
			if (!sc->sc_invalid)
				ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP);
#endif
		}
		ATH_UNLOCK(sc);
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd);
		break;
	case SIOCGATHSTATS:
		/* NB: embed these numbers to get a consistent view */
		sc->sc_stats.ast_tx_packets = ifp->if_opackets;
		sc->sc_stats.ast_rx_packets = ifp->if_ipackets;
		sc->sc_stats.ast_tx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgtxrssi);
		sc->sc_stats.ast_rx_rssi = ATH_RSSI(sc->sc_halstats.ns_avgrssi);
#ifdef IEEE80211_SUPPORT_TDMA
		sc->sc_stats.ast_tdma_tsfadjp = TDMA_AVG(sc->sc_avgtsfdeltap);
		sc->sc_stats.ast_tdma_tsfadjm = TDMA_AVG(sc->sc_avgtsfdeltam);
#endif
		rt = sc->sc_currates;
		/* XXX HT rates */
		sc->sc_stats.ast_tx_rate =
		    rt->info[sc->sc_txrix].dot11Rate &~ IEEE80211_RATE_BASIC;
		return copyout(&sc->sc_stats,
		    ifr->ifr_data, sizeof (sc->sc_stats));
	case SIOCZATHSTATS:
		/* zeroing the stats is a privileged operation */
		error = priv_check(curthread, PRIV_DRIVER);
		if (error == 0)
			memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
		break;
#ifdef ATH_DIAGAPI
	case SIOCGATHDIAG:
		error = ath_ioctl_diag(sc, (struct ath_diag *) ifr);
		break;
#endif
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
#undef IS_RUNNING
}
6228 
6229 static int
6230 ath_sysctl_slottime(SYSCTL_HANDLER_ARGS)
6231 {
6232 	struct ath_softc *sc = arg1;
6233 	u_int slottime = ath_hal_getslottime(sc->sc_ah);
6234 	int error;
6235 
6236 	error = sysctl_handle_int(oidp, &slottime, 0, req);
6237 	if (error || !req->newptr)
6238 		return error;
6239 	return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0;
6240 }
6241 
6242 static int
6243 ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS)
6244 {
6245 	struct ath_softc *sc = arg1;
6246 	u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah);
6247 	int error;
6248 
6249 	error = sysctl_handle_int(oidp, &acktimeout, 0, req);
6250 	if (error || !req->newptr)
6251 		return error;
6252 	return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0;
6253 }
6254 
6255 static int
6256 ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS)
6257 {
6258 	struct ath_softc *sc = arg1;
6259 	u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah);
6260 	int error;
6261 
6262 	error = sysctl_handle_int(oidp, &ctstimeout, 0, req);
6263 	if (error || !req->newptr)
6264 		return error;
6265 	return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? EINVAL : 0;
6266 }
6267 
6268 static int
6269 ath_sysctl_softled(SYSCTL_HANDLER_ARGS)
6270 {
6271 	struct ath_softc *sc = arg1;
6272 	int softled = sc->sc_softled;
6273 	int error;
6274 
6275 	error = sysctl_handle_int(oidp, &softled, 0, req);
6276 	if (error || !req->newptr)
6277 		return error;
6278 	softled = (softled != 0);
6279 	if (softled != sc->sc_softled) {
6280 		if (softled) {
6281 			/* NB: handle any sc_ledpin change */
6282 			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6283 			    HAL_GPIO_MUX_MAC_NETWORK_LED);
6284 			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6285 				!sc->sc_ledon);
6286 		}
6287 		sc->sc_softled = softled;
6288 	}
6289 	return 0;
6290 }
6291 
6292 static int
6293 ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS)
6294 {
6295 	struct ath_softc *sc = arg1;
6296 	int ledpin = sc->sc_ledpin;
6297 	int error;
6298 
6299 	error = sysctl_handle_int(oidp, &ledpin, 0, req);
6300 	if (error || !req->newptr)
6301 		return error;
6302 	if (ledpin != sc->sc_ledpin) {
6303 		sc->sc_ledpin = ledpin;
6304 		if (sc->sc_softled) {
6305 			ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin,
6306 			    HAL_GPIO_MUX_MAC_NETWORK_LED);
6307 			ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin,
6308 				!sc->sc_ledon);
6309 		}
6310 	}
6311 	return 0;
6312 }
6313 
6314 static int
6315 ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS)
6316 {
6317 	struct ath_softc *sc = arg1;
6318 	u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah);
6319 	int error;
6320 
6321 	error = sysctl_handle_int(oidp, &txantenna, 0, req);
6322 	if (!error && req->newptr) {
6323 		/* XXX assumes 2 antenna ports */
6324 		if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B)
6325 			return EINVAL;
6326 		ath_hal_setantennaswitch(sc->sc_ah, txantenna);
6327 		/*
6328 		 * NB: with the switch locked this isn't meaningful,
6329 		 *     but set it anyway so things like radiotap get
6330 		 *     consistent info in their data.
6331 		 */
6332 		sc->sc_txantenna = txantenna;
6333 	}
6334 	return error;
6335 }
6336 
6337 static int
6338 ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS)
6339 {
6340 	struct ath_softc *sc = arg1;
6341 	u_int defantenna = ath_hal_getdefantenna(sc->sc_ah);
6342 	int error;
6343 
6344 	error = sysctl_handle_int(oidp, &defantenna, 0, req);
6345 	if (!error && req->newptr)
6346 		ath_hal_setdefantenna(sc->sc_ah, defantenna);
6347 	return error;
6348 }
6349 
6350 static int
6351 ath_sysctl_diversity(SYSCTL_HANDLER_ARGS)
6352 {
6353 	struct ath_softc *sc = arg1;
6354 	u_int diversity = ath_hal_getdiversity(sc->sc_ah);
6355 	int error;
6356 
6357 	error = sysctl_handle_int(oidp, &diversity, 0, req);
6358 	if (error || !req->newptr)
6359 		return error;
6360 	if (!ath_hal_setdiversity(sc->sc_ah, diversity))
6361 		return EINVAL;
6362 	sc->sc_diversity = diversity;
6363 	return 0;
6364 }
6365 
6366 static int
6367 ath_sysctl_diag(SYSCTL_HANDLER_ARGS)
6368 {
6369 	struct ath_softc *sc = arg1;
6370 	u_int32_t diag;
6371 	int error;
6372 
6373 	if (!ath_hal_getdiag(sc->sc_ah, &diag))
6374 		return EINVAL;
6375 	error = sysctl_handle_int(oidp, &diag, 0, req);
6376 	if (error || !req->newptr)
6377 		return error;
6378 	return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0;
6379 }
6380 
6381 static int
6382 ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS)
6383 {
6384 	struct ath_softc *sc = arg1;
6385 	struct ifnet *ifp = sc->sc_ifp;
6386 	u_int32_t scale;
6387 	int error;
6388 
6389 	(void) ath_hal_gettpscale(sc->sc_ah, &scale);
6390 	error = sysctl_handle_int(oidp, &scale, 0, req);
6391 	if (error || !req->newptr)
6392 		return error;
6393 	return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL :
6394 	    (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6395 }
6396 
6397 static int
6398 ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
6399 {
6400 	struct ath_softc *sc = arg1;
6401 	u_int tpc = ath_hal_gettpc(sc->sc_ah);
6402 	int error;
6403 
6404 	error = sysctl_handle_int(oidp, &tpc, 0, req);
6405 	if (error || !req->newptr)
6406 		return error;
6407 	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
6408 }
6409 
6410 static int
6411 ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
6412 {
6413 	struct ath_softc *sc = arg1;
6414 	struct ifnet *ifp = sc->sc_ifp;
6415 	struct ath_hal *ah = sc->sc_ah;
6416 	u_int rfkill = ath_hal_getrfkill(ah);
6417 	int error;
6418 
6419 	error = sysctl_handle_int(oidp, &rfkill, 0, req);
6420 	if (error || !req->newptr)
6421 		return error;
6422 	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
6423 		return 0;
6424 	if (!ath_hal_setrfkill(ah, rfkill))
6425 		return EINVAL;
6426 	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
6427 }
6428 
6429 static int
6430 ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
6431 {
6432 	struct ath_softc *sc = arg1;
6433 	u_int rfsilent;
6434 	int error;
6435 
6436 	(void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
6437 	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
6438 	if (error || !req->newptr)
6439 		return error;
6440 	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
6441 		return EINVAL;
6442 	sc->sc_rfsilentpin = rfsilent & 0x1c;
6443 	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
6444 	return 0;
6445 }
6446 
6447 static int
6448 ath_sysctl_tpack(SYSCTL_HANDLER_ARGS)
6449 {
6450 	struct ath_softc *sc = arg1;
6451 	u_int32_t tpack;
6452 	int error;
6453 
6454 	(void) ath_hal_gettpack(sc->sc_ah, &tpack);
6455 	error = sysctl_handle_int(oidp, &tpack, 0, req);
6456 	if (error || !req->newptr)
6457 		return error;
6458 	return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0;
6459 }
6460 
6461 static int
6462 ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS)
6463 {
6464 	struct ath_softc *sc = arg1;
6465 	u_int32_t tpcts;
6466 	int error;
6467 
6468 	(void) ath_hal_gettpcts(sc->sc_ah, &tpcts);
6469 	error = sysctl_handle_int(oidp, &tpcts, 0, req);
6470 	if (error || !req->newptr)
6471 		return error;
6472 	return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0;
6473 }
6474 
6475 static int
6476 ath_sysctl_intmit(SYSCTL_HANDLER_ARGS)
6477 {
6478 	struct ath_softc *sc = arg1;
6479 	int intmit, error;
6480 
6481 	intmit = ath_hal_getintmit(sc->sc_ah);
6482 	error = sysctl_handle_int(oidp, &intmit, 0, req);
6483 	if (error || !req->newptr)
6484 		return error;
6485 	return !ath_hal_setintmit(sc->sc_ah, intmit) ? EINVAL : 0;
6486 }
6487 
6488 #ifdef IEEE80211_SUPPORT_TDMA
6489 static int
6490 ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
6491 {
6492 	struct ath_softc *sc = arg1;
6493 	int setcca, error;
6494 
6495 	setcca = sc->sc_setcca;
6496 	error = sysctl_handle_int(oidp, &setcca, 0, req);
6497 	if (error || !req->newptr)
6498 		return error;
6499 	sc->sc_setcca = (setcca != 0);
6500 	return 0;
6501 }
6502 #endif /* IEEE80211_SUPPORT_TDMA */
6503 
/*
 * Register the driver's sysctl tree: EEPROM regulatory info, debug
 * knobs, LED/antenna/power controls, and -- when the hardware
 * supports them -- TPC, RF silent, interference mitigation and
 * TDMA tunables.
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLFLAG_RD, &sc->sc_eecc, 0,
		"EEPROM country code");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLFLAG_RD, &sc->sc_eerd, 0,
		"EEPROM regdomain code");
#ifdef	ATH_DEBUG
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
#endif
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ledpin, "I", "GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_txantenna, "I", "antenna switch");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	/* capability-dependent knobs */
	if (ath_hal_hastpc(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpack, "I", "tx power for ack frames");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpcts, "I", "tx power for cts frames");
	}
	if (ath_hal_hasrfsilent(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfsilent, "I", "h/w RF silent config");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
	}
	if (ath_hal_hasintmit(ah)) {
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_intmit, "I", "interference mitigation");
	}
	/* default: pass decrypt/MIC errors up in monitor mode */
	sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
		"mask of error frames to pass when monitoring");
#ifdef IEEE80211_SUPPORT_TDMA
	/* NOTE(review): TDMA knobs only for MAC versions > 0x78 --
	 * presumably older MACs lack the needed support; confirm */
	if (ath_hal_macversion(ah) > 0x78) {
		sc->sc_tdmadbaprep = 2;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"dbaprep", CTLFLAG_RW, &sc->sc_tdmadbaprep, 0,
			"TDMA DBA preparation time");
		sc->sc_tdmaswbaprep = 10;
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"swbaprep", CTLFLAG_RW, &sc->sc_tdmaswbaprep, 0,
			"TDMA SWBA preparation time");
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"guardtime", CTLFLAG_RW, &sc->sc_tdmaguard, 0,
			"TDMA slot guard time");
		SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"superframe", CTLFLAG_RD, &sc->sc_tdmabintval, 0,
			"TDMA calculated super frame");
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"setcca", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_setcca, "I", "enable CCA control");
	}
#endif
}
6613 
/*
 * Format and hand off a "raw" frame for transmission: the caller
 * (the bpf injection path) supplies explicit transmit parameters
 * (rates, tries, power, protection) in 'params' instead of having
 * the driver derive them from node/vap state.
 *
 * On success the buffer is queued to the h/w via ath_tx_handoff()
 * and 0 is returned.  On crypto-encap failure the mbuf is freed and
 * EIO is returned.  On DMA-setup failure the error from
 * ath_tx_dmasetup is returned.
 * NOTE(review): assumes ath_tx_dmasetup disposes of the mbuf on
 * failure — confirm before relying on ownership here.
 */
static int
ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
	struct ath_buf *bf, struct mbuf *m0,
	const struct ieee80211_bpf_params *params)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211vap *vap = ni->ni_vap;
	int error, ismcast, ismrr;
	int keyix, hdrlen, pktlen, try0, txantenna;
	u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3;
	struct ieee80211_frame *wh;
	u_int flags, ctsduration;
	HAL_PKT_TYPE atype;
	const HAL_RATE_TABLE *rt;
	struct ath_desc *ds;
	u_int pri;

	wh = mtod(m0, struct ieee80211_frame *);
	ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
	hdrlen = ieee80211_anyhdrsize(wh);
	/*
	 * Packet length must not include any
	 * pad bytes; deduct them here.
	 */
	/* XXX honor IEEE80211_BPF_DATAPAD */
	pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

	if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			ath_freetx(m0);
			return EIO;
		}
		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w mic is done the frame will have had any mic
		 * added to it prior to entry so m0->m_pkthdr.len will
		 * account for it. Otherwise we need to add it to the
		 * packet length.
		 */
		cip = k->wk_cipher;
		hdrlen += cip->ic_header;
		pktlen += cip->ic_header + cip->ic_trailer;
		/* NB: frags always have any TKIP MIC done in s/w */
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;
		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = mtod(m0, struct ieee80211_frame *);
	} else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) {
		/*
		 * Use station key cache slot, if assigned.
		 */
		keyix = ni->ni_ucastkey.wk_keyix;
		if (keyix == IEEE80211_KEYIX_NONE)
			keyix = HAL_TXKEYIX_INVALID;
	} else
		keyix = HAL_TXKEYIX_INVALID;

	error = ath_tx_dmasetup(sc, bf, m0);
	if (error != 0)
		return error;
	m0 = bf->bf_m;				/* NB: may have changed */
	wh = mtod(m0, struct ieee80211_frame *);
	bf->bf_node = ni;			/* NB: held reference */

	flags = HAL_TXDESC_CLRDMASK;		/* XXX needed for crypto errs */
	flags |= HAL_TXDESC_INTREQ;		/* force interrupt */
	/* Caller-requested RTS takes precedence over CTS protection. */
	if (params->ibp_flags & IEEE80211_BPF_RTS)
		flags |= HAL_TXDESC_RTSENA;
	else if (params->ibp_flags & IEEE80211_BPF_CTS)
		flags |= HAL_TXDESC_CTSENA;
	/* XXX leave ismcast to injector? */
	if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast)
		flags |= HAL_TXDESC_NOACK;

	rt = sc->sc_currates;
	KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
	rix = ath_tx_findrix(sc, params->ibp_rate0);
	txrate = rt->info[rix].rateCode;
	if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
		txrate |= rt->info[rix].shortPreamble;
	sc->sc_txrix = rix;
	try0 = params->ibp_try0;
	/* Multi-rate retry is in effect only when a series-1 try count is set. */
	ismrr = (params->ibp_try1 != 0);
	/* NB: upper bits of ibp_pri carry the antenna selection (see '& 3' below). */
	txantenna = params->ibp_pri >> 2;
	if (txantenna == 0)			/* XXX? */
		txantenna = sc->sc_txantenna;
	ctsduration = 0;
	if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) {
		/*
		 * Compute the RTS/CTS protection duration by hand:
		 * optional CTS time, the frame's airtime, and an ACK
		 * when one is expected.  Long vs short preamble ack
		 * durations come from the rate table.
		 */
		cix = ath_tx_findrix(sc, params->ibp_ctsrate);
		ctsrate = rt->info[cix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) {
			ctsrate |= rt->info[cix].shortPreamble;
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].spAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_TRUE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].spAckDuration;
		} else {
			if (flags & HAL_TXDESC_RTSENA)		/* SIFS + CTS */
				ctsduration += rt->info[cix].lpAckDuration;
			ctsduration += ath_hal_computetxtime(ah,
				rt, pktlen, rix, AH_FALSE);
			if ((flags & HAL_TXDESC_NOACK) == 0)	/* SIFS + ACK */
				ctsduration += rt->info[rix].lpAckDuration;
		}
		/* Protected frames disable multi-rate retry. */
		ismrr = 0;			/* XXX */
	} else
		ctsrate = 0;
	pri = params->ibp_pri & 3;
	/*
	 * NB: we mark all packets as type PSPOLL so the h/w won't
	 * set the sequence number, duration, etc.
	 */
	atype = HAL_PKT_TYPE_PSPOLL;

	if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT))
		ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len,
		    sc->sc_hwmap[rix].ieeerate, -1);

	/* Feed a copy of the tx state to any active radiotap taps. */
	if (ieee80211_radiotap_active_vap(vap)) {
		u_int64_t tsf = ath_hal_gettsf64(ah);

		sc->sc_tx_th.wt_tsf = htole64(tsf);
		sc->sc_tx_th.wt_flags = sc->sc_hwmap[rix].txflags;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP;
		if (m0->m_flags & M_FRAG)
			sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG;
		sc->sc_tx_th.wt_rate = sc->sc_hwmap[rix].ieeerate;
		sc->sc_tx_th.wt_txpower = ni->ni_txpower;
		sc->sc_tx_th.wt_antenna = sc->sc_txantenna;

		ieee80211_radiotap_tx(vap, m0);
	}

	/*
	 * Formulate first tx descriptor with tx controls.
	 */
	ds = bf->bf_desc;
	/* XXX check return value? */
	ath_hal_setuptxdesc(ah, ds
		, pktlen		/* packet length */
		, hdrlen		/* header length */
		, atype			/* Atheros packet type */
		, params->ibp_power	/* txpower */
		, txrate, try0		/* series 0 rate/tries */
		, keyix			/* key cache index */
		, txantenna		/* antenna mode */
		, flags			/* flags */
		, ctsrate		/* rts/cts rate */
		, ctsduration		/* rts/cts duration */
	);
	bf->bf_txflags = flags;

	/* Program series 1-3 rates when multi-rate retry is in use. */
	if (ismrr) {
		rix = ath_tx_findrix(sc, params->ibp_rate1);
		rate1 = rt->info[rix].rateCode;
		if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
			rate1 |= rt->info[rix].shortPreamble;
		if (params->ibp_try2) {
			rix = ath_tx_findrix(sc, params->ibp_rate2);
			rate2 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate2 |= rt->info[rix].shortPreamble;
		} else
			rate2 = 0;
		if (params->ibp_try3) {
			rix = ath_tx_findrix(sc, params->ibp_rate3);
			rate3 = rt->info[rix].rateCode;
			if (params->ibp_flags & IEEE80211_BPF_SHORTPRE)
				rate3 |= rt->info[rix].shortPreamble;
		} else
			rate3 = 0;
		ath_hal_setupxtxdesc(ah, ds
			, rate1, params->ibp_try1	/* series 1 */
			, rate2, params->ibp_try2	/* series 2 */
			, rate3, params->ibp_try3	/* series 3 */
		);
	}

	/* NB: no buffered multicast in power save support */
	ath_tx_handoff(sc, sc->sc_ac2q[pri], bf);
	return 0;
}
6819 
6820 static int
6821 ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
6822 	const struct ieee80211_bpf_params *params)
6823 {
6824 	struct ieee80211com *ic = ni->ni_ic;
6825 	struct ifnet *ifp = ic->ic_ifp;
6826 	struct ath_softc *sc = ifp->if_softc;
6827 	struct ath_buf *bf;
6828 
6829 	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) {
6830 		DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard frame, %s", __func__,
6831 		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ?
6832 			"!running" : "invalid");
6833 		sc->sc_stats.ast_tx_raw_fail++;
6834 		ieee80211_free_node(ni);
6835 		m_freem(m);
6836 		return ENETDOWN;
6837 	}
6838 	/*
6839 	 * Grab a TX buffer and associated resources.
6840 	 */
6841 	bf = ath_getbuf(sc);
6842 	if (bf == NULL) {
6843 		/* NB: ath_getbuf handles stat+msg */
6844 		ieee80211_free_node(ni);
6845 		m_freem(m);
6846 		return ENOBUFS;
6847 	}
6848 
6849 	ifp->if_opackets++;
6850 	sc->sc_stats.ast_tx_raw++;
6851 
6852 	if (params == NULL) {
6853 		/*
6854 		 * Legacy path; interpret frame contents to decide
6855 		 * precisely how to send the frame.
6856 		 */
6857 		if (ath_tx_start(sc, ni, bf, m))
6858 			goto bad;
6859 	} else {
6860 		/*
6861 		 * Caller supplied explicit parameters to use in
6862 		 * sending the frame.
6863 		 */
6864 		if (ath_tx_raw_start(sc, ni, bf, m, params))
6865 			goto bad;
6866 	}
6867 	sc->sc_wd_timer = 5;
6868 
6869 	return 0;
6870 bad:
6871 	ifp->if_oerrors++;
6872 	ATH_TXBUF_LOCK(sc);
6873 	STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
6874 	ATH_TXBUF_UNLOCK(sc);
6875 	ieee80211_free_node(ni);
6876 	return EIO;		/* XXX */
6877 }
6878 
6879 /*
6880  * Announce various information on device/driver attach.
6881  */
6882 static void
6883 ath_announce(struct ath_softc *sc)
6884 {
6885 	struct ifnet *ifp = sc->sc_ifp;
6886 	struct ath_hal *ah = sc->sc_ah;
6887 
6888 	if_printf(ifp, "AR%s mac %d.%d RF%s phy %d.%d\n",
6889 		ath_hal_mac_name(ah), ah->ah_macVersion, ah->ah_macRev,
6890 		ath_hal_rf_name(ah), ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
6891 	if (bootverbose) {
6892 		int i;
6893 		for (i = 0; i <= WME_AC_VO; i++) {
6894 			struct ath_txq *txq = sc->sc_ac2q[i];
6895 			if_printf(ifp, "Use hw queue %u for %s traffic\n",
6896 				txq->axq_qnum, ieee80211_wme_acnames[i]);
6897 		}
6898 		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
6899 			sc->sc_cabq->axq_qnum);
6900 		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
6901 	}
6902 	if (ath_rxbuf != ATH_RXBUF)
6903 		if_printf(ifp, "using %u rx buffers\n", ath_rxbuf);
6904 	if (ath_txbuf != ATH_TXBUF)
6905 		if_printf(ifp, "using %u tx buffers\n", ath_txbuf);
6906 }
6907 
6908 #ifdef IEEE80211_SUPPORT_TDMA
6909 static __inline uint32_t
6910 ath_hal_getnexttbtt(struct ath_hal *ah)
6911 {
6912 #define	AR_TIMER0	0x8028
6913 	return OS_REG_READ(ah, AR_TIMER0);
6914 }
6915 
6916 static __inline void
6917 ath_hal_adjusttsf(struct ath_hal *ah, int32_t tsfdelta)
6918 {
6919 	/* XXX handle wrap/overflow */
6920 	OS_REG_WRITE(ah, AR_TSF_L32, OS_REG_READ(ah, AR_TSF_L32) + tsfdelta);
6921 }
6922 
6923 static void
6924 ath_tdma_settimers(struct ath_softc *sc, u_int32_t nexttbtt, u_int32_t bintval)
6925 {
6926 	struct ath_hal *ah = sc->sc_ah;
6927 	HAL_BEACON_TIMERS bt;
6928 
6929 	bt.bt_intval = bintval | HAL_BEACON_ENA;
6930 	bt.bt_nexttbtt = nexttbtt;
6931 	bt.bt_nextdba = (nexttbtt<<3) - sc->sc_tdmadbaprep;
6932 	bt.bt_nextswba = (nexttbtt<<3) - sc->sc_tdmaswbaprep;
6933 	bt.bt_nextatim = nexttbtt+1;
6934 	ath_hal_beaconsettimers(ah, &bt);
6935 }
6936 
6937 /*
6938  * Calculate the beacon interval.  This is periodic in the
6939  * superframe for the bss.  We assume each station is configured
6940  * identically wrt transmit rate so the guard time we calculate
6941  * above will be the same on all stations.  Note we need to
6942  * factor in the xmit time because the hardware will schedule
6943  * a frame for transmit if the start of the frame is within
6944  * the burst time.  When we get hardware that properly kills
6945  * frames in the PCU we can reduce/eliminate the guard time.
6946  *
6947  * Roundup to 1024 is so we have 1 TU buffer in the guard time
6948  * to deal with the granularity of the nexttbtt timer.  11n MAC's
6949  * with 1us timer granularity should allow us to reduce/eliminate
6950  * this.
6951  */
6952 static void
6953 ath_tdma_bintvalsetup(struct ath_softc *sc,
6954 	const struct ieee80211_tdma_state *tdma)
6955 {
6956 	/* copy from vap state (XXX check all vaps have same value?) */
6957 	sc->sc_tdmaslotlen = tdma->tdma_slotlen;
6958 
6959 	sc->sc_tdmabintval = roundup((sc->sc_tdmaslotlen+sc->sc_tdmaguard) *
6960 		tdma->tdma_slotcnt, 1024);
6961 	sc->sc_tdmabintval >>= 10;		/* TSF -> TU */
6962 	if (sc->sc_tdmabintval & 1)
6963 		sc->sc_tdmabintval++;
6964 
6965 	if (tdma->tdma_slot == 0) {
6966 		/*
6967 		 * Only slot 0 beacons; other slots respond.
6968 		 */
6969 		sc->sc_imask |= HAL_INT_SWBA;
6970 		sc->sc_tdmaswba = 0;		/* beacon immediately */
6971 	} else {
6972 		/* XXX all vaps must be slot 0 or slot !0 */
6973 		sc->sc_imask &= ~HAL_INT_SWBA;
6974 	}
6975 }
6976 
/*
 * Max 802.11 overhead.  This assumes no 4-address frames and
 * the encapsulation done by ieee80211_encap (llc).  We also
 * include potential crypto overhead (IV/key-id, ICV and MIC)
 * plus the 802.11 FCS.
 */
#define	IEEE80211_MAXOVERHEAD \
	(sizeof(struct ieee80211_qosframe) \
	 + sizeof(struct llc) \
	 + IEEE80211_ADDR_LEN \
	 + IEEE80211_WEP_IVLEN \
	 + IEEE80211_WEP_KIDLEN \
	 + IEEE80211_WEP_CRCLEN \
	 + IEEE80211_WEP_MICLEN \
	 + IEEE80211_CRC_LEN)
6991 
6992 /*
6993  * Setup initially for tdma operation.  Start the beacon
6994  * timers and enable SWBA if we are slot 0.  Otherwise
6995  * we wait for slot 0 to arrive so we can sync up before
6996  * starting to transmit.
6997  */
static void
ath_tdma_config(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211com *ic = ifp->if_l2com;
	const struct ieee80211_txparam *tp;
	const struct ieee80211_tdma_state *tdma = NULL;
	int rix;

	/* No vap given: fall back to the first one on the com list. */
	if (vap == NULL) {
		vap = TAILQ_FIRST(&ic->ic_vaps);   /* XXX */
		if (vap == NULL) {
			if_printf(ifp, "%s: no vaps?\n", __func__);
			return;
		}
	}
	tp = vap->iv_bss->ni_txparms;
	/*
	 * Calculate the guard time for each slot.  This is the
	 * time to send a maximal-size frame according to the
	 * fixed/lowest transmit rate.  Note that the interface
	 * mtu does not include the 802.11 overhead so we must
	 * tack that on (ath_hal_computetxtime includes the
	 * preamble and plcp in its calculation).
	 */
	tdma = vap->iv_tdma;
	/* Use the fixed unicast rate when configured, else the mcast rate. */
	if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
		rix = ath_tx_findrix(sc, tp->ucastrate);
	else
		rix = ath_tx_findrix(sc, tp->mcastrate);
	/* XXX short preamble assumed */
	sc->sc_tdmaguard = ath_hal_computetxtime(ah, sc->sc_currates,
		ifp->if_mtu + IEEE80211_MAXOVERHEAD, rix, AH_TRUE);

	/* Mask interrupts while reprogramming the beacon machinery. */
	ath_hal_intrset(ah, 0);

	ath_beaconq_config(sc);			/* setup h/w beacon q */
	if (sc->sc_setcca)
		ath_hal_setcca(ah, AH_FALSE);	/* disable CCA */
	ath_tdma_bintvalsetup(sc, tdma);	/* calculate beacon interval */
	/* NB: HAL_BEACON_RESET_TSF in the interval restarts the TSF. */
	ath_tdma_settimers(sc, sc->sc_tdmabintval,
		sc->sc_tdmabintval | HAL_BEACON_RESET_TSF);
	sc->sc_syncbeacon = 0;

	/* Reset the tsf-delta averages used to track slot drift. */
	sc->sc_avgtsfdeltap = TDMA_DUMMY_MARKER;
	sc->sc_avgtsfdeltam = TDMA_DUMMY_MARKER;

	ath_hal_intrset(ah, sc->sc_imask);

	DPRINTF(sc, ATH_DEBUG_TDMA, "%s: slot %u len %uus cnt %u "
	    "bsched %u guard %uus bintval %u TU dba prep %u\n", __func__,
	    tdma->tdma_slot, tdma->tdma_slotlen, tdma->tdma_slotcnt,
	    tdma->tdma_bintval, sc->sc_tdmaguard, sc->sc_tdmabintval,
	    sc->sc_tdmadbaprep);
}
7054 
7055 /*
7056  * Update tdma operation.  Called from the 802.11 layer
7057  * when a beacon is received from the TDMA station operating
7058  * in the slot immediately preceding us in the bss.  Use
7059  * the rx timestamp for the beacon frame to update our
7060  * beacon timers so we follow their schedule.  Note that
7061  * by using the rx timestamp we implicitly include the
7062  * propagation delay in our schedule.
7063  */
static void
ath_tdma_update(struct ieee80211_node *ni,
	const struct ieee80211_tdma_param *tdma, int changed)
{
/* Convert a 64-bit TSF (hi/lo usec halves) to TU (1 TU = 1024 us). */
#define	TSF_TO_TU(_h,_l) \
	((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10))
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const HAL_RATE_TABLE *rt = sc->sc_currates;
	u_int64_t tsf, rstamp, nextslot;
	u_int32_t txtime, nextslottu, timer0;
	int32_t tudelta, tsfdelta;
	const struct ath_rx_status *rs;
	int rix;

	sc->sc_stats.ast_tdma_update++;

	/*
	 * Check for and adopt configuration changes.
	 */
	if (changed != 0) {
		const struct ieee80211_tdma_state *ts = vap->iv_tdma;

		ath_tdma_bintvalsetup(sc, ts);
		if (changed & TDMA_UPDATE_SLOTLEN)
			ath_wme_update(ic);

		DPRINTF(sc, ATH_DEBUG_TDMA,
		    "%s: adopt slot %u slotcnt %u slotlen %u us "
		    "bintval %u TU\n", __func__,
		    ts->tdma_slot, ts->tdma_slotcnt, ts->tdma_slotlen,
		    sc->sc_tdmabintval);

		/* XXX right? */
		ath_hal_intrset(ah, sc->sc_imask);
		/* NB: beacon timers programmed below */
	}

	/* extend rx timestamp to 64 bits */
	rs = sc->sc_lastrs;
	tsf = ath_hal_gettsf64(ah);
	rstamp = ath_extend_tsf(rs->rs_tstamp, tsf);
	/*
	 * The rx timestamp is set by the hardware on completing
	 * reception (at the point where the rx descriptor is DMA'd
	 * to the host).  To find the start of our next slot we
	 * must adjust this time by the time required to send
	 * the packet just received.
	 */
	rix = rt->rateCodeToIndex[rs->rs_rate];
	txtime = ath_hal_computetxtime(ah, rt, rs->rs_datalen, rix,
	    rt->info[rix].shortPreamble);
	/* NB: << 9 is to cvt to TU and /2 */
	/* NOTE(review): bintval<<9 is half the interval in usec (TU<<10/2). */
	nextslot = (rstamp - txtime) + (sc->sc_tdmabintval << 9);
	nextslottu = TSF_TO_TU(nextslot>>32, nextslot) & HAL_BEACON_PERIOD;

	/*
	 * TIMER0 is the h/w's idea of NextTBTT (in TU's).  Convert
	 * to usecs and calculate the difference between what the
	 * other station thinks and what we have programmed.  This
	 * lets us figure how to adjust our timers to match.  The
	 * adjustments are done by pulling the TSF forward and possibly
	 * rewriting the beacon timers.
	 */
	timer0 = ath_hal_getnexttbtt(ah);
	tsfdelta = (int32_t)((nextslot % TU_TO_TSF(HAL_BEACON_PERIOD+1)) - TU_TO_TSF(timer0));

	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsfdelta %d avg +%d/-%d\n", tsfdelta,
	    TDMA_AVG(sc->sc_avgtsfdeltap), TDMA_AVG(sc->sc_avgtsfdeltam));

	/*
	 * Track the signed delta in the +/- moving averages and
	 * normalize tsfdelta to a forward (positive) TSF pull of
	 * less than 1 TU; nextslottu absorbs the whole-TU part.
	 */
	if (tsfdelta < 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, -tsfdelta);
		tsfdelta = -tsfdelta % 1024;
		nextslottu++;
	} else if (tsfdelta > 0) {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, tsfdelta);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
		tsfdelta = 1024 - (tsfdelta % 1024);
		nextslottu++;
	} else {
		TDMA_SAMPLE(sc->sc_avgtsfdeltap, 0);
		TDMA_SAMPLE(sc->sc_avgtsfdeltam, 0);
	}
	tudelta = nextslottu - timer0;

	/*
	 * Copy sender's timestamp into tdma ie so they can
	 * calculate roundtrip time.  We submit a beacon frame
	 * below after any timer adjustment.  The frame goes out
	 * at the next TBTT so the sender can calculate the
	 * roundtrip by inspecting the tdma ie in our beacon frame.
	 *
	 * NB: This tstamp is subtly preserved when
	 *     IEEE80211_BEACON_TDMA is marked (e.g. when the
	 *     slot position changes) because ieee80211_add_tdma
	 *     skips over the data.
	 */
	memcpy(ATH_VAP(vap)->av_boff.bo_tdma +
		__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
		&ni->ni_tstamp.data, 8);
#if 0
	DPRINTF(sc, ATH_DEBUG_TDMA_TIMER,
	    "tsf %llu nextslot %llu (%d, %d) nextslottu %u timer0 %u (%d)\n",
	    (unsigned long long) tsf, (unsigned long long) nextslot,
	    (int)(nextslot - tsf), tsfdelta,
	    nextslottu, timer0, tudelta);
#endif
	/*
	 * Adjust the beacon timers only when pulling them forward
	 * or when going back by less than the beacon interval.
	 * Negative jumps larger than the beacon interval seem to
	 * cause the timers to stop and generally cause instability.
	 * This basically filters out jumps due to missed beacons.
	 */
	if (tudelta != 0 && (tudelta > 0 || -tudelta < sc->sc_tdmabintval)) {
		ath_tdma_settimers(sc, nextslottu, sc->sc_tdmabintval);
		sc->sc_stats.ast_tdma_timers++;
	}
	if (tsfdelta > 0) {
		ath_hal_adjusttsf(ah, tsfdelta);
		sc->sc_stats.ast_tdma_tsf++;
	}
	ath_tdma_beacon_send(sc, vap);		/* prepare response */
#undef TU_TO_TSF
#undef TSF_TO_TU
}
7195 
7196 /*
7197  * Transmit a beacon frame at SWBA.  Dynamic updates
7198  * to the frame contents are done as needed.
7199  */
static void
ath_tdma_beacon_send(struct ath_softc *sc, struct ieee80211vap *vap)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	int otherant;

	/*
	 * Check if the previous beacon has gone out.  If
	 * not don't try to post another, skip this period
	 * and wait for the next.  Missed beacons indicate
	 * a problem and should not occur.  If we miss too
	 * many consecutive beacons reset the device.
	 */
	if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) {
		sc->sc_bmisscount++;
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: missed %u consecutive beacons\n",
			__func__, sc->sc_bmisscount);
		if (sc->sc_bmisscount >= ath_bstuck_threshold)
			taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask);
		return;
	}
	if (sc->sc_bmisscount != 0) {
		DPRINTF(sc, ATH_DEBUG_BEACON,
			"%s: resume beacon xmit after %u misses\n",
			__func__, sc->sc_bmisscount);
		sc->sc_bmisscount = 0;
	}

	/*
	 * Check recent per-antenna transmit statistics and flip
	 * the default antenna if noticeably more frames went out
	 * on the non-default antenna.
	 * XXX assumes 2 antennae
	 */
	if (!sc->sc_diversity) {
		otherant = sc->sc_defant & 1 ? 2 : 1;
		if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2)
			ath_setdefantenna(sc, otherant);
		/* restart the per-period tallies */
		sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0;
	}

	bf = ath_beacon_generate(sc, vap);
	if (bf != NULL) {
		/*
		 * Stop any current dma and put the new frame on the queue.
		 * This should never fail since we check above that no frames
		 * are still pending on the queue.
		 */
		if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) {
			DPRINTF(sc, ATH_DEBUG_ANY,
				"%s: beacon queue %u did not stop?\n",
				__func__, sc->sc_bhalq);
			/* NB: the HAL still stops DMA, so proceed */
		}
		/* Hand the new beacon to the h/w and kick the queue. */
		ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
		ath_hal_txstart(ah, sc->sc_bhalq);

		sc->sc_stats.ast_be_xmit++;		/* XXX per-vap? */

		/*
		 * Record local TSF for our last send for use
		 * in arbitrating slot collisions.
		 */
		vap->iv_bss->ni_tstamp.tsf = ath_hal_gettsf64(ah);
	}
}
7268 #endif /* IEEE80211_SUPPORT_TDMA */
7269